diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 5754585d72650902ae307d1d47e0d987f7015cc9..7539e44b53e4f6b4aedfb3d30e140cd47155c414 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3450,6 +3450,10 @@ force Enable ASPM even on devices that claim not to support it. WARNING: Forcing ASPM on may cause system lockups. + pcie_hp= [PCIE] PCI Express Hotplug driver options: + nomsi Do not use MSI for PCI Express Native Hotplug (this + makes all PCIe ports use INTx for hotplug services). + pcie_ports= [PCIE] PCIe port services handling: native Use native PCIe services (PME, AER, DPC, PCIe hotplug) even if the platform doesn't give the OS permission to diff --git a/Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt b/Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt new file mode 100644 index 0000000000000000000000000000000000000000..6db0d5306ba4d44b525d0dfd92349be5345f4ba4 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt @@ -0,0 +1,32 @@ +* Phytium SGPIO controller + +This SGPIO controller is for Phytium Pe220x SoCs and supports up to +96 (32x3) serial GPIOs. + +Required properties: +- compatible : Should contain "phytium,sgpio" +- reg : Address and length of the register set for the device. +- interrupts: Interrupt mapping for GPIO IRQ. +- gpio-controller : Marks the device node as a gpio controller. +- #gpio-cells : Should be 2. The first cell is the pin number and + the second cell is used to specify the gpio polarity: + 0 = active high + 1 = active low +- ngpios: number of GPIO lines, see gpio.txt + (should be a multiple of 32, up to 96 pins) +- bus-frequency: SGPIO CLK frequency +- clocks: A phandle to the APB clock for SGPIO clock division + +Example: + + sgpio: sgpio@2807d000 { + compatible = "phytium,sgpio"; + reg = <0x0 0x2807d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + ngpios = <96>; + bus-frequency = <48000>; + gpio-controller; + #gpio-cells = <2>; + }; + diff --git a/Documentation/devicetree/bindings/gpio/gpio-phytium.txt b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..77d4c6c03d0037ab37f7783f7ee074cae1301df4 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gpio-phytium.txt @@ -0,0 +1,47 @@ +* Phytium GPIO controller + +Required properties: +- compatible : Should contain "phytium,gpio" +- reg : Address and length of the register set for the device. +- interrupts: Interrupt mapping for GPIO IRQ. +- gpio-controller : Marks the device node as a gpio controller. +- #gpio-cells : Should be 2. The first cell is the pin number and + the second cell is used to specify the gpio polarity: + 0 = active high + 1 = active low +- #address-cells : should be 1 (for addressing port subnodes). +- #size-cells : should be 0 (port subnodes). + +The GPIO controller has two ports, each of which is represented as a child +node with the following properties: + +Required properties: +- compatible : "phytium,gpio-port" +- reg : The integer port index of the port, a single cell. + +Optional properties: +- nr-gpios : The number of pins in the port, a single cell.
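+ +Consumers reference pins with the two-cell specifier described above. A +minimal consumer sketch (mirroring the es8336 codec node wired up by the +Pe2202 demo board later in this series; the node name, controller phandle +and pin numbers are illustrative, and GPIO_ACTIVE_LOW comes from +dt-bindings/gpio/gpio.h): + + codec0: es8336@10 { + compatible = "everest,es8336"; + reg = <0x10>; + det-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; + sel-gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + };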
+ +Example: + +gpio: gpio@28004000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <8>; + }; + + portb { + compatible = "phytium,gpio-port"; + reg = <1>; + nr-gpios = <8>; + }; +}; diff --git a/Documentation/devicetree/bindings/gpu/phytium-display.txt b/Documentation/devicetree/bindings/gpu/phytium-display.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff2435967e1d6d14381a6d56d27d08874a466bc3 --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/phytium-display.txt @@ -0,0 +1,23 @@ +* Phytium Display Engine + +Required properties: + - compatible : value should be "phytium,dc". + - reg : first reg: Physical base address of the registers and length of memory + mapped region. + second reg (optional): Physical base address and length of video memory, which is reserved from system memory. + - interrupts : interrupt number. + - pipe_mask : specifies which pipes are enabled; each bit corresponds to a pipe. + - edp_mask : specifies which pipes are eDP ports; each bit corresponds to a pipe (0: DP, 1: eDP). + +Example: + /memreserve/ 0xf4000000 0x4000000; // (optional) + + dc0@32000000 { + compatible = "phytium,dc"; + reg = <0x0 0x32000000 0x0 0x8000>, + <0x0 0xf4000000 0x0 0x4000000>; // (optional) + interrupts = ; + pipe_mask = 0x3; + edp_mask = 0x0; + }; + diff --git a/Documentation/devicetree/bindings/hwlock/phytium-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/phytium-hwspinlock.txt new file mode 100644 index 0000000000000000000000000000000000000000..f39c86492bc9d4464c2d10bb66606bff34ba1369 --- /dev/null +++ b/Documentation/devicetree/bindings/hwlock/phytium-hwspinlock.txt @@ -0,0 +1,21 @@ +Phytium HwSpinlock Driver +========================= + +Required properties: +- compatible: Should be "phytium,hwspinlock" +- reg: Contains the hwspinlock module register address space + (base address and length) +- #hwlock-cells: Should be 1. +- nr-locks: The number of locks in the device. + +Please look at the generic hwlock binding for usage information for consumers, +"Documentation/devicetree/bindings/hwlock/hwlock.txt" + +Example: + +hwspinlock: spinlock@40000000 { + compatible = "phytium,hwspinlock"; + reg = <0x40000000 0x1000>; + #hwlock-cells = <1>; + nr-locks = <32>; +}; diff --git a/Documentation/devicetree/bindings/hwmon/tacho-phytium.txt b/Documentation/devicetree/bindings/hwmon/tacho-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d02b0684c13f1e7b11cf763a04eb7d536dda637 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/tacho-phytium.txt @@ -0,0 +1,48 @@ +Phytium Fan Tacho and capture counter controller device driver + +The controller supports one input signal. It can either measure the speed +of a fan or count the edges of the input signal; the function is selected +in the devicetree. The edge mode and anti-jitter (debounce) level can also +be set in the devicetree. + +Required properties for tacho node: +- #address-cells : should be 1. + +- #size-cells : should be 1. + +- reg : address and length of the register set for the device. + +- compatible : should be "phytium,tacho" + +- clocks : phandle to clock provider with the clock number in the second cell + +Optional properties for tacho node: + +- tacho : set the controller to work as a fan tachometer, which is the default option.
+ +- capture : set the controller to work as a capture counter. + +------------------------------------------------------------------------------- + +- up : set the input edge mode to rising, which is the default option. + +- down : set the input edge mode to falling. + +- double : set the input edge mode to double-edge. + +------------------------------------------------------------------------------- + +- debounce-level : set the debounce level, which can be 0, 1, 2 or 3. + +Examples: + +tacho: tacho@28054000 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0x0 0x28054000 0x0 0x1000>; + compatible = "phytium,tacho"; + clocks = <&sysclk>; + tacho; + up; + debounce-level = <2>; +}; diff --git a/Documentation/devicetree/bindings/i2c/i2c-phytium.txt b/Documentation/devicetree/bindings/i2c/i2c-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1da97b7c60dcdec68f9ff002d62d39f2c14df9a --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-phytium.txt @@ -0,0 +1,24 @@ +* Phytium I2C/SMBus controller + +Required properties : + + - compatible : should be "phytium,i2c" + - reg : Offset and length of the register set for the device + - interrupts : the interrupt number of the controller. + - clock-frequency : desired I2C bus clock frequency in Hz. + +Optional properties: + + - interrupt-names: should be "smbus_alert" if the SMBus alert + interrupt is supported. + +Examples : + + i2c0: i2c@28011000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28011000 0x0 0x1000>; + interrupts = ; + interrupt-names = "smbus_alert"; + clocks = <&sysclk_48mhz>; + }; + diff --git a/Documentation/devicetree/bindings/iio/adc/phytium-adc.txt b/Documentation/devicetree/bindings/iio/adc/phytium-adc.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fc793a857648a1085bd5037c2e6e7ccf2325346 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/phytium-adc.txt @@ -0,0 +1,57 @@ +Phytium ADC + +This device is a 10-bit converter for 8 voltage channels. All inputs are +single ended. + +Required properties: +- compatible: Should be "phytium,adc" +- reg: memory window mapping address and length +- interrupts: Interrupt for the ADC control interface. +- clocks: Input clock used to derive the sample clock. +- #address-cells: Should be <1> (settings for the subnodes). +- #size-cells: Should be <0> (settings for the subnodes). + +Required subnodes: + +The ADC channels are configured as subnodes of the ADC. + +Required channel node properties: + +- reg: should contain the hardware channel number. + +Examples: + + adc0: adc@2807b000 { + compatible = "phytium,adc"; + reg = <0x0 0x2807b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + }; + channel@1 { + reg = <1>; + }; + channel@2 { + reg = <2>; + }; + channel@3 { + reg = <3>; + }; + channel@4 { + reg = <4>; + }; + channel@5 { + reg = <5>; + }; + channel@6 { + reg = <6>; + }; + channel@7 { + reg = <7>; + }; + }; diff --git a/Documentation/devicetree/bindings/input/phytium-keypad.txt b/Documentation/devicetree/bindings/input/phytium-keypad.txt new file mode 100644 index 0000000000000000000000000000000000000000..b34e6a1b367821aa575c7400090fcf0b759a50cd --- /dev/null +++ b/Documentation/devicetree/bindings/input/phytium-keypad.txt @@ -0,0 +1,41 @@ +* Phytium Keypad Port device tree bindings + +The keypad port is designed to interface with a keypad matrix, which +simplifies the software task of scanning the matrix.
It is capable +of detecting, debouncing, and decoding one or multiple keys pressed +simultaneously on a keypad. + +Required SoC Specific Properties: +- compatible: Should be "phytium,keypad". +- reg: Physical base address and length of memory mapped region. +- interrupts: Interrupt number to the CPU(s). + +Required Board Specific Properties: +- linux,keymap: The definition can be found at +bindings/input/matrix-keymap.txt. + +Example: + +keypad: keypad@2807a000 { + compatible = "phytium,keypad"; + reg = <0x0 0x2807a000 0x0 0x1000>; + interrupts = ; + keypad,num-rows = <4>; + keypad,num-columns = <4>; + linux,keymap = <0x00000067 /* KEY_UP */ + 0x0001006c /* KEY_DOWN */ + 0x00020072 /* KEY_VOLUMEDOWN */ + 0x00030066 /* KEY_HOME */ + 0x0100006a /* KEY_RIGHT */ + 0x01010069 /* KEY_LEFT */ + 0x0102001c /* KEY_ENTER */ + 0x01030073 /* KEY_VOLUMEUP */ + 0x02000040 /* KEY_F6 */ + 0x02010042 /* KEY_F8 */ + 0x02020043 /* KEY_F9 */ + 0x02030044 /* KEY_F10 */ + 0x0300003b /* KEY_F1 */ + 0x0301003c /* KEY_F2 */ + 0x0302003d /* KEY_F3 */ + 0x03030074>; /* KEY_POWER */ +}; diff --git a/Documentation/devicetree/bindings/ipmi/phytium,bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/phytium,bt-bmc.txt new file mode 100644 index 0000000000000000000000000000000000000000..7fb1de15d19b4e58a0bbd73d1c1a9be5c7557314 --- /dev/null +++ b/Documentation/devicetree/bindings/ipmi/phytium,bt-bmc.txt @@ -0,0 +1,20 @@ +Phytium BT (Block Transfer) IPMI interface + +The Phytium E-series SoCs can be used in BMCs, which +have a BT interface used to perform in-band IPMI communication +with their host. + +Required properties: + +- compatible : should be one of + "phytium,bt-bmc" +- reg: physical address and size of the registers +- interrupts: interrupt generated by the BT interface. + +Example: + + bt: bt@250 { + compatible = "phytium,bt-bmc"; + reg = <0x250 0x1c>; + interrupts = ; + }; diff --git a/Documentation/devicetree/bindings/ipmi/phytium,kcs-bmc.txt b/Documentation/devicetree/bindings/ipmi/phytium,kcs-bmc.txt new file mode 100644 index 0000000000000000000000000000000000000000..85cf7114c7a97a9d77b80d475d2fbb16749ac488 --- /dev/null +++ b/Documentation/devicetree/bindings/ipmi/phytium,kcs-bmc.txt @@ -0,0 +1,23 @@ +Phytium KCS (Keyboard Controller Style) IPMI interface + +The Phytium E-series SoCs can be used in BMCs, which +have a KCS interface to perform in-band IPMI communication +with their host. + +Required properties: +- compatible : should be one of + "phytium,kcs-bmc" +- reg : physical address and size of the registers +- interrupts : interrupt generated by the controller +- kcs_chan : The LPC channel number in the controller +- kcs_addr : The host CPU IO map address + +Example: + + kcs0: kcs@24 { + compatible = "phytium,kcs-bmc"; + reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; + interrupts = ; + kcs_chan = <1>; + kcs_addr = <0xca0>; + }; diff --git a/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d6f5a44f6e4bb961d4df5be5befa5cabe91010d --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt @@ -0,0 +1,32 @@ +Phytium Mailbox Driver +====================== + +The Phytium mailbox controller has a channel/link to communicate +with the remote end. A link raises an interrupt for any received data. However, +there is no specified way of knowing if the sent data has been read by the +remote.
This driver assumes the sender polls the STAT register and the remote +clears it after having read the data. + +Mailbox Device Node: +==================== + +Required properties: +-------------------- +- compatible: Shall be "phytium,mbox" +- reg: Contains the mailbox register address range (base + address and length) +- #mbox-cells: Shall be 1 - the index of the channel needed. +- interrupts: Contains the interrupt information corresponding to + the link. + +Example: +-------- + +mbox: mailbox@2a000000 { + compatible = "phytium,mbox"; + reg = <0x0 0x2a000000 0x0 0x1000>; + #mbox-cells = <1>; + interrupts = <0 48 4>; + clocks = <&sysclk>; + clock-names = "apb_pclk"; +}; diff --git a/Documentation/devicetree/bindings/media/phytium-jpeg.txt b/Documentation/devicetree/bindings/media/phytium-jpeg.txt new file mode 100644 index 0000000000000000000000000000000000000000..6dc6d970e8e956d994de622839542e6c9687051b --- /dev/null +++ b/Documentation/devicetree/bindings/media/phytium-jpeg.txt @@ -0,0 +1,21 @@ +* Device tree bindings for Phytium JPEG Engine + +The JPEG Engine embedded in the Phytium SoCs can +capture and compress video data from digital or analog sources. + +Required properties: + - compatible: "phytium,jpeg" + - reg: contains the offset and length of the + JPEG Engine memory region + - interrupts: the interrupt associated with the VE on this platform + - phytium,ocm-buf-addr : the physical address used to store the incoming video data + +Example: + +jpeg: jpeg@32b32000 { + compatible = "phytium,jpeg"; + reg = <0x0 0x32b32000 0 0x1000>; + interrupts = <0 41 4>; + phytium,ocm-buf-addr = <0x30c40000 0x30c60000>; + status = "okay"; +}; diff --git a/Documentation/devicetree/bindings/mmc/phytium-mci.txt b/Documentation/devicetree/bindings/mmc/phytium-mci.txt new file mode 100644 index 0000000000000000000000000000000000000000..129efb1fb2f6940e5f462e5ed5189ce357a5595e --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/phytium-mci.txt @@ -0,0 +1,36 @@ +* Phytium Multimedia Card Interface controller + +The high-speed MMC host controller on Phytium SoCs provides an interface +for MMC, SD and SDIO types of memory cards. + +Required properties: +- compatible : should be "phytium,mci". +- reg: mmc controller base registers. +- clocks : phandles to input clocks. +- clock-names : should be "phytium_mci_clk". +- interrupts : mmc controller interrupt.
+ +Examples: + - Within .dtsi: + mmc0: mmc@28000000 { + compatible = "phytium,mci"; + reg = <0x0 0x28000000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_1200mhz>; + clock-names = "phytium_mci_clk"; + status = "disabled"; + }; + + - Within dts: + &mmc0 { + bus-width = <4>; + max-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + sd-uhs-sdr12; + sd-uhs-sdr25; + sd-uhs-sdr50; + no-mmc; + status = "okay"; + }; + diff --git a/Documentation/devicetree/bindings/net/can/phytium-can.txt b/Documentation/devicetree/bindings/net/can/phytium-can.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab109e54052c8f5c356cbd09596c7b14591cce71 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/phytium-can.txt @@ -0,0 +1,29 @@ +Phytium CAN controller +------------------------ + +Required properties: +- compatible: Should be: + - "phytium,can" for Phytium CAN controllers + - "phytium,canfd" for Phytium CAN controllers with CANFD support +- reg: Should contain CANFD controller registers location and length +- interrupts: Should contain IRQ line for the CANFD controller +- clocks: Clocks used by the controller +- clock-names: Input clock names, should be "can_clk" +- tx-fifo-depth: Indicates the length of the TX FIFO +- rx-fifo-depth: Indicates the length of the RX FIFO + +Optional property: +- extend_brp: Apply the extended BRP parameter of bit timing for + early versions of the CAN controller + +Example: + + can0: can@2800a000 { + compatible = "phytium,canfd"; + reg = <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + }; diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 3e17ac1d5d58caa2c89dae53648c5b1d33efe0dd..27168b116916324695cfe57a9cec224a09ff63c8 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -15,6 +15,7 @@ Required properties: Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. + Use "cdns,phytium-gem" for Phytium SoCs. Or the generic form: "cdns,emac". - reg: Address and length of the register set for the device - interrupts: Should contain macb interrupt diff --git a/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt b/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt new file mode 100644 index 0000000000000000000000000000000000000000..16bcafb3166ef78fcbb82e732cd2fdc10efea45a --- /dev/null +++ b/Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt @@ -0,0 +1,21 @@ +* Phytium PCIe endpoint controller + +Required properties: +- compatible: Should contain "phytium,phytium-pcie-ep" to identify the IP used. +- reg: Should contain the controller register base address, AXI interface + region base address and hpb register base address respectively. +- reg-names: Must be "reg", "mem" and "hpb" respectively. +- max-outbound-regions: Set to maximum number of outbound regions. +- max-functions: Maximum number of functions that can be configured (default 1).
+ +Example: + +ep0: ep@29030000 { + compatible = "phytium,dp2008-pcie-ep"; + reg = <0x0 0x29030000 0x0 0x10000>, + <0x11 0x00000000 0x1 0x00000000>, + <0x0 0x29101000 0x0 0x1000>; + reg-names = "reg", "mem", "hpb"; + max-outbound-regions = <3>; + max-functions = /bits/ 8 <1>; +}; diff --git a/Documentation/devicetree/bindings/pwm/pwm-phytium.txt b/Documentation/devicetree/bindings/pwm/pwm-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..0285e95a3730905651c5cc564edd9b981d8d2a22 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/pwm-phytium.txt @@ -0,0 +1,23 @@ +Phytium PWM controller + +Required properties: +- compatible: should be "phytium,pwm" +- reg: physical base address and length of the controller's registers +- interrupts: interrupt for the pwm controller +- clocks : clock specifiers for both ipg and per clocks. +- phytium,db: One or two groups of values describing the dead-band configurations. + "cntmod" indicates the counter mode (0 for modulo, 1 for up-and-down). + "dutymod" indicates which duty to compare with (0 for PWM_CCR, 1 for FIFO). + "div" selects the clock divider value, from 0 to 1023. + "updbcly" selects the rising edge delay cycles. + "dbpolarity" selects the polarity for dead-band. + +Example: + +pwm0: pwm@2804a000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_48mhz>; + phytium,db = <0 0 0 1000 0>; +}; diff --git a/Documentation/devicetree/bindings/rng/phytium-rng.txt b/Documentation/devicetree/bindings/rng/phytium-rng.txt new file mode 100644 index 0000000000000000000000000000000000000000..ee893f65c94ce4a011a4553c1f212764dc3a7f9b --- /dev/null +++ b/Documentation/devicetree/bindings/rng/phytium-rng.txt @@ -0,0 +1,13 @@ +Phytium Random Number Generator + +Required properties: +- compatible : Should be "phytium,rng" +- reg : Offset and length of the register set of this block + +Example: + + rng@31a06000 { + compatible = "phytium,rng"; + reg = <0x0 0x31a06000 0x0 0x1000>; + }; + diff --git a/Documentation/devicetree/bindings/sound/phytium-i2s.txt b/Documentation/devicetree/bindings/sound/phytium-i2s.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc4acffc4d5b8e3a6f418549f96bb594bbdfc17a --- /dev/null +++ b/Documentation/devicetree/bindings/sound/phytium-i2s.txt @@ -0,0 +1,24 @@ +* Phytium I2S controller + +The I2S bus (Inter-IC sound bus) is a serial link for digital +audio data transfer between devices in the system. + +Required properties: + +- compatible: should be "phytium,i2s" +- reg: contains two register regions: + - the first is the physical base address and length of the I2S controller. + - the second is the physical base address and length of the DMA_BDL controller. +- interrupts: should contain the DMA_BDL interrupt. +- clocks: phandle to clock provider with the clock number in the second cell +- dai-name: sets the DAI name used in the driver.
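+ +At the board level the controller is usually consumed through a +simple-audio-card machine driver. A minimal consumer sketch (mirroring the +sound card wiring used by the Pe2202 board files later in this series; the +codec phandle is illustrative): + +sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec { + sound-dai = <&codec0>; + }; +};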
+ +Example for Pe220x I2S controller: + +i2s@28009000 { + compatible = "phytium,i2s"; + reg = <0x0 0x28009000 0x0 0x1000>, <0x0 0x28005000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + dai-name = "phytium-i2s-lsd"; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-phytium.txt b/Documentation/devicetree/bindings/spi/spi-phytium.txt new file mode 100644 index 0000000000000000000000000000000000000000..a674d192132c0c5f5ddb5ea34481d19b2bfc8676 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-phytium.txt @@ -0,0 +1,24 @@ +Phytium SPI controller + +Required properties: +- compatible: should be "phytium,spi" +- #address-cells: see spi-bus.txt +- #size-cells: see spi-bus.txt +- reg: address and length of the spi master registers +- interrupts: should contain one interrupt +- clocks: spi clock phandle +- num-cs: see spi-bus.txt + +Optional properties: +- cs-gpios: see spi-bus.txt + +Example: + + +spi0: spi@2800c000 { + compatible = "phytium,spi"; + interrupts = ; + reg = <0x0 0x2800c000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; +}; diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index ac4cd0d6195a89f6bf5619f34cd3987c20597aac..f8618a5204679d0869e3d327eddc8894fb6ae90a 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt @@ -18,6 +18,7 @@ Required properties: - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible device - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device + - "phytium,pe220x-xhci" for Phytium Pe220x SoC - "xhci-platform" (deprecated) When compatible with the generic version, nodes must list the diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 2c3fc512e7466fe0d7577db7d7c74b803739585c..3334d8399a36103b270a163e77b1cad606d473ba 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -292,6 +292,7 @@ parade Parade Technologies Inc. pericom Pericom Technology Inc. pervasive Pervasive Displays, Inc. phytec PHYTEC Messtechnik GmbH +phytium Phytium Technology Co., Ltd. picochip Picochip Ltd pine64 Pine64 pixcir PIXCIR MICROELECTRONICS Co., Ltd diff --git a/Documentation/devicetree/bindings/w1/phytium-w1.txt b/Documentation/devicetree/bindings/w1/phytium-w1.txt new file mode 100644 index 0000000000000000000000000000000000000000..43784c3aa6de16524a216add43a1fc984b21fe62 --- /dev/null +++ b/Documentation/devicetree/bindings/w1/phytium-w1.txt @@ -0,0 +1,14 @@ +* Phytium 1-wire bus master controller + +Required properties: +- compatible : should be "phytium,w1" +- reg : Address and length of the register set for the device +- interrupts : interrupt line. 
+ +Example: + + onewire0: onewire@2803f000 { + compatible = "phytium,w1"; + reg = <0x0 0x2803f000 0x0 0x1000>; + interrupts = ; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 1061db6fbc326134b0be16538d144e1a151406b8..21de8cb2b6aa57bc1d2b7836ee054e7ee434d34a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9759,6 +9759,12 @@ F: Documentation/media/v4l-drivers/meye* F: drivers/media/pci/meye/ F: include/uapi/linux/meye.h +MOTORCOMM PHY DRIVER +M: Peter Geis +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/phy/motorcomm.c + MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD M: Jiri Slaby S: Maintained diff --git a/arch/Kconfig b/arch/Kconfig index dd71b34fe4f5f94950195833c3faf337dd85d112..f53e81602922e14c8353d8f2f482a9876182f8e7 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -28,6 +28,7 @@ config OPROFILE tristate "OProfile system profiling" depends on PROFILING depends on HAVE_OPROFILE + depends on !PREEMPT_RT_FULL select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 1d5716bc060bed0a3b00109478c6faa81dae3c12..6883bc952d2249ea163eaf8d78cb65f0c3c15319 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef _ALPHA_SPINLOCK_TYPES_H #define _ALPHA_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d89d013f586cbb4528951b100e2a5c773c76e33b..2607cae04885975346fd1011d7c4b0a6eb8062d6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -51,7 +51,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) @@ -91,6 +91,7 @@ config ARM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ @@ -2166,7 +2167,7 @@ config NEON config KERNEL_MODE_NEON bool "Support for NEON in kernel mode" - depends on NEON && AEABI + depends on NEON && AEABI && !PREEMPT_RT_BASE help Say Y to include support for NEON in kernel mode. 
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index e4b1be66b3f56a8d0ecb2590559902bb347b539b..f4b253bd05edee50f175ca0eb830fe922e17ef6c 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -19,6 +19,7 @@ CONFIG_ARCH_MULTI_V5=y CONFIG_ARCH_AT91=y CONFIG_SOC_AT91RM9200=y CONFIG_SOC_AT91SAM9=y +# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set CONFIG_AEABI=y CONFIG_UACCESS_WITH_MEMCPY=y CONFIG_ZBOOT_ROM_TEXT=0x0 @@ -64,7 +65,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=4 CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_SSC=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index 2080025556b548fda23c33d10260b08889b20c1e..be92871ab15577bab7275887ee83ce3d9a0150f3 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -20,6 +20,7 @@ CONFIG_ARCH_AT91=y CONFIG_SOC_SAMA5D2=y CONFIG_SOC_SAMA5D3=y CONFIG_SOC_SAMA5D4=y +# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set CONFIG_AEABI=y CONFIG_UACCESS_WITH_MEMCPY=y CONFIG_ZBOOT_ROM_TEXT=0x0 @@ -75,7 +76,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=4 CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_ATMEL_TCLIB=y CONFIG_ATMEL_SSC=y CONFIG_EEPROM_AT24=y CONFIG_SCSI=y diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 46d41140df27dd9c4f15c713189db2199c1352a2..c421b5b81946e099af40713ebba17b4426b97336 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -23,6 +23,8 @@ #endif #ifndef __ASSEMBLY__ +#include + struct irqaction; struct pt_regs; diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 5976958647fe16e71a4b0977b057911470f6c450..a37c0803954baa7cf4830de21afddb0e1d1df266 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - #define TICKET_SHIFT 16 typedef struct { diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index d3e937dcee4d0832ce3602156931dc3ecfb0aabc..6ab96a2ce1f8e0ce791d46152aeae884cb6474f6 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -4,6 +4,13 @@ #include +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); +#else +static inline void +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + /* * For v7 SMP cores running a preemptible kernel we may be pre-empted * during a TLB maintenance operation, so execute an inner-shareable dsb @@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ __complete_pending_tlbi(); \ + switch_kmaps(prev, next); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 8f55dc520a3e5512cd1474f4a5c3c2d7e72698e8..4f834bfca4703f5e034bc545f807688f07630fd9 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -49,6 +49,7 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ + int 
preempt_lazy_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ __u32 cpu; /* cpu */ @@ -139,7 +140,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ +#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ +#define TIF_NEED_RESCHED_LAZY 7 #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 @@ -149,6 +151,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) @@ -164,7 +167,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, * Change these and you break ASM code in entry-common.S */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NEED_RESCHED_LAZY) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 40afe953a0e2d736563e204435eee659a8b955b5..d634d6f36140f965dcd9ca22b50f03a93a75fa47 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -58,6 +58,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index d779cd1a3b0c3c217f8267be2e34cb9bef71e1ea..dc7ae3d8e62fa5f15e74709dfbaac66f1bb93e91 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -209,11 +209,18 @@ __irq_svc: #ifdef CONFIG_PREEMPT ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 + bne 1f @ return from exception + ldr r0, [tsk, #TI_FLAGS] @ get flags + tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set + blne svc_preempt @ preempt!
+ + ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count + teq r8, #0 @ if preempt lazy count != 0 movne r0, #0 @ force flags to 0 - tst r0, #_TIF_NEED_RESCHED + tst r0, #_TIF_NEED_RESCHED_LAZY blne svc_preempt +1: #endif svc_exit r5, irq = 1 @ return from exception @@ -228,8 +235,14 @@ svc_preempt: 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED + bne 1b + tst r0, #_TIF_NEED_RESCHED_LAZY reteq r8 @ go again - b 1b + ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count + teq r0, #0 @ if preempt lazy count != 0 + beq 1b + ret r8 @ go again + #endif __und_fault: diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index e27fc2df523167cd6dbbeeb5c65bd7d23dd21402..bc326519f648602f7e71f8dca097753af874d3dc 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -56,7 +56,9 @@ __ret_fast_syscall: cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) + bne fast_work_pending + tst r1, #_TIF_SECCOMP bne fast_work_pending @@ -93,8 +95,11 @@ __ret_fast_syscall: cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) + bne do_slower_path + tst r1, #_TIF_SECCOMP beq no_work_pending +do_slower_path: UNWIND(.fnend ) ENDPROC(ret_fast_syscall) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 1c01358b9b6db8d4b3b7847d0584dc6f4e9a663c..263b0321dc5016d97ea122f2024395c52788fece 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -652,7 +652,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) */ trace_hardirqs_off(); do { - if (likely(thread_flags & _TIF_NEED_RESCHED)) { + if (likely(thread_flags & (_TIF_NEED_RESCHED | + _TIF_NEED_RESCHED_LAZY))) { schedule(); } else { if (unlikely(!user_mode(regs))) diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index 903f23c309df8bcfba40b8b49076c662c3a13960..fa493a86e2bb3675608ba43f69ec33f9305d43d8 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -107,6 +107,31 @@ config SOC_AT91SAM9 AT91SAM9X35 AT91SAM9XE +comment "Clocksource driver selection" + +config ATMEL_CLOCKSOURCE_PIT + bool "Periodic Interval Timer (PIT) support" + depends on SOC_AT91SAM9 || SOC_SAMA5 + default SOC_AT91SAM9 || SOC_SAMA5 + select ATMEL_PIT + help + Select this to get a clocksource based on the Atmel Periodic Interval + Timer. It has a relatively low resolution and the TC Block clocksource + should be preferred. + +config ATMEL_CLOCKSOURCE_TCB + bool "Timer Counter Blocks (TCB) support" + depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 || COMPILE_TEST + default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 + depends on !ATMEL_TCLIB + select ATMEL_ARM_TCB_CLKSRC + help + Select this to get a high precision clocksource based on a + TC block with a 5+ MHz base clock rate. + On platforms with 16-bit counters, two timer channels are combined + to make a single 32-bit timer. + It can also be used as a clock event device supporting oneshot mode. 
+ config HAVE_AT91_UTMI bool diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 6a1e682371b32c400041fb2aa0db1698a19a63d3..17dca0ff336e018ae58f69a392d9442345beb793 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -239,7 +239,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void exynos_secondary_init(unsigned int cpu) { @@ -252,8 +252,8 @@ static void exynos_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) @@ -317,7 +317,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -344,7 +344,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) if (timeout == 0) { printk(KERN_ERR "cpu1 power enable failed"); - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return -ETIMEDOUT; } } @@ -390,7 +390,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * calibrations, then wait for it to finish */ fail: - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? ret : 0; } diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c index f66815c3dd07e4708e8a4f7dd1a19f733c0b35a8..00524abd963f77d2bd4af1c2e3420f472f99a9be 100644 --- a/arch/arm/mach-hisi/platmcpm.c +++ b/arch/arm/mach-hisi/platmcpm.c @@ -61,7 +61,7 @@ static void __iomem *sysctrl, *fabric; static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static u32 fabric_phys_addr; /* * [0]: bootwrapper physical address @@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) return -EINVAL; - spin_lock_irq(&boot_lock); + raw_spin_lock_irq(&boot_lock); if (hip04_cpu_table[cluster][cpu]) goto out; @@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) out: hip04_cpu_table[cluster][cpu]++; - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); return 0; } @@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu) cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); hip04_cpu_table[cluster][cpu]--; if (hip04_cpu_table[cluster][cpu] == 1) { /* A power_up request went ahead of us. */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return; } else if (hip04_cpu_table[cluster][cpu] > 1) { pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); @@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu) } last_man = hip04_cluster_is_down(cluster); - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); if (last_man) { /* Since it's Cortex A15, disable L2 prefetching. 
*/ asm volatile( @@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu) cpu >= HIP04_MAX_CPUS_PER_CLUSTER); count = TIMEOUT_MSEC / POLL_MSEC; - spin_lock_irq(&boot_lock); + raw_spin_lock_irq(&boot_lock); for (tries = 0; tries < count; tries++) { if (hip04_cpu_table[cluster][cpu]) goto err; @@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); if (data & CORE_WFI_STATUS(cpu)) break; - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); /* Wait for clean L2 when the whole cluster is down. */ msleep(POLL_MSEC); - spin_lock_irq(&boot_lock); + raw_spin_lock_irq(&boot_lock); } if (tries >= count) goto err; @@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) goto err; if (hip04_cluster_is_down(cluster)) hip04_set_snoop_filter(cluster, 0); - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); return 1; err: - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); return 0; } #endif diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index 326e870d712394fad445033defd8e3ff5975ebdd..d9ac80aa1eb0a17dae81a0aa4e883f7386b86886 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c @@ -17,22 +17,22 @@ #include "hardware.h" static int num_idle_cpus = 0; -static DEFINE_SPINLOCK(cpuidle_lock); +static DEFINE_RAW_SPINLOCK(cpuidle_lock); static int imx6q_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - spin_lock(&cpuidle_lock); + raw_spin_lock(&cpuidle_lock); if (++num_idle_cpus == num_online_cpus()) imx6_set_lpm(WAIT_UNCLOCKED); - spin_unlock(&cpuidle_lock); + raw_spin_unlock(&cpuidle_lock); cpu_do_idle(); - spin_lock(&cpuidle_lock); + raw_spin_lock(&cpuidle_lock); if (num_idle_cpus-- == num_online_cpus()) imx6_set_lpm(WAIT_CLOCKED); - spin_unlock(&cpuidle_lock); + raw_spin_unlock(&cpuidle_lock); return index; } diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 1c73694c871ad8289b572056d5c3727f3ee22eb2..ac4d2f030b874f46d0864782f0ce2acc9f097845 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -69,7 +69,7 @@ static const struct omap_smp_config omap5_cfg __initconst = { .startup_addr = omap5_secondary_startup, }; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void __iomem *omap4_get_scu_base(void) { @@ -177,8 +177,8 @@ static void omap4_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -191,7 +191,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * Update the AuxCoreBoot0 with boot state for secondary core. 
@@ -270,7 +270,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return 0; } diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c index 75ef5d4be554ce9f8564f347e52da1e6766bf5ac..c17c86e5d860a1488800de12d2e76f3890b2573a 100644 --- a/arch/arm/mach-prima2/platsmp.c +++ b/arch/arm/mach-prima2/platsmp.c @@ -22,7 +22,7 @@ static void __iomem *clk_base; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void sirfsoc_secondary_init(unsigned int cpu) { @@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static const struct of_device_id clk_ids[] = { @@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) /* make sure write buffer is drained */ mb(); - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c index 5494c9e0c909b549ec696a9482852c339a4039a9..e8ce157d3548a654064e9c3939011711ad70d9f4 100644 --- a/arch/arm/mach-qcom/platsmp.c +++ b/arch/arm/mach-qcom/platsmp.c @@ -46,7 +46,7 @@ extern void secondary_startup_arm(void); -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); #ifdef CONFIG_HOTPLUG_CPU static void qcom_cpu_die(unsigned int cpu) @@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int scss_release_secondary(unsigned int cpu) @@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * Send the secondary CPU a soft interrupt, thereby causing @@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return ret; } diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c index 39038a03836acb8f3288488f063a99d5ef0f814c..6da5c93872bf87405fa6853d9c884c23ad36b860 100644 --- a/arch/arm/mach-spear/platsmp.c +++ b/arch/arm/mach-spear/platsmp.c @@ -32,7 +32,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void __iomem *scu_base = IOMEM(VA_SCU_BASE); @@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. 
*/ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c index 231f19e174365229f034c9897b6e61984b92d84c..a3419b7003e6f10b66ba40fcad0eb0ca82c8af1b 100644 --- a/arch/arm/mach-sti/platsmp.c +++ b/arch/arm/mach-sti/platsmp.c @@ -35,7 +35,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void sti_secondary_init(unsigned int cpu) { @@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a9ee0d9dc740a0582028a447d969d9ff0e39afbb..20b0e146de98bf8b1f933c2dbc0c9fc725d81a23 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); + if (interrupts_enabled(regs)) + local_irq_enable(); + if (user_mode(regs)) goto bad_area; @@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { + if (interrupts_enabled(regs)) + local_irq_enable(); + do_bad_area(addr, fsr, regs); return 0; } diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index d02f8187b1ccedb0df0520d2d75e6d93c838b360..542692dbd40a5efe7ffc78a2eccd69ff1d368900 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr) return *ptep; } +static unsigned int fixmap_idx(int type) +{ + return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); +} + void *kmap(struct page *page) { might_sleep(); @@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap); void *kmap_atomic(struct page *page) { + pte_t pte = mk_pte(page, kmap_prot); unsigned int idx; unsigned long vaddr; void *kmap; int type; - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page) type = kmap_atomic_idx_push(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); + idx = fixmap_idx(type); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM /* @@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page) * in place, so the contained TLB flush ensures the TLB is updated * with the new mapping. 
*/ - set_fixmap_pte(idx, mk_pte(page, kmap_prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_fixmap_pte(idx, pte); return (void *)vaddr; } @@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr) if (kvaddr >= (void *)FIXADDR_START) { type = kmap_atomic_idx(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); + idx = fixmap_idx(type); if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(idx)); - set_fixmap_pte(idx, __pte(0)); #else (void) idx; /* to kill a warning */ #endif + set_fixmap_pte(idx, __pte(0)); kmap_atomic_idx_pop(); } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { /* this address was obtained through kmap_high_get() */ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); } pagefault_enable(); - preempt_enable(); + preempt_enable_nort(); } EXPORT_SYMBOL(__kunmap_atomic); void *kmap_atomic_pfn(unsigned long pfn) { + pte_t pte = pfn_pte(pfn, kmap_prot); unsigned long vaddr; int idx, type; struct page *page = pfn_to_page(pfn); - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); type = kmap_atomic_idx_push(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); + idx = fixmap_idx(type); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(get_fixmap_pte(vaddr))); #endif - set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_fixmap_pte(idx, pte); return (void *)vaddr; } +#if defined CONFIG_PREEMPT_RT_FULL +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) +{ + int i; + + /* + * Clear @prev's kmap_atomic mappings + */ + for (i = 0; i < prev_p->kmap_idx; i++) { + int idx = fixmap_idx(i); + + set_fixmap_pte(idx, __pte(0)); + } + /* + * Restore @next_p's kmap_atomic mappings + */ + for (i = 0; i < next_p->kmap_idx; i++) { + int idx = fixmap_idx(i); + + if (!pte_none(next_p->kmap_pte[i])) + set_fixmap_pte(idx, next_p->kmap_pte[i]); + } +} +#endif diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c index c2366510187a8fb54eb27b994b3d7b998f3cad72..6b60f582b738ceaa61f7cc24c2382d20717b152e 100644 --- a/arch/arm/plat-versatile/platsmp.c +++ b/arch/arm/plat-versatile/platsmp.c @@ -32,7 +32,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void versatile_secondary_init(unsigned int cpu) { @@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * This is really belt and braces; we hold unintended secondary @@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c index bc9a37b3cecd6247c92c98c03221240898241bc8..cf94843ddddcd7e54061476c07ad262e427ff3ce 100644 --- a/arch/arm/xen/efi.c +++ b/arch/arm/xen/efi.c @@ -22,7 +22,7 @@ /* Set XEN EFI runtime services function pointers. Other fields of struct efi, * e.g. efi.systab, will be set like normal EFI. */ -void __init xen_efi_runtime_setup(void) +void xen_efi_runtime_setup(void) { efi.get_time = xen_efi_get_time; efi.set_time = xen_efi_set_time; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a101f5d2fbed4a2f2443894d1838695b06cc4cc1..77c5bfb3743c782debfa532979438f44417574ed 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -75,6 +75,7 @@ config ARM64 select CLONE_BACKWARDS select COMMON_CLK select CPU_PM if (SUSPEND || CPU_IDLE) + select CRC32 select DCACHE_WORD_ACCESS select DMA_DIRECT_OPS select EDAC_SUPPORT @@ -141,6 +142,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE select HAVE_RSEQ diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 91c7ffad854134eba8ffde43cf1792122f159492..d3ba9fe74f9761085b21d11d296c904902dd4f13 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -140,6 +140,11 @@ config ARCH_MVEBU - Armada 7K SoC Family - Armada 8K SoC Family +config ARCH_PHYTIUM + bool "Phytium SoC Family" + help + This enables support for Phytium ARMv8 SoC family. + config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 4690364d584bf4e6a9bf43e8b8788fb8b1c5f0cf..ff8820d78db5021d3a57ceb51fb3544687803eb1 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -16,6 +16,7 @@ subdir-y += lg subdir-y += marvell subdir-y += mediatek subdir-y += nvidia +subdir-y += phytium subdir-y += qcom subdir-y += realtek subdir-y += renesas diff --git a/arch/arm64/boot/dts/phytium/Makefile b/arch/arm64/boot/dts/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..baef40bc012dcc20332bacb7cb06c27b79de35dc --- /dev/null +++ b/arch/arm64/boot/dts/phytium/Makefile @@ -0,0 +1,10 @@ +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000q-demo-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000d-demo-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000s-demo-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000q-miniitx-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000d-miniitx-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000q-vpx-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000q-edu-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000q-come-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000d-power-board.dtb +dtb-$(CONFIG_ARCH_PHYTIUM) += e2000q-hanwei-board.dtb diff --git a/arch/arm64/boot/dts/phytium/e2000d-demo-board.dts b/arch/arm64/boot/dts/phytium/e2000d-demo-board.dts new file mode 100644 index 0000000000000000000000000000000000000000..cde22bf16111cd431569522b71dc609dbc78ba3a --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000d-demo-board.dts @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2202 demo board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2202.dtsi" +#include "dt-bindings/gpio/gpio.h" + + +/{ + model = "Pe2202 DEMO DDR4"; + compatible = "phytium,pe2202"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + }; + + sound_card: sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,pin-switches = "mic-in"; + simple-audio-card,widgets = "Microphone","mic-in"; + simple-audio-card,routing = "MIC2","mic-in"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec{ + sound-dai = <&codec0>; + }; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + /* localized demo board use this rtc device */ + rtc@32 { + compatible = "wave,sd3068"; + reg = <0x32>; + }; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio14: i2c@28030000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + codec0: es8336@10 { + det-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; + sel-gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + #sound-dai-cells = <0>; + compatible = "everest,es8336"; + reg = <0x10>; + }; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&usb2_2 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +/* + * In demo board, M.2 interface may insert two types of disks: PCIE (nvme protocol) or SATA (ahci protocol), + * so here enable 'sata0' node by default to correspond to the sata drive case. Pin multiplexing exists + * in PCIE2 and SATA0, when used as a boot disk, the BIOS can automatically recognize these two conditions. 
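+ * That is: the slot carries either an NVMe disk on the PCIE2 lanes or an AHCI disk on SATA0, the two functions share one set of pins, and the firmware picks whichever disk type it finds at boot.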
+ */ +&sata0 { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi2 { + global-cs = <1>; + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&mmc0 { + bus-width = <0x00000008>; + max-frequency = <100000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + mmc-hs200-1_8v; + no-sdio; + no-sd; + non-removable; + status = "okay"; +}; + +&mmc1 { + bus-width = <0x00000004>; + max-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + no-sdio; + no-mmc; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <0>; + status = "okay"; +}; + +&dc0 { + pipe_mask = /bits/ 8 <0x2>; + edp_mask = /bits/ 8 <0x0>; + status = "okay"; +}; + +&i2s_dp1 { + status = "okay"; +}; + +&pmdk_dp { + num-dp = <1>; + dp-mask = /bits/ 8 <0x2>; + status = "okay"; +}; + +&rng0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/phytium/e2000d-miniitx-board.dts b/arch/arm64/boot/dts/phytium/e2000d-miniitx-board.dts new file mode 100644 index 0000000000000000000000000000000000000000..1760a910d8dcac0e8cd0a41b15bbf295af0ae6a0 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000d-miniitx-board.dts @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2202 miniitx board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + * + * Hongmin Qi + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2202.dtsi" + +/{ + model = "Pe2202 MINIITX Board"; + compatible = "phytium,pe2202"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x1 0x00000000>; + }; + + sound_card: sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec { + sound-dai = <&codec0>; + }; + }; +}; + +&soc { + mio0: uart@28014000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28014000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; + + mio1: uart@28016000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28016000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; + + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio6: uart@28020000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28020000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio7: uart@28022000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28022000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio14: i2c@28030000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + codec0: es8336@10 { + det-gpios = <&gpio2 5 0>; + sel-gpios = <&gpio2 6 0>; + #sound-dai-cells = <0>; + 
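+ /* det-gpios is taken to be the jack-detect line and sel-gpios the speaker/headphone route select, following the usual es8336 machine-driver use of these properties (assumption, not stated here). */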
compatible = "everest,es8336"; + reg = <0x10>; + mic-src = [20]; + }; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "host"; + status = "disabled"; +}; + +&usb2_2 { + dr_mode = "host"; + status = "disabled"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb1 { + phy-mode = "sgmii"; + use-mii; + status = "disabled"; +}; + +&macb2 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb3 { + phy-mode = "sgmii"; + use-mii; + status = "disabled"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "disabled"; + + flash: w25q128@0 { + compatible = "winbond,w25q128", "jedec,spi-nor"; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + spi-max-frequency = <12000000>; + reg = <0x00>; + status = "disabled"; + }; +}; + +&spi2 { + global-cs = <1>; + status = "disabled"; +}; + +&spi3 { + global-cs = <1>; + status = "disabled"; +}; + +&mmc0 { + status = "okay"; + bus-width = <0x4>; + max-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + no-mmc; +}; + +&mmc1 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&sata0 { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&dc0 { + status = "okay"; + pipe_mask = [02]; + edp_mask = [00]; +}; + +&i2s0 { + #sound-dai-cells = <0>; + dai-name = "phytium-i2s-lsd"; + status = "okay"; +}; + +&pwm1 { + phytium,db = <0 0 0 1000 1000 0>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/phytium/e2000d-power-board.dts b/arch/arm64/boot/dts/phytium/e2000d-power-board.dts new file mode 100755 index 0000000000000000000000000000000000000000..099dace5410f3d569568ad4e391765fae326afa4 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000d-power-board.dts @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2202 power board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ * + * Hongmin Qi + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2202.dtsi" + +/{ + model = "Pe2202 power Board"; + compatible = "phytium,pe2202"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x1 0x00000000>; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = <0x0 0x65 0x4>; + clocks = <&sysclk_50mhz>; + #address-cells = <0x1>; + #size-cells = <0x0>; + status = "okay"; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio6: i2c@28020000 { + status = "okay"; + compatible = "phytium,i2c"; + reg = <0x0 0x28020000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <0x1>; + #size-cells = <0x0>; + eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + pagesize = <1>; + }; + }; + + mio10: uart@28028000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28028000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio11: uart@2802a000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x2802a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio13: uart@2802e000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x2802e000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio14: uart@28030000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb1 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb2 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb3 { + phy-mode = "sgmii"; + use-mii; + status = "disabled"; +}; + +&dc0 { + pipe_mask = [02]; + edp_mask = [00]; + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "disabled"; +}; + +&mmc0 { + bus-width = <8>; + max-frequency = <50000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + no-sdio; + no-sd; + non-removable; + status = "okay"; +}; + +&mmc1 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/phytium/e2000q-come-board.dts b/arch/arm64/boot/dts/phytium/e2000q-come-board.dts new file 
mode 100755 index 0000000000000000000000000000000000000000..3d803602f8aabcc494155032162fb0b1d0357a74 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000q-come-board.dts @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2204 come board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + * + * Hongmin Qi + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2204.dtsi" + +/{ + model = "Pe2204 come Board"; + compatible = "phytium,pe2204"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x2 0x00000000>; + }; +}; + +&soc { + mio3: i2c@2801a000 { + status = "okay"; + compatible = "phytium,i2c"; + reg = <0x0 0x2801a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + + eeprom@50 { + compatible = "atmel,24c02"; + reg = <0x50>; + pagesize = <1>; + }; + }; + + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + rtc@32 { + compatible = "wave,sd3068"; + reg = <0x32>; + }; + }; + + mio6: uart@28020000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28020000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio10: uart@28028000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28028000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio14: uart@28030000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb1 { + phy-mode = "sgmii"; + use-mii; + status = "disabled"; +}; + +&macb2 { + phy-mode = "rgmii"; + use-mii; + status = "disabled"; +}; + +&macb3 { + phy-mode = "rgmii"; + use-mii; + status = "disabled"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "disabled"; +}; + +&spi2 { + global-cs = <1>; + status = "okay"; + + flash: w25q128@0 { + compatible = "winbond,w25q128", "jedec,spi-nor"; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + spi-max-frequency = <12000000>; + reg = <0x00>; + }; +}; + +&mmc0 { + bus-width = <4>; + max-frequency = <25000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + no-sdio; + no-sd; + non-removable; + status = "okay"; +}; + +&mmc1 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&sata0 { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&hda0 { + 
status = "okay"; +}; + +&dc0 { + status = "okay"; + pipe_mask = [03]; + edp_mask = [00]; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + +&pwm1 { + phytium,db = <0 0 0 1000 1000 0>; + status = "okay"; +}; + diff --git a/arch/arm64/boot/dts/phytium/e2000q-demo-board.dts b/arch/arm64/boot/dts/phytium/e2000q-demo-board.dts new file mode 100644 index 0000000000000000000000000000000000000000..8215b57cc0f940901cd876e36bacbd475539f80a --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000q-demo-board.dts @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2204 demo board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2204.dtsi" +#include "dt-bindings/gpio/gpio.h" + +/{ + model = "Pe2204 DEMO DDR4"; + compatible = "phytium,pe2204"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + }; + + sound_card: sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,pin-switches = "mic-in"; + simple-audio-card,widgets = "Microphone","mic-in"; + simple-audio-card,routing = "MIC2","mic-in"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec{ + sound-dai = <&codec0>; + }; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + /* localized demo board use this rtc device */ + rtc@32 { + compatible = "wave,sd3068"; + reg = <0x32>; + }; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio14: i2c@28030000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + codec0: es8336@10 { + det-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; + sel-gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + #sound-dai-cells = <0x0>; + compatible = "everest,es8336"; + reg = <0x10>; + }; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&usb2_2 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +/* + * In demo board, M.2 interface may insert two types of disks: PCIE (nvme protocol) or SATA (ahci protocol), + * so here enable 'sata0' node by default to correspond to the sata drive case. Pin multiplexing exists + * in PCIE2 and SATA0, when used as a boot disk, the BIOS can automatically recognize these two conditions. 
+ */ +&sata0 { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi2 { + global-cs = <1>; + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&mmc0 { + bus-width = <0x00000008>; + max-frequency = <100000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + mmc-hs200-1_8v; + no-sdio; + no-sd; + non-removable; + status = "okay"; +}; + +&mmc1 { + bus-width = <0x00000004>; + max-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + no-sdio; + no-mmc; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <0>; + dai-name = "phytium-i2s-lsd"; + status = "okay"; +}; + +&dc0 { + pipe_mask = /bits/ 8 <0x3>; + edp_mask = /bits/ 8 <0x0>; + status = "okay"; +}; + +&i2s_dp0 { + status = "okay"; +}; + +&i2s_dp1 { + status = "okay"; +}; + +&pmdk_dp { + num-dp = <2>; + dp-mask = /bits/ 8 <0x3>; + status = "okay"; +}; + +&rng0 { + status = "okay"; +}; + diff --git a/arch/arm64/boot/dts/phytium/e2000q-edu-board.dts b/arch/arm64/boot/dts/phytium/e2000q-edu-board.dts new file mode 100755 index 0000000000000000000000000000000000000000..27039933ac1459a672f3265cfb291ae23ee597b5 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000q-edu-board.dts @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2204 edu board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + * + * Hongmin Qi + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2204.dtsi" + +/{ + model = "Pe2204 edu Board"; + compatible = "phytium,pe2204"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x2 0x00000000>; + }; + + sound_card: sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec { + sound-dai = <&codec0>; + }; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <0x1>; + #size-cells = <0x0>; + status = "okay"; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio8: i2c@28024000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28024000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <0x1>; + #size-cells = <0x0>; + status = "okay"; + }; + + mio14: i2c@28030000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <0x1>; + #size-cells = <0x0>; + status = "okay"; + + codec0:es8336@10 { + det-gpios = <&gpio2 5 0>; + sel-gpios = <&gpio2 6 0>; + #sound-dai-cells = <0x0>; + compatible = "everest,es8336"; + reg = <0x10>; + mic-src = [30]; + }; + }; + + mio0: uart@28014000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28014000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio1: uart@28016000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28016000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio11: uart@2802A000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x2802a000 0x0 0x1000>; + interrupts = ; + 
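+ /* MIO blocks are pad-multiplexed: the same MIO instance appears as a UART on some boards and as an I2C controller on others (compare mio14 across these board files). */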
clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&gpio2 { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + //status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "host"; + //status = "okay"; +}; + +&usb2_2 { + dr_mode = "host"; + //status = "okay"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + //status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + //status = "okay"; +}; + +&macb1 { + phy-mode = "sgmii"; + use-mii; + //status = "okay"; +}; + +&macb2 { + phy-mode = "sgmii"; + use-mii; + //status = "okay"; +}; + +&macb3 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "okay"; + + flash: w25q128@0 { + compatible = "jedec,spi-nor"; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + spi-max-frequency = <12000000>; + reg = <0x00>; + status = "okay"; + }; +}; + +&mmc0 { + bus-width = <0x8>; + max-frequency = <50000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + no-sdio; + no-sd; + non-removable; + status = "okay"; +}; + +&mmc1 { + bus-width = <0x4>; + max-frequency = <25000000>; + cap-sdio-irq; + cap-sd-highspeed; + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&sata0 { + //status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&dc0 { + status = "okay"; + pipe_mask = [01]; + edp_mask = [00]; +}; + +&i2s0 { + #sound-dai-cells = <0>; + dai-name = "phytium-i2s-lsd"; + status = "okay"; +}; + +&i2s_dp0 { + status = "okay"; +}; + +&pmdk_dp { + num-dp = <1>; + dp-mask = /bits/ 8 <0x1>; + status = "okay"; +}; + +&pwm0 { + phytium,db = <0 0 0 0 0 0>; + status = "okay"; +}; + +&pwm1 { + phytium,db = <0 0 0 0 0 0>; + status = "okay"; +}; + + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + +&keypad { + keypad,num-rows = <4>; + keypad,num-columns = <4>; + linux,keymap = <0x00000011 + 0x00010012 + 0x00020013 + 0x00030014 + 0x01000021 /*KEY_21*/ + 0x01010022 /*KEY_22*/ + 0x01020023 /*KEY_23*/ + 0x01030024 /*KEY_24*/ + 0x02000031 /*KEY_31*/ + 0x02010032 /*KEY_32*/ + 0x02020033 /*KEY_33*/ + 0x02030034 /*KEY_34*/ + 0x03000041 /*KEY_41*/ + 0x03010042 /*KEY_42*/ + 0x03020043 /*KEY_43*/ + 0x03030044 /*KEY_44*/>; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/phytium/e2000q-hanwei-board.dts b/arch/arm64/boot/dts/phytium/e2000q-hanwei-board.dts new file mode 100755 index 0000000000000000000000000000000000000000..e5902d612d2cfbaecc50285796c8de10b2219fa4 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000q-hanwei-board.dts @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2204 hanwei board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ * + * Hongmin Qi + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2204.dtsi" + +/{ + model = "Pe2204 hanwei Board"; + compatible = "phytium,pe2204"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x2 0x00000000>; + }; +}; + +&soc { + mio3: uart@2801a000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x2801a000 0x0 0x1000>; + interrupts = <0x0 0x5f 0x4>; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio8: uart@28024000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28024000 0x0 0x1000>; + interrupts = <0x0 0x64 0x4>; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio10: uart@28028000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28028000 0x0 0x1000>; + interrupts = <0x0 0x66 0x4>; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio14: uart@28030000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = <0x0 0x6a 0x4>; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = <0x0 0x6b 0x4>; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&pcie { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb1 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb2 { + phy-mode = "rgmii"; + use-mii; + status = "okay"; +}; + +&macb3 { + phy-mode = "rgmii"; + use-mii; + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "disabled"; +}; + +&mmc0 { + bus-width = <8>; + max-frequency = <25000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + no-sdio; + no-sd; + non-removable; + status = "disabled"; +}; + +&mmc1 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&sata0 { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + diff --git a/arch/arm64/boot/dts/phytium/e2000q-miniitx-board.dts b/arch/arm64/boot/dts/phytium/e2000q-miniitx-board.dts new file mode 100644 index 0000000000000000000000000000000000000000..9015eae866dce1082c91d269f83bb5bc6e27fc73 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000q-miniitx-board.dts @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium miniITX-Pe2204 development board. + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ * + * Shaojun Yang + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2204.dtsi" + +/{ + model = "miniITX-Pe2204 Board"; + compatible = "phytium,pe2204"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + aliases { + serial4 = &mio0; + serial5 = &mio1; + serial6 = &mio8; + serial7 = &mio11; + serial8 = &mio15; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x2 0x00000000>; + }; + + sound_card: sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec{ + sound-dai = <&codec0>; + }; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio14: i2c@28030000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + codec0: es8336@10 { + det-gpios = <&gpio2 5 0>; + sel-gpios = <&gpio2 6 0>; + #sound-dai-cells = <0>; + compatible = "everest,es8336"; + reg = <0x10>; + mic-src = [20]; + }; + }; + + + mio0: uart@28014000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28014000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio1: uart@28016000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28016000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio8: uart@28024000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28024000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio11: uart@2802A000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2802A000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + +&watchdog0 { + status = "okay"; +}; + +&watchdog1 { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "peripheral"; + status = "disabled"; +}; + +&usb2_2 { + dr_mode = "peripheral"; + status = "disabled"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb2 { + phy-mode = "rgmii"; + use-mii; + status = "okay"; +}; + +&dc0 { + pipe_mask = [03]; + edp_mask = [00]; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <0>; + dai-name = "phytium-i2s-lsd"; + status = "okay"; +}; + 
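+/* The simple-audio-card node at the top of this file ties i2s0 above (the CPU DAI) to the es8336 codec on mio14 (the codec DAI). */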
+&i2s_dp0 { + dai-name = "phytium-i2s-dp0"; + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "disabled"; + + flash: w25q128@0 { + compatible = "winbond,w25q128", "jedec,spi-nor"; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + spi-max-frequency = <12000000>; + reg = <0x00>; + status = "disabled"; + }; +}; + +&spi1 { + global-cs = <1>; + status = "disabled"; +}; + +&spi2 { + global-cs = <1>; + status = "disabled"; +}; + +&mmc0 { + bus-width = <0x00000004>; + max-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + no-mmc; + status = "okay"; +}; + +&mmc1 { + bus-width = <0x00000008>; + max-frequency = <50000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + no-sdio; + no-sd; + non-removable; + status = "disabled"; +}; + +&pwm0 { + phytium,db = <0 0 0 1000 1000 0>; + status = "okay"; +}; + +&pwm1 { + phytium,db = <0 0 0 1000 1000 0>; + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&rng0 { + status = "okay"; +}; + diff --git a/arch/arm64/boot/dts/phytium/e2000q-vpx-board.dts b/arch/arm64/boot/dts/phytium/e2000q-vpx-board.dts new file mode 100755 index 0000000000000000000000000000000000000000..c2c0b9e711907c4609d690062013b3d70788e9fa --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000q-vpx-board.dts @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2204 vpx board. + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + * + * Tianyu Liu + */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2204.dtsi" + +/{ + model = "Pe2204 vpx board"; + compatible = "phytium,pe2204"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x2 0x00000000>; + }; + aliases { + serial4 = &mio3; + serial5 = &mio8; + serial6 = &mio10; + serial7 = &mio11; + serial8 = &mio15; + }; + + sound_card: sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,name = "phytium,pe220x-i2s-audio"; + simple-audio-card,cpu { + sound-dai = <&i2s0>; + }; + simple-audio-card,codec{ + sound-dai = <&codec0>; + }; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + mio14: i2c@28030000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28030000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + codec0: es8336@10 { + det-gpios = <&gpio2 5 0>; + sel-gpios = <&gpio2 6 0>; + #sound-dai-cells = <0>; + compatible = "everest,es8336"; + reg = <0x10>; + mic-src = [30]; + }; + }; + + mio3: uart@2801A000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2801A000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio8: uart@28024000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28024000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio10: uart@28028000 { + compatible = "arm,pl011","arm,primecell"; + 
reg = <0x0 0x28028000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio11: uart@2802A000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2802A000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio13: uart@2802E000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2802E000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; + + mio15: uart@28032000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x28032000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz &sysclk_50mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "okay"; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&gpio2 { + status = "okay"; +}; + +&gpio3 { + status = "okay"; +}; + +&gpio4 { + status = "okay"; +}; + +&gpio5 { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +&sata0 { + status = "okay"; +}; + +&sata1 { + status = "okay"; +}; + +&usb3_0 { + status = "okay"; +}; + +&usb3_1 { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "host"; + status = "disabled"; +}; + +&usb2_2 { + dr_mode = "host"; + status = "disabled"; +}; + +&usb2_3 { + dr_mode = "host"; + status = "okay"; +}; + +&usb2_4 { + dr_mode = "host"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&macb2 { + phy-mode = "rgmii"; + use-mii; + force-phy-mode; + status = "okay"; +}; + +&macb3 { + phy-mode = "rgmii"; + use-mii; + status = "okay"; +}; + +&dc0 { + pipe_mask = [03]; + edp_mask = [00]; + status = "okay"; +}; + +&i2s0 { + #sound-dai-cells = <0>; + dai-name = "phytium-i2s-lsd"; + status = "okay"; +}; + +&i2s_dp0 { + status = "okay"; +}; + +&i2s_dp1 { + status = "okay"; +}; + +&pmdk_dp { + num-dp = <2>; + dp-mask = /bits/ 8 <0x3>; + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi0 { + global-cs = <1>; + status = "disabled"; + + flash: w25q128@0 { + compatible = "winbond,w25q128", "jedec,spi-nor"; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + spi-max-frequency = <12000000>; + reg = <0x00>; + status = "disabled"; + }; +}; + +&spi2 { + global-cs = <1>; + status = "disabled"; +}; + +&mmc0 { + bus-width = <0x00000008>; + max-frequency = <25000000>; + cap-mmc-hw-reset; + cap-mmc-highspeed; + no-sdio; + no-sd; + non-removable; + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&uart3 { + status = "okay"; +}; + +&can0 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&rng0 { + status = "okay"; +}; + diff --git a/arch/arm64/boot/dts/phytium/e2000s-demo-board.dts b/arch/arm64/boot/dts/phytium/e2000s-demo-board.dts new file mode 100644 index 0000000000000000000000000000000000000000..ebaec1a5911c20bab18b88a270adccce7545102d --- /dev/null +++ b/arch/arm64/boot/dts/phytium/e2000s-demo-board.dts @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DTS file for Phytium Pe2201 demo board + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ */ + +/dts-v1/; +/memreserve/ 0x80000000 0x10000; + +#include "pe2201.dtsi" + +/{ + model = "Pe2201 DEMO DDR4"; + compatible = "phytium,pe2201"; + + chosen { + stdout-path = "serial1:115200n8"; + }; + + memory@00{ + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0x80000000>; + }; +}; + +&soc { + mio9: i2c@28026000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28026000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + }; +}; + +&pcie { + status = "okay"; +}; + +&usb2_0 { + dr_mode = "otg"; + status = "okay"; +}; + +&usb2_1 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&usb2_2 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&macb0 { + phy-mode = "sgmii"; + use-mii; + status = "okay"; +}; + +&qspi0 { + status = "okay"; + + flash@0 { + status = "okay"; + }; +}; + +&spi2 { + global-cs = <1>; + status = "okay"; +}; + +&uart1 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&mmc0 { + status = "okay"; +}; + +&mmc1 { + status = "okay"; +}; + +&rng0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/phytium/pe2201.dtsi b/arch/arm64/boot/dts/phytium/pe2201.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..dfee0aec8f5509527f14ab7b74a9e84584ea0e18 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/pe2201.dtsi @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for Phytium Pe2201 SoC + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + */ + +#include "pe220x.dtsi" + +/ { + compatible = "phytium,pe2201"; + + aliases { + ethernet0 = &macb0; + ethernet1 = &macb1; + }; +}; + +&cpu { + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc310", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + clocks = <&scmi_dvfs 2>; + + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache>; + }; + + l2_cache: l2-cache { + compatible = "cache"; + cache-level = <2>; + cache-unified; + cache-size = <0x40000>; + cache-line-size = <64>; + cache-sets = <16>; + }; +}; + +&soc { + i2c0: i2c@28011000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28011000 0x0 0x1000>; + interrupts = ; + interrupt-names = "smbus_alert"; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + i2c1: i2c@28012000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28012000 0x0 0x1000>; + interrupts = ; + interrupt-names = "smbus_alert"; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + i2c2: i2c@28013000 { + compatible = "phytium,i2c"; + reg = <0x0 0x28013000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + onewire0: onewire@2803f000 { + compatible = "phytium,w1"; + reg = <0x0 0x2803f000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + pwm2: pwm@2804c000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804c000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + pwm3: pwm@2804d000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + pwm4: pwm@2804e000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804e000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + pwm5: pwm@2804f000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804f000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + pwm6: pwm@28050000 { + compatible = 
"phytium,pwm"; + reg = <0x0 0x28050000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + pwm7: pwm@28051000 { + compatible = "phytium,pwm"; + reg = <0x0 0x28051000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + adc0: adc@2807b000 { + compatible = "phytium,adc"; + reg = <0x0 0x2807b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + }; + channel@1 { + reg = <1>; + }; + channel@2 { + reg = <2>; + }; + channel@3 { + reg = <3>; + }; + channel@4 { + reg = <4>; + }; + channel@5 { + reg = <5>; + }; + channel@6 { + reg = <6>; + }; + channel@7 { + reg = <7>; + }; + }; + + adc1: adc@2807c000 { + compatible = "phytium,adc"; + reg = <0x0 0x2807c000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0>; + }; + channel@1 { + reg = <1>; + }; + channel@2 { + reg = <2>; + }; + channel@3 { + reg = <3>; + }; + channel@4 { + reg = <4>; + }; + channel@5 { + reg = <5>; + }; + channel@6 { + reg = <6>; + }; + channel@7 { + reg = <7>; + }; + }; + + sgpio: sgpio@2807d000 { + compatible = "phytium,sgpio"; + reg = <0x0 0x2807d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + ngpios = <96>; + bus-frequency = <48000>; + gpio-controller; + #gpio-cells = <2>; + status = "disabled"; + }; + + macb0: ethernet@32010000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x32010000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + + macb1: ethernet@32012000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x32012000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + + jpeg0: jpeg@32b32000 { + compatible = "phytium,jpeg"; + reg = <0x0 0x32b32000 0 0x1000>, + <0x0 0x28072000 0 0x30>, + <0x0 0x28073000 0 0x30>; + interrupts = , + , + ; + phytium,ocm-buf-addr = <0x30c40000 0x30c60000>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/pe2202.dtsi b/arch/arm64/boot/dts/phytium/pe2202.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..79ab5fc63ed09b22eaf968def32b949e6c933f8d --- /dev/null +++ b/arch/arm64/boot/dts/phytium/pe2202.dtsi @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for Phytium Pe2202 SoC + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ */ + +#include "pe220x.dtsi" + +/ { + compatible = "phytium,pe2202"; + + aliases { + ethernet0 = &macb0; + ethernet1 = &macb1; + ethernet2 = &macb2; + ethernet3 = &macb3; + }; +}; + +&cpu { + cpu-map { + cluster0 { + core0 { + cpu = <&cpu_l0>; + }; + + core1 { + cpu = <&cpu_l1>; + }; + }; + }; + + cpu_l0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc310", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + clocks = <&scmi_dvfs 2>; + + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache>; + }; + + cpu_l1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc310", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + clocks = <&scmi_dvfs 2>; + + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache>; + }; + + l2_cache: l2-cache { + compatible = "cache"; + cache-level = <2>; + cache-unified; + cache-size = <0x40000>; + cache-line-size = <64>; + cache-sets = <16>; + }; +}; + +&soc { + hda0: hda@28006000 { + compatible = "phytium,hda"; + reg = <0x0 0x28006000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + i2s0: i2s@28009000 { + compatible = "phytium,i2s"; + reg = <0x0 0x28009000 0x0 0x1000>, + <0x0 0x28005000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "i2s_clk"; + status = "disabled"; + }; + + can0: can@2800a000 { + compatible = "phytium,canfd"; + reg = <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_200mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + status = "disabled"; + }; + + can1: can@2800b000 { + compatible = "phytium,canfd"; + reg = <0x0 0x2800b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_200mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + status = "disabled"; + }; + + keypad: keypad@2807a000 { + compatible = "phytium,keypad"; + reg = <0x0 0x2807a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + usb3_0: usb3@31a08000 { + compatible = "phytium,pe220x-xhci"; + reg = <0x0 0x31a08000 0x0 0x18000>; + interrupts = ; + status = "disabled"; + }; + + usb3_1: usb3@31a28000 { + compatible = "phytium,pe220x-xhci"; + reg = <0x0 0x31a28000 0x0 0x18000>; + interrupts = ; + status = "disabled"; + }; + + sata0: sata@31a40000 { + compatible = "generic-ahci"; + reg = <0x0 0x31a40000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + sata1: sata@32014000 { + compatible = "generic-ahci"; + reg = <0x0 0x32014000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + macb0: ethernet@3200c000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x3200c000 0x0 0x2000>; + interrupts = , + , + , + , + , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + + macb1: ethernet@3200e000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x3200e000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + + macb2: ethernet@32010000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x32010000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, 
<&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + use-mii; + status = "disabled"; + }; + + macb3: ethernet@32012000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x32012000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + use-mii; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/phytium/pe2204.dtsi b/arch/arm64/boot/dts/phytium/pe2204.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..6fd6cb523df78a532f5249fc72673e5a8d4f314b --- /dev/null +++ b/arch/arm64/boot/dts/phytium/pe2204.dtsi @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for Phytium Pe2204 SoC + * + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + */ + +#include "pe220x.dtsi" + +/ { + compatible = "phytium,pe2204"; + + aliases { + ethernet0 = &macb0; + ethernet1 = &macb1; + ethernet2 = &macb2; + ethernet3 = &macb3; + }; +}; + +&cpu { + cpu-map { + cluster0 { + core0 { + cpu = <&cpu_b0>; + }; + }; + + cluster1 { + core0 { + cpu = <&cpu_b1>; + }; + }; + + cluster2 { + core0 { + cpu = <&cpu_l0>; + }; + + core1 { + cpu = <&cpu_l1>; + }; + }; + }; + + cpu_l0: cpu@0 { + device_type = "cpu"; + compatible = "phytium,ftc310", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + clocks = <&scmi_dvfs 2>; + + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache_l>; + }; + + cpu_l1: cpu@1 { + device_type = "cpu"; + compatible = "phytium,ftc310", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "psci"; + clocks = <&scmi_dvfs 2>; + + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache_l>; + }; + + l2_cache_l: l2-cache { + compatible = "cache"; + cache-level = <2>; + cache-unified; + cache-size = <0x40000>; + cache-line-size = <64>; + cache-sets = <16>; + }; + + cpu_b0: cpu@100 { + device_type = "cpu"; + compatible = "phytium,ftc664", "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + clocks = <&scmi_dvfs 0>; + + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache_b>; + }; + + cpu_b1: cpu@101 { + device_type = "cpu"; + compatible = "phytium,ftc664", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + clocks = <&scmi_dvfs 1>; + + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + next-level-cache = <&l2_cache_b>; + }; + + l2_cache_b: l2-cache { + compatible = "cache"; + cache-level = <2>; + cache-unified; + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <16>; + }; +}; + +&soc { + hda0: hda@28006000 { + compatible = "phytium,hda"; + reg = <0x0 0x28006000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + i2s0: i2s@28009000 { + compatible = "phytium,i2s"; + reg = <0x0 0x28009000 0x0 0x1000>, + <0x0 0x28005000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "i2s_clk"; + status = "disabled"; + }; + + can0: can@2800a000 { + compatible = "phytium,canfd"; + reg = <0x0 0x2800a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_200mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + status = "disabled"; + }; + + can1: can@2800b000 { + compatible = "phytium,canfd"; + reg = <0x0 
0x2800b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_200mhz>; + clock-names = "can_clk"; + tx-fifo-depth = <64>; + rx-fifo-depth = <64>; + status = "disabled"; + }; + + keypad: keypad@2807a000 { + compatible = "phytium,keypad"; + reg = <0x0 0x2807a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + usb3_0: usb3@31a08000 { + compatible = "phytium,pe220x-xhci"; + reg = <0x0 0x31a08000 0x0 0x18000>; + interrupts = ; + status = "disabled"; + }; + + usb3_1: usb3@31a28000 { + compatible = "phytium,pe220x-xhci"; + reg = <0x0 0x31a28000 0x0 0x18000>; + interrupts = ; + status = "disabled"; + }; + + sata0: sata@31a40000 { + compatible = "generic-ahci"; + reg = <0x0 0x31a40000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + sata1: sata@32014000 { + compatible = "generic-ahci"; + reg = <0x0 0x32014000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + macb0: ethernet@3200c000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x3200c000 0x0 0x2000>; + interrupts = , + , + , + , + , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + support-tsn; + status = "disabled"; + }; + + macb1: ethernet@3200e000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x3200e000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + + macb2: ethernet@32010000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x32010000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + + macb3: ethernet@32012000 { + compatible = "cdns,phytium-gem"; + reg = <0x0 0x32012000 0x0 0x2000>; + interrupts = , + , + , + ; + clock-names = "pclk", "hclk", "tx_clk", "tsu_clk"; + clocks = <&sysclk_250mhz>, <&sysclk_48mhz>, <&sysclk_48mhz>, <&sysclk_250mhz>; + magic-packet; + status = "disabled"; + }; + +}; diff --git a/arch/arm64/boot/dts/phytium/pe220x.dtsi b/arch/arm64/boot/dts/phytium/pe220x.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..37bfb66b13f40886cb06180d81e4e1d7afc02880 --- /dev/null +++ b/arch/arm64/boot/dts/phytium/pe220x.dtsi @@ -0,0 +1,975 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * dts file for Phytium Pe220x SoC + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
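+ * + * Common SoC description shared by the Pe2201, Pe2202 and Pe2204 chip dtsi files, each of which includes this file.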
+ */ + +#include + +/ { + compatible = "phytium,pe220x"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + serial0 = &uart0; + serial1 = &uart1; + serial2 = &uart2; + serial3 = &uart3; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + cpu_suspend = <0xc4000001>; + cpu_off = <0x84000002>; + cpu_on = <0xc4000003>; + sys_poweroff = <0x84000008>; + sys_reset = <0x84000009>; + }; + + firmware { + scmi: scmi { + compatible = "arm,scmi"; + mboxes = <&mbox 0 &mbox 1>; + mbox-names = "tx", "rx"; + shmem = <&cpu_scp_hpri &cpu_scp_lpri>; + #address-cells = <1>; + #size-cells = <0>; + + scmi_dvfs: protocol@13 { + reg = <0x13>; + #clock-cells = <1>; + }; + + scmi_sensors0: protocol@15 { + reg = <0x15>; + #thermal-sensor-cells = <1>; + }; + }; + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; + + thermal-zones { + sensor0 { + polling-delay-passive = <100>; + polling-delay = <1000>; + thermal-sensors = <&scmi_sensors0 0>; + }; + + sensor1 { + polling-delay-passive = <100>; + polling-delay = <1000>; + thermal-sensors = <&scmi_sensors0 1>; + }; + }; + + cpu: cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + }; + + gic: interrupt-controller@30800000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x30800000 0 0x20000>, /* GICD */ + <0x0 0x30880000 0 0x80000>, /* GICR */ + <0x0 0x30840000 0 0x10000>, /* GICC */ + <0x0 0x30850000 0 0x10000>, /* GICH */ + <0x0 0x30860000 0 0x10000>; /* GICV */ + interrupts = ; + + its: gic-its@30820000 { + compatible = "arm,gic-v3-its"; + msi-controller; + reg = <0x0 0x30820000 0x0 0x20000>; + }; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <50000000>; + }; + + clocks { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + sysclk_48mhz: clk48mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <48000000>; + }; + + sysclk_50mhz: clk50mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + }; + + sysclk_100mhz: clk100mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + }; + + sysclk_200mhz: clk200mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <200000000>; + }; + + sysclk_250mhz: clk250mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <250000000>; + }; + + sysclk_300mhz: clk300mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <300000000>; + }; + + sysclk_600mhz: clk600mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <600000000>; + }; + + sysclk_1200mhz: clk1200mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <1200000000>; + }; + }; + + smmu: iommu@30000000 { + compatible = "arm,smmu-v3"; + reg = <0x0 0x30000000 0x0 0x800000>; + interrupts = , + , + , + ; + interrupt-names = "eventq", "priq", "cmdq-sync", "gerror"; + dma-coherent; + #iommu-cells = <1>; + }; + + soc: soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + dma-coherent; + ranges; + + mmc0: mmc@28000000 { + compatible = "phytium,mci"; + reg = <0x0 0x28000000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_1200mhz>; + clock-names = "phytium_mci_clk"; + status = "disabled"; + }; + + mmc1: mmc@28001000 { + compatible = "phytium,mci"; + reg = 
<0x0 0x28001000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_1200mhz>; + clock-names = "phytium_mci_clk"; + status = "disabled"; + }; + + nand0: nand@28002000 { + compatible = "phytium,nfc"; + reg = <0x0 0x28002000 0x0 0x1000>; + interrupts = ; + status = "disabled"; + }; + + qspi0: spi@28008000 { + compatible = "phytium,qspi"; + reg = <0x0 0x28008000 0x0 0x1000>, <0x0 0x0 0x0 0x0fffffff>; + reg-names = "qspi", "qspi_mm"; + clocks = <&sysclk_50mhz>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + flash@0 { /* Note: By default, chip select 0 is for BIOS, so be careful when enabling this node!!! */ + reg = <0>; + spi-rx-bus-width = <1>; + spi-max-frequency = <50000000>; + status = "disabled"; + }; + }; + + uart0: uart@2800c000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2800c000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_100mhz &sysclk_100mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; + + uart1: uart@2800d000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2800d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_100mhz &sysclk_100mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; + + uart2: uart@2800e000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2800e000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_100mhz &sysclk_100mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; + + uart3: uart@2800f000 { + compatible = "arm,pl011","arm,primecell"; + reg = <0x0 0x2800f000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_100mhz &sysclk_100mhz>; + clock-names = "uartclk", "apb_pclk"; + status = "disabled"; + }; + + lpc: lpc@28010000 { + compatible = "simple-mfd", "syscon"; + reg = <0x0 0x28010000 0x0 0x1000>; + reg-io-width = <4>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x28010000 0x1000>; + + kcs0: kcs@24 { + compatible = "phytium,kcs-bmc"; + reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; + interrupts = ; + status = "disabled"; + }; + + kcs1: kcs@28 { + compatible = "phytium,kcs-bmc"; + reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; + interrupts = ; + status = "disabled"; + }; + + kcs2: kcs@2c { + compatible = "phytium,kcs-bmc"; + reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; + interrupts = ; + status = "disabled"; + }; + + kcs3: kcs@8c { + compatible = "phytium,kcs-bmc"; + reg = <0x8c 0x1>, <0x90 0x1>, <0x94 0x1>; + interrupts = ; + status = "disabled"; + }; + + bt: bt@48 { + compatible = "phytium,bt-bmc"; + reg = <0x48 0x20>; + interrupts = ; + status = "disabled"; + }; + }; + + /* MIO */ + + gpio0: gpio@28034000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28034000 0x0 0x1000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <16>; + }; + }; + + gpio1: gpio@28035000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28035000 0x0 0x1000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <16>; + }; + }; + + gpio2: gpio@28036000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28036000 0x0 0x1000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + gpio-controller; + #gpio-cells = <2>; + 
#address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <16>; + }; + }; + + gpio3: gpio@28037000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28037000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <16>; + }; + }; + + gpio4: gpio@28038000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28038000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <16>; + }; + }; + + gpio5: gpio@28039000 { + compatible = "phytium,gpio"; + reg = <0x0 0x28039000 0x0 0x1000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + porta { + compatible = "phytium,gpio-port"; + reg = <0>; + nr-gpios = <16>; + }; + }; + + spi0: spi@2803a000 { + compatible = "phytium,spi"; + reg = <0x0 0x2803a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + num-cs = <4>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi1: spi@2803b000 { + compatible = "phytium,spi"; + reg = <0x0 0x2803b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + num-cs = <4>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi2: spi@2803c000 { + compatible = "phytium,spi"; + reg = <0x0 0x2803c000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + num-cs = <4>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi3: spi@2803d000 { + compatible = "phytium,spi"; + reg = <0x0 0x2803d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + num-cs = <4>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + watchdog0: watchdog@28040000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x28041000 0x0 0x1000>, + <0x0 0x28040000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + status = "disabled"; + }; + + watchdog1: watchdog@28042000 { + compatible = "arm,sbsa-gwdt"; + reg = <0x0 0x28043000 0x0 0x1000>, + <0x0 0x28042000 0x0 0x1000>; + interrupts = ; + timeout-sec = <30>; + status = "disabled"; + }; + + pwm0: pwm@2804a000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + pwm1: pwm@2804b000 { + compatible = "phytium,pwm"; + reg = <0x0 0x2804b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz>; + status = "disabled"; + }; + + tacho0: tacho@28054000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28054000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho1: tacho@28055000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28055000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho2: tacho@28056000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28056000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho3: tacho@28057000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28057000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho4: tacho@28058000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28058000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz 
>; + status = "disabled"; + }; + + tacho5: tacho@28059000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28059000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho6: tacho@2805a000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2805a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho7: tacho@2805b000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2805b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho8: tacho@2805c000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2805c000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho9: tacho@2805d000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2805d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho10: tacho@2805e000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2805e000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho11: tacho@2805f000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2805f000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho12: tacho@28060000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28060000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho13: tacho@28061000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28061000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho14: tacho@28062000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28062000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho15: tacho@28063000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28063000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho16: tacho@28064000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28064000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho17: tacho@28065000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28065000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho18: tacho@28066000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28066000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho19: tacho@28067000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28067000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho20: tacho@28068000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28068000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho21: tacho@28069000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28069000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho22: tacho@2806a000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2806a000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho23: tacho@2806b000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2806b000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho24: tacho@2806c000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2806c000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho25: tacho@2806d000 { + compatible = "phytium,tacho"; + reg 
= <0x0 0x2806d000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho26: tacho@2806e000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2806e000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho27: tacho@2806f000 { + compatible = "phytium,tacho"; + reg = <0x0 0x2806f000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho28: tacho@28070000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28070000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho29: tacho@28071000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28071000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho30: tacho@28072000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28072000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho31: tacho@28073000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28073000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho32: tacho@28074000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28074000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho33: tacho@28075000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28075000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho34: tacho@28076000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28076000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho35: tacho@28077000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28077000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho36: tacho@28078000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28078000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + tacho37: tacho@28079000 { + compatible = "phytium,tacho"; + reg = <0x0 0x28079000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_50mhz >; + status = "disabled"; + }; + + usb2_0: usb2@31800000 { /* usb_vhub0 USB2_P2 only otg mode */ + compatible = "phytium,usb2"; + reg = <0x0 0x31800000 0x0 0x80000>, + <0x0 0x31990000 0x0 0x10000>; + interrupts = ; + status = "disabled"; + }; + + usb2_1: usb2@31880000 { /* usb_vhub1 USB2_P2 */ + compatible = "phytium,usb2"; + reg = <0x0 0x31880000 0x0 0x80000>, + <0x0 0x319a0000 0x0 0x10000>; + interrupts = ; + status = "disabled"; + }; + + usb2_2: usb2@31900000 { /* usb_vhub2 USB2_P2 */ + compatible = "phytium,usb2"; + reg = <0x0 0x31900000 0x0 0x80000>, + <0x0 0x319b0000 0x0 0x10000>; + interrupts = ; + status = "disabled"; + }; + + usb2_3: usb2@32800000 { /* USB2_0 controller registers USB2_P3 */ + compatible = "phytium,usb2"; + reg = <0x0 0x32800000 0x0 0x40000>, + <0x0 0x32880000 0x0 0x40000>; /* USB2_0 UIB registers */ + interrupts = ; + status = "disabled"; + }; + + usb2_4: usb2@32840000 { /* USB2_1 controller registers USB2_P4 */ + compatible = "phytium,usb2"; + reg = <0x0 0x32840000 0x0 0x40000>, + <0x0 0x328c0000 0x0 0x40000>; /* USB2_1 UIB registers */ + interrupts = ; + status = "disabled"; + }; + + dc0: dc@32000000 { + compatible = "phytium,dc"; + reg = <0x0 0x32000000 0x0 0x8000>; + interrupts = ; + status = "disabled"; + }; + + i2s_dp0: i2s_dp0@32009000 { + compatible = "phytium,i2s"; + reg = <0x0 0x32009000 0x0 0x1000>, + <0x0 0x32008000 0x0 0x1000>; + interrupts = 
; + clocks = <&sysclk_600mhz>; + clock-names = "i2s_clk"; + dai-name = "phytium-i2s-dp0"; + status = "disabled"; + }; + + i2s_dp1: i2s_dp1@3200B000 { + compatible = "phytium,i2s"; + reg = <0x0 0x3200B000 0x0 0x1000>, + <0x0 0x3200A000 0x0 0x1000>; + interrupts = ; + clocks = <&sysclk_600mhz>; + clock-names = "i2s_clk"; + dai-name = "phytium-i2s-dp1"; + status = "disabled"; + }; + + pmdk_dp: pmdk_dp { + compatible = "phytium,pmdk-dp"; + status = "disabled"; + }; + + mbox: mailbox@32a00000 { + compatible = "phytium,mbox"; + reg = <0x0 0x32a00000 0x0 0x1000>; + interrupts = ; + #mbox-cells = <1>; + }; + + rng0: rng@32a36000 { + compatible = "phytium,rng"; + reg = <0x0 0x32a36000 0x0 0x1000>; + status = "disabled"; + }; + + sram: sram@32a10000 { + compatible = "phytium,pe220x-sram-ns", "mmio-sram"; + reg = <0x0 0x32a10000 0x0 0x2000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x32a10000 0x2000>; + + cpu_scp_lpri: scp-shmem@0 { + compatible = "arm,scmi-shmem"; + reg = <0x1000 0x400>; + }; + + cpu_scp_hpri: scp-shmem@1 { + compatible = "arm,scmi-shmem"; + reg = <0x1400 0x400>; + }; + }; + + hwspinlock: spinlock@32b36000 { + compatible = "phytium,hwspinlock"; + reg = <0x0 0x32b36000 0x0 0x1000>; + #hwlock-cells = <1>; + nr-locks = <32>; + status = "disabled"; + }; + + pcie: pcie@40000000 { + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + reg = <0x0 0x40000000 0x0 0x10000000>; + msi-parent = <&its>; + bus-range = <0x0 0xff>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x2 &gic 0x0 0x0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x3 &gic 0x0 0x0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x0 0x0 0x4 &gic 0x0 0x0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; + ranges = <0x01000000 0x00 0x00000000 0x0 0x50000000 0x0 0x00f00000>, + <0x02000000 0x00 0x58000000 0x0 0x58000000 0x0 0x28000000>, + <0x03000000 0x10 0x00000000 0x10 0x00000000 0x10 0x00000000>; + iommu-map = <0x0 &smmu 0x0 0x10000>; + status = "disabled"; + }; + + }; +}; diff --git a/arch/arm64/configs/phytium_defconfig b/arch/arm64/configs/phytium_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..1e69005e2efe06c1accbbd02841f1517d8bfe0ab --- /dev/null +++ b/arch/arm64/configs/phytium_defconfig @@ -0,0 +1,543 @@ +CONFIG_LOCALVERSION="-phytium-embeded" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_RT_FULL=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_PHYTIUM=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCI_DEBUG=y +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_IOV=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCIE_PHYTIUM_EP=y +CONFIG_PCI_ENDPOINT=y +CONFIG_ARM64_VA_BITS_48=y +CONFIG_SCHED_MC=y +CONFIG_SECCOMP=y 
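+# The options below enable kexec/crash-dump support and the Pe220x power
+# management stack: cpuidle, the full set of cpufreq governors, and the ARM
+# SCMI/SCPI firmware protocols that back CPU frequency scaling.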
+CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_XEN=y +CONFIG_COMPAT=y +CONFIG_HIBERNATION=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPUFREQ_DT=y +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ARM_BIG_LITTLE_CPUFREQ=y +CONFIG_ARM_SCPI_CPUFREQ=y +CONFIG_ARM_SCMI_CPUFREQ=y +CONFIG_ARM_SCMI_PROTOCOL=y +CONFIG_ARM_SCMI_TRANSPORT_FORCE_POLLING=y +CONFIG_ARM_SCPI_PROTOCOL=y +CONFIG_DMI_SYSFS=y +CONFIG_EFI_CAPSULE_LOADER=y +CONFIG_ACPI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=y +CONFIG_CRYPTO_SHA512_ARM64=m +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_INTEGRITY=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_CMA=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_MANGLE=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_BPF_JIT=y +CONFIG_CAN=y +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +CONFIG_CAN_PHYTIUM=m +CONFIG_CAN_PHYTIUM_PLATFORM=m +CONFIG_CAN_PHYTIUM_PCI=m +CONFIG_BT=m +CONFIG_BT_HIDP=m +# CONFIG_BT_HS is not set +# CONFIG_BT_LE is not set +CONFIG_BT_LEDS=y +# CONFIG_BT_DEBUGFS is not set +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIUART_BCM=y +CONFIG_CFG80211=m +CONFIG_MAC80211=m +CONFIG_MAC80211_LEDS=y +CONFIG_RFKILL=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=32 +CONFIG_BRCMSTB_GISB_ARB=y +CONFIG_SIMPLE_PM_BUS=y +CONFIG_VEXPRESS_CONFIG=y +CONFIG_MTD=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_PHYTIUM_PCI=y +CONFIG_MTD_NAND_PHYTIUM_PLAT=y +CONFIG_MTD_SPI_NOR=y +CONFIG_SPI_PHYTIUM_QUADSPI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_SRAM=y +CONFIG_EEPROM_AT25=m +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_CEVA=y +CONFIG_AHCI_QORIQ=y +CONFIG_SATA_SIL24=y +CONFIG_PATA_PLATFORM=y +CONFIG_PATA_OF_PLATFORM=y +CONFIG_NETDEVICES=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m 
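+# Virtual network devices and NIC drivers. The Cadence MACB driver behind
+# the on-chip GEM controllers is built in; most PCIe and USB adapters
+# (Intel e1000/igb/ixgbe/i40e, Realtek, Atheros) are modules.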
+CONFIG_TUN=y +CONFIG_VETH=m +CONFIG_VIRTIO_NET=y +CONFIG_AMD_XGBE=y +CONFIG_ATL1C=m +CONFIG_MACB=y +CONFIG_THUNDER_NIC_PF=y +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_MVMDIO=y +CONFIG_SKY2=y +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +CONFIG_STMMAC_ETH=m +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_BUS_MUX_MMIOREG=y +CONFIG_AT803X_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +CONFIG_MICREL_PHY=y +CONFIG_MOTORCOMM_PHY=y +CONFIG_REALTEK_PHY=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +CONFIG_BRCMFMAC=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_WL18XX=m +CONFIG_WLCORE_SDIO=m +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_ADC=m +CONFIG_KEYBOARD_GPIO=y +CONFIG_KEYBOARD_PHYTIUM=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +CONFIG_INPUT_MISC=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_PHYTIUM_PCI=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_PHYTIUM_KCS_IPMI_BMC=y +CONFIG_PHYTIUM_BT_IPMI_BMC=y +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_CAVIUM is not set +CONFIG_HW_RANDOM_PHYTIUM=y +CONFIG_TCG_TPM=y +CONFIG_TCG_TIS_I2C_INFINEON=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_PHYTIUM_PCI=y +CONFIG_I2C_PHYTIUM_PLATFORM=y +CONFIG_I2C_SLAVE_EEPROM=y +CONFIG_SPI=y +CONFIG_SPI_PHYTIUM_PLAT=y +CONFIG_SPI_PHYTIUM_PCI=y +CONFIG_SPI_SPIDEV=m +CONFIG_SPMI=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_SINGLE=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_W1=m +CONFIG_W1_MASTER_PHYTIUM=m +CONFIG_W1_SLAVE_THERM=m +CONFIG_POWER_AVS=y +CONFIG_POWER_RESET_BRCMSTB=y +CONFIG_POWER_RESET_VEXPRESS=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_BATTERY_SBS=m +CONFIG_BATTERY_BQ27XXX=y +CONFIG_SENSORS_ARM_SCMI=y +CONFIG_SENSORS_ARM_SCPI=y +CONFIG_SENSORS_PHYTIUM=y +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_CPU_THERMAL=y +CONFIG_THERMAL_EMULATION=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_ARM_SBSA_WATCHDOG=y +CONFIG_MFD_PHYTIUM_I2S_LSD=y +CONFIG_MFD_PHYTIUM_I2S_MMD=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_GPIO=y +CONFIG_REGULATOR_PWM=y +CONFIG_REGULATOR_VCTRL=m +CONFIG_RC_CORE=m +CONFIG_RC_DECODERS=y +CONFIG_RC_DEVICES=y +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_DVB_NET is not set +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_VIDEO_PHYTIUM_JPEG=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_DRM=y +CONFIG_DRM_RCAR_LVDS=m +CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_I2C_ADV7511=m +CONFIG_DRM_PHYTIUM=y +CONFIG_FB_ARMCLCD=y +CONFIG_BACKLIGHT_GENERIC=m +CONFIG_BACKLIGHT_PWM=m +CONFIG_BACKLIGHT_LP855X=m 
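+# Boot logo and audio follow; the Phytium HDA and I2S drivers plus the PMDK
+# codec cards (ES8388, ES8336, DP) are all modular.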
+CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_HDA_PHYTIUM=m +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_SOC=y +CONFIG_SND_SOC_PHYTIUM_I2S=m +CONFIG_SND_PMDK_ES8388=m +CONFIG_SND_PMDK_ES8336=m +CONFIG_SND_PMDK_DP=m +CONFIG_SND_SOC_ES8336=y +CONFIG_SND_SOC_ES8388=y +CONFIG_SND_SIMPLE_CARD=m +CONFIG_SND_AUDIO_GRAPH_CARD=m +CONFIG_I2C_HID=m +CONFIG_USB=y +CONFIG_USB_OTG=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_MUSB_HDRC=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC2=y +CONFIG_USB_CHIPIDEA=y +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_ISP1760=y +CONFIG_USB_PHYTIUM=y +CONFIG_USB_HSIC_USB3503=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_ULPI=y +CONFIG_USB_GADGET=y +CONFIG_USB_SNP_UDC_PLAT=y +CONFIG_USB_BDC_UDC=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_ACM=y +CONFIG_USB_CONFIGFS_OBEX=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_EEM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_UAC1=y +CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_UVC=y +CONFIG_USB_CONFIGFS_F_PRINTER=y +CONFIG_USB_ROLE_SWITCH=m +CONFIG_MMC=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_ACPI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SPI=y +CONFIG_MMC_CQHCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_PWM=y +CONFIG_LEDS_SYSCON=y +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_LEDS_TRIGGER_PANIC=y +CONFIG_EDAC=y +CONFIG_EDAC_GHES=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_DS1307=y +CONFIG_RTC_DRV_SD3068=m +CONFIG_RTC_DRV_DS3232=y +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_PL031=y +CONFIG_DMADEVICES=y +CONFIG_MV_XOR_V2=y +CONFIG_PL330_DMA=y +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PRUSS=m +CONFIG_UIO_MF624=m +CONFIG_VFIO=y +CONFIG_VFIO_PCI=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_XEN_GNTDEV=y +CONFIG_XEN_GRANT_DEV_ALLOC=y +CONFIG_STAGING=y +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +CONFIG_COMMON_CLK_SCPI=y +CONFIG_COMMON_CLK_CS2000_CP=y +CONFIG_CLK_QORIQ=y +CONFIG_COMMON_CLK_PWM=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_PHYTIUM=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_ARM_MHU=y +CONFIG_PHYTIUM_MBOX=y +CONFIG_PLATFORM_MHU=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +CONFIG_REMOTEPROC=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_VIRTIO=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_MEMORY=y +CONFIG_IIO=y +CONFIG_PHYTIUM_ADC=y +CONFIG_PWM=y +CONFIG_PWM_PHYTIUM=y +CONFIG_GENERIC_PHY=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +CONFIG_XFS_ONLINE_SCRUB=y +CONFIG_XFS_ONLINE_REPAIR=y +CONFIG_XFS_DEBUG=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y 
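+# Filesystems, NLS tables, crypto user APIs and debug options follow. To
+# build with OP-TEE support, the phytium_optee.config fragment added below
+# can be merged on top of this defconfig, e.g.:
+#   make phytium_defconfig phytium_optee.config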
+CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_ISO9660_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_EFIVAR_FS=y +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NFSD_FAULT_INJECTION=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_SECURITY=y +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_INDIRECT_PIO=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_RCU_TRACE=y +# CONFIG_FTRACE is not set +CONFIG_MEMTEST=y diff --git a/arch/arm64/configs/phytium_optee.config b/arch/arm64/configs/phytium_optee.config new file mode 100644 index 0000000000000000000000000000000000000000..07554cf843d34ef484dd90155765a91519476980 --- /dev/null +++ b/arch/arm64/configs/phytium_optee.config @@ -0,0 +1,2 @@ +CONFIG_TEE=y +CONFIG_OPTEE=y diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index d51944ff9f91dcccae2b28ef2777f5f6293d4429..0d4b3f0cfba69b681a2e84c4e5d945e49e41f35d 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -19,43 +19,43 @@ config CRYPTO_SHA512_ARM64 config CRYPTO_SHA1_ARM64_CE tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_HASH select CRYPTO_SHA1 config CRYPTO_SHA2_ARM64_CE tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_HASH select CRYPTO_SHA256_ARM64 config CRYPTO_SHA512_ARM64_CE tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_HASH select CRYPTO_SHA512_ARM64 config CRYPTO_SHA3_ARM64 tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_HASH select CRYPTO_SHA3 config CRYPTO_SM3_ARM64_CE tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_HASH select CRYPTO_SM3 config CRYPTO_SM4_ARM64_CE tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_ALGAPI select CRYPTO_SM4 config CRYPTO_GHASH_ARM64_CE tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_HASH select CRYPTO_GF128MUL select CRYPTO_AES @@ -63,7 +63,7 @@ config CRYPTO_GHASH_ARM64_CE config CRYPTO_CRCT10DIF_ARM64_CE tristate "CRCT10DIF digest algorithm using PMULL instructions" 
- depends on KERNEL_MODE_NEON && CRC_T10DIF + depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE select CRYPTO_HASH config CRYPTO_CRC32_ARM64_CE @@ -77,13 +77,13 @@ config CRYPTO_AES_ARM64 config CRYPTO_AES_ARM64_CE tristate "AES core cipher using ARMv8 Crypto Extensions" - depends on ARM64 && KERNEL_MODE_NEON + depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_ALGAPI select CRYPTO_AES_ARM64 config CRYPTO_AES_ARM64_CE_CCM tristate "AES in CCM mode using ARMv8 Crypto Extensions" - depends on ARM64 && KERNEL_MODE_NEON + depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_ALGAPI select CRYPTO_AES_ARM64_CE select CRYPTO_AES_ARM64 @@ -91,7 +91,7 @@ config CRYPTO_AES_ARM64_CE_CCM config CRYPTO_AES_ARM64_CE_BLK tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_BLKCIPHER select CRYPTO_AES_ARM64_CE select CRYPTO_AES_ARM64 @@ -99,7 +99,7 @@ config CRYPTO_AES_ARM64_CE_BLK config CRYPTO_AES_ARM64_NEON_BLK tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_BLKCIPHER select CRYPTO_AES_ARM64 select CRYPTO_AES @@ -107,13 +107,13 @@ config CRYPTO_AES_ARM64_NEON_BLK config CRYPTO_CHACHA20_NEON tristate "NEON accelerated ChaCha20 symmetric cipher" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 config CRYPTO_AES_ARM64_BS tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm" - depends on KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE select CRYPTO_BLKCIPHER select CRYPTO_AES_ARM64_NEON_BLK select CRYPTO_AES_ARM64 diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c index 34b4e3d46aab41b2d05d7e68e58d3cc1d7f4d304..ae055cdad8cf9eb812c41732d4dc2b1fc76630d8 100644 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ b/arch/arm64/crypto/crc32-ce-glue.c @@ -208,7 +208,8 @@ static struct shash_alg crc32_pmull_algs[] = { { static int __init crc32_pmull_mod_init(void) { - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) { + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && (elf_hwcap & HWCAP_PMULL)) { crc32_pmull_algs[0].update = crc32_pmull_update; crc32_pmull_algs[1].update = crc32c_pmull_update; diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 4fbbcdda70d76b5e80421aee1847c9a1c3d950d9..99b215602a1a90db522615fd268eb2bc82a27ac5 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif +#ifdef CONFIG_KVM_ARM_HOST +void kvm_compute_layout(void); +#else +static inline void kvm_compute_layout(void) { } +#endif + #define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ " .word 663f - .\n" /* new instruction */ \ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 64ae14371cae90582156e938f2d0d3404f6d9c79..dc862e00198658e0a7410605b93409da7a8328dd 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -55,7 +55,8 @@ #define ARM64_SSBS 34 #define ARM64_WORKAROUND_1542419 35 #define ARM64_SPECTRE_BHB 36 +#define ARM64_HAS_CRC32 37 -#define ARM64_NCAPS 37 
+#define ARM64_NCAPS 38 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index a157ff465e279f27ed76dd380bc77eaebe052cbe..f952fdda83460d077b6c1c835292956321f94323 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -16,10 +16,6 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) -# error "please don't include this file directly" -#endif - #include #include diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 69ac5262cccd50fc7ec6df9b359f030619fb2a4f..39e921817f28d7627a52635297509e7e82bcb71f 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -43,6 +43,7 @@ struct thread_info { u64 ttbr0; /* saved TTBR0_EL1 */ #endif int preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ }; #define thread_saved_pc(tsk) \ @@ -76,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ +#define TIF_NEED_RESCHED_LAZY 6 #define TIF_NOHZ 7 #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 @@ -94,6 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_NOHZ (1 << TIF_NOHZ) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) @@ -107,8 +110,9 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ - _TIF_UPROBE | _TIF_FSCHECK) + _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_NOHZ) diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 3747c8d87bdb2382c25a9b10a1a5f59b03432166..75ccb5b279a28fd0114ad6f3d14fee65ef8064a7 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -212,6 +212,7 @@ static int __apply_alternatives_multi_stop(void *unused) void __init apply_alternatives_all(void) { /* better not try code patching on a live SMP system */ + kvm_compute_layout(); stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 92fba851ce53a404ba979886929e466b82d27e01..844c71bc865bd05c5dbbdec3246605fec41739c0 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -41,6 +41,7 @@ int main(void) BLANK(); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); + DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct 
task_struct, thread_info.ttbr0)); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 03b0fdccaf0529ae883ddc0730b6880d4f52ad2f..05fb061c5700428dd6de570634b4a6ff5a3bcc0e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1352,6 +1352,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_ssbs, }, #endif + { + .desc = "CRC32 instructions", + .capability = ARM64_HAS_CRC32, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64ISAR0_EL1, + .field_pos = ID_AA64ISAR0_CRC32_SHIFT, + .min_field_value = 1, + }, {}, }; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 85433a84783b8549075e878313bdaf2e07e576a4..201f412b556efb826a9032de41ec2eb2ee23d66d 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -634,11 +634,16 @@ el1_irq: #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz w24, 1f // preempt count != 0 + cbnz w24, 2f // preempt count != 0 ldr x0, [tsk, #TSK_TI_FLAGS] // get flags - tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? - bl el1_preempt + tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? + + ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count + cbnz w24, 2f // preempt lazy count != 0 + tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? 1: + bl el1_preempt +2: #endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on @@ -652,6 +657,7 @@ el1_preempt: 1: bl preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? + tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? ret x24 #endif diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 89ab68cb35bbd06600b45e9bc165b61f0c3286da..481793a5adc16574d39fc3347102125f499dc9be 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -159,6 +159,16 @@ static void sve_free(struct task_struct *task) __sve_free(task); } +static void *sve_free_atomic(struct task_struct *task) +{ + void *sve_state = task->thread.sve_state; + + WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); + + task->thread.sve_state = NULL; + return sve_state; +} + /* * TIF_SVE controls whether a task can use SVE without trapping while * in userspace, and also the way a task's FPSIMD/SVE state is stored @@ -549,6 +559,7 @@ int sve_set_vector_length(struct task_struct *task, * non-SVE thread. 
*/ if (task == current) { + preempt_disable(); local_bh_disable(); fpsimd_save(); @@ -559,8 +570,10 @@ int sve_set_vector_length(struct task_struct *task, if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) sve_to_fpsimd(task); - if (task == current) + if (task == current) { local_bh_enable(); + preempt_enable(); + } /* * Force reallocation of task SVE state to the correct size @@ -815,6 +828,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) sve_alloc(current); + preempt_disable(); local_bh_disable(); fpsimd_save(); @@ -828,6 +842,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) WARN_ON(1); /* SVE access shouldn't have trapped */ local_bh_enable(); + preempt_enable(); } /* @@ -894,10 +909,12 @@ void fpsimd_thread_switch(struct task_struct *next) void fpsimd_flush_thread(void) { int vl, supported_vl; + void *mem = NULL; if (!system_supports_fpsimd()) return; + preempt_disable(); local_bh_disable(); memset(&current->thread.uw.fpsimd_state, 0, @@ -906,7 +923,7 @@ void fpsimd_flush_thread(void) if (system_supports_sve()) { clear_thread_flag(TIF_SVE); - sve_free(current); + mem = sve_free_atomic(current); /* * Reset the task vector length as required. @@ -942,6 +959,8 @@ void fpsimd_flush_thread(void) set_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); + preempt_enable(); + kfree(mem); } /* @@ -953,9 +972,11 @@ void fpsimd_preserve_current_state(void) if (!system_supports_fpsimd()) return; + preempt_disable(); local_bh_disable(); fpsimd_save(); local_bh_enable(); + preempt_enable(); } /* @@ -1026,6 +1047,7 @@ void fpsimd_restore_current_state(void) return; } + preempt_disable(); local_bh_disable(); if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { @@ -1034,6 +1056,7 @@ void fpsimd_restore_current_state(void) } local_bh_enable(); + preempt_enable(); } /* @@ -1046,6 +1069,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) if (WARN_ON(!system_supports_fpsimd())) return; + preempt_disable(); local_bh_disable(); current->thread.uw.fpsimd_state = *state; @@ -1058,6 +1082,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) clear_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); + preempt_enable(); } /* @@ -1104,6 +1129,7 @@ void kernel_neon_begin(void) BUG_ON(!may_use_simd()); + preempt_disable(); local_bh_disable(); __this_cpu_write(kernel_neon_busy, true); @@ -1117,6 +1143,7 @@ void kernel_neon_begin(void) preempt_disable(); local_bh_enable(); + preempt_enable(); } EXPORT_SYMBOL(kernel_neon_begin); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index b3354ff94e7984641dd5a0c076d63f67db5f62e9..c41ccf4055e305cb1df7fc1ee72900f484223324 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -278,6 +278,7 @@ static int __init reserve_memblock_reserved_regions(void) arch_initcall(reserve_memblock_reserved_regions); u64 __cpu_logical_map[NR_CPUS] = { [0 ...
NR_CPUS-1] = INVALID_HWID }; +EXPORT_SYMBOL(__cpu_logical_map); void __init setup_arch(char **cmdline_p) { diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index ca565853dea6455e27e59b38998a4bede4f38ef4..3f321daa770214262912448f8883bb52def383d3 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -919,7 +919,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, /* Check valid user FS if needed */ addr_limit_user_check(); - if (thread_flags & _TIF_NEED_RESCHED) { + if (thread_flags & _TIF_NEED_RESCHED_MASK) { /* Unmask Debug and SError for the next task */ local_daif_restore(DAIF_PROCCTX_NOIRQ); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 716810a08f24250f602e679c3fc78bf5346fdf21..d7d0d03f9262d9802110433c48186c7394dc92e7 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -59,6 +59,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -82,7 +83,8 @@ enum ipi_msg_type { IPI_CPU_CRASH_STOP, IPI_TIMER, IPI_IRQ_WORK, - IPI_WAKEUP + IPI_WAKEUP, + IPI_RPROC = 9, }; #ifdef CONFIG_HOTPLUG_CPU @@ -914,6 +916,11 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; #endif + case IPI_RPROC: + irq_enter(); + rproc_handle_ipi(ipinr); + irq_exit(); + break; default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); break; diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index c712a7376bc1a982e217454aa63a974a1722096f..792da0e125de68b5403e35142e803610de2a6330 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -33,7 +33,7 @@ static u8 tag_lsb; static u64 tag_val; static u64 va_mask; -static void compute_layout(void) +__init void kvm_compute_layout(void) { phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start); u64 hyp_va_msb; @@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt, BUG_ON(nr_inst != 5); - if (!has_vhe() && !va_mask) - compute_layout(); for (i = 0; i < nr_inst; i++) { u32 rd, rn, insn, oinsn; @@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt, return; } - if (!va_mask) - compute_layout(); - /* * Compute HYP VA by using the same computation as kern_hyp_va() */ diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 5df2d611b77d9c2befc0c92bd540d358a75daa3d..69ff9887f724d930a09457dc9cc08936c94bf452 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -25,3 +25,5 @@ KCOV_INSTRUMENT_atomic_ll_sc.o := n UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +obj-$(CONFIG_CRC32) += crc32.o diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S new file mode 100644 index 0000000000000000000000000000000000000000..5bc1e85b4e1c283a93a2490f9fded58d210c797c --- /dev/null +++ b/arch/arm64/lib/crc32.S @@ -0,0 +1,60 @@ +/* + * Accelerated CRC32(C) using AArch64 CRC instructions + * + * Copyright (C) 2016 - 2018 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/linkage.h> +#include <asm/alternative.h> +#include <asm/assembler.h> + + .cpu generic+crc + + .macro __crc32, c +0: subs x2, x2, #16 + b.mi 8f + ldp x3, x4, [x1], #16 +CPU_BE( rev x3, x3 ) +CPU_BE( rev x4, x4 ) + crc32\c\()x w0, w0, x3 + crc32\c\()x w0, w0, x4 + b.ne 0b + ret + +8: tbz x2, #3, 4f + ldr x3, [x1], #8 +CPU_BE( rev x3, x3 ) + crc32\c\()x w0, w0, x3 +4: tbz x2, #2, 2f + ldr w3, [x1], #4 +CPU_BE( rev w3, w3 ) + crc32\c\()w w0, w0, w3 +2: tbz x2, #1, 1f + ldrh w3, [x1], #2 +CPU_BE( rev16 w3, w3 ) + crc32\c\()h w0, w0, w3 +1: tbz x2, #0, 0f + ldrb w3, [x1] + crc32\c\()b w0, w0, w3 +0: ret + .endm + + .align 5 +ENTRY(crc32_le) +alternative_if_not ARM64_HAS_CRC32 + b crc32_le_base +alternative_else_nop_endif + __crc32 +ENDPROC(crc32_le) + + .align 5 +ENTRY(__crc32c_le) +alternative_if_not ARM64_HAS_CRC32 + b __crc32c_le_base +alternative_else_nop_endif + __crc32 c +ENDPROC(__crc32c_le) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 5c9073bace83a656e0c2b6cb64d3a5bc62b1462a..7e856c9fb3ed784c3d069ec6df180141c355112d 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -88,6 +88,8 @@ EXPORT_SYMBOL(flush_dcache_page); * Additional functions defined in assembly. */ EXPORT_SYMBOL(__flush_icache_range); +EXPORT_SYMBOL(__flush_dcache_area); +EXPORT_SYMBOL(__inval_dcache_area); #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size) diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h index 7a906b5214a430602b851775388057bfbc9b59c3..d8f596fec022b596263da437bfab3d1f868dd851 100644 --- a/arch/hexagon/include/asm/spinlock_types.h +++ b/arch/hexagon/include/asm/spinlock_types.h @@ -21,10 +21,6 @@ #ifndef _ASM_SPINLOCK_TYPES_H #define _ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 6e345fefcdcab0e1962b15a1f03180667b3533f3..681408d6816fbc9157304659162b4783e4ac1eb0 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef _ASM_IA64_SPINLOCK_TYPES_H #define _ASM_IA64_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index d7400b2844f1cac0c168553b8c00e594a6ade2f0..40abc24b3b0238da7c70bf4f6cb469197c38410f 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, ti->cpu = cpu; p->stack = ti; p->state = TASK_UNINTERRUPTIBLE; - cpumask_set_cpu(cpu, &p->cpus_allowed); + cpumask_set_cpu(cpu, &p->cpus_mask); INIT_LIST_HEAD(&p->tasks); p->parent = p->real_parent = p->group_leader = p; INIT_LIST_HEAD(&p->children); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8a227a80f6bd5fdf8d19cbac187e847158308850..bbd790058e462c6545d1dca20519afaf38b28a55 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2523,7 +2523,7 @@ config MIPS_CRC_SUPPORT # config HIGHMEM bool "High Memory Support" - depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA + depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL config CPU_SUPPORTS_HIGHMEM bool diff --git a/arch/mips/include/asm/switch_to.h
b/arch/mips/include/asm/switch_to.h index e610473d61b8c9841c74c604e5ed970921eab1f0..1428b4febbc95d4c72424f35f44053b8024bfd67 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -42,7 +42,7 @@ extern struct task_struct *ll_task; * inline to try to keep the overhead down. If we have been forced to run on * a "CPU" with an FPU because of a previous high level of FP computation, * but did not actually use the FPU during the most recent time-slice (CU1 - * isn't set), we undo the restriction on cpus_allowed. + * isn't set), we undo the restriction on cpus_mask. * * We're not calling set_cpus_allowed() here, because we have no need to * force prompt migration - we're already switching the current CPU to a @@ -57,7 +57,7 @@ do { \ test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ (!(KSTK_STATUS(prev) & ST0_CU1))) { \ clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ - prev->cpus_allowed = prev->thread.user_cpus_allowed; \ + prev->cpus_mask = prev->thread.user_cpus_allowed; \ } \ next->thread.emulated_fp = 0; \ } while(0) diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index a7c0f97e4b0d6eea56db2f9eedad24a9158d669b..1a08428eedcf1d3b1c933d5996a4760468fc4348 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); + cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr); cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0ca4185cc5e38837cabbccb937896170360bf295..97bf5291130a7699fcd91c82819b8cb7d994c4e0 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void) * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure. 
*/ - if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) { + if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) { cpumask_t tmask; current->thread.user_cpus_allowed - = current->cpus_allowed; - cpumask_and(&tmask, &current->cpus_allowed, + = current->cpus_mask; + cpumask_and(&tmask, &current->cpus_mask, &mt_fpu_cpumask); set_cpus_allowed_ptr(current, &tmask); set_thread_flag(TIF_FPUBOUND); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f0e09d5f0bedd2374340b022256d9284f5edc78c..4536d047cdffc7525ae64a10b82a40b813f0da9e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT config RWSEM_GENERIC_SPINLOCK bool + default y if PREEMPT_RT_FULL config RWSEM_XCHGADD_ALGORITHM bool - default y + default y if !PREEMPT_RT_FULL config GENERIC_LOCKBREAK bool @@ -216,6 +217,7 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_RCU_TABLE_FREE if SMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN @@ -399,7 +401,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" - depends on PPC32 + depends on PPC32 && !PREEMPT_RT_FULL source kernel/Kconfig.hz diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 87adaf13b7e8407d90b31d49e4c7ba798150ed43..7305cb6a53e473ea7a2a4f4fbecb34cc8e6c86a8 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H #define _ASM_POWERPC_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { volatile unsigned int slock; } arch_spinlock_t; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 3c0002044bc9e6237e0c5c7a18ca922e315d7231..64c3d1a720e26254ea8961d821b7839028d6094b 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -37,6 +37,8 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_lazy_count; /* 0 => preemptable, + <0 => BUG */ unsigned long local_flags; /* private flags for thread */ #ifdef CONFIG_LIVEPATCH unsigned long *livepatch_sp; @@ -81,18 +83,18 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ -#define TIF_32BIT 4 /* 32 bit binary */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ #define TIF_NOHZ 9 /* in adaptive nohz mode */ #define TIF_SECCOMP 10 /* secure computing */ -#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ -#define TIF_NOERROR 12 /* Force successful syscall return */ + +#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */ +#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */ + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ #define TIF_UPROBE 14 /* breakpointed or single-stepping */ -#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint
instrumentation */ #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ @@ -100,6 +102,10 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define TIF_ELF2ABI 18 /* function descriptors must die! */ #endif #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_32BIT 20 /* 32 bit binary */ +#define TIF_RESTOREALL 21 /* Restore all regs (implies NOERROR) */ +#define TIF_NOERROR 22 /* Force successful syscall return */ + /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<flags) set_bits(irqtp->flags, &curtp->flags); } +#endif irq_hw_number_t virq_to_hw(unsigned int virq) { diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 695b24a2d9542fc2f0ed710a5ac5dffdb7ffdccd..032ada21b7bde1f828ef40e77d4969da89b9bc7c 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -42,6 +42,7 @@ * We store the saved ksp_limit in the unused part * of the STACK_FRAME_OVERHEAD */ +#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) @@ -58,6 +59,7 @@ _GLOBAL(call_do_softirq) stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr +#endif /* * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index facc02964ab3155b9c400f825dd38f9018de4014..8b1774186c6832b2271f9306a041fc3dca957722 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -32,6 +32,7 @@ .text +#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 std r0,16(r1) @@ -42,6 +43,7 @@ _GLOBAL(call_do_softirq) ld r0,16(r1) mtlr r0 blr +#endif _GLOBAL(call_do_irq) mflr r0 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 68a0e9d5b44023579daad3602ca89433f94a1208..6f4d5d7615af3d66aa73c91b3fa781cf9be54591 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -178,6 +178,7 @@ config KVM_E500MC config KVM_MPIC bool "KVM in-kernel MPIC emulation" depends on KVM && E500 + depends on !PREEMPT_RT_FULL select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index c9ef3c5321692170937cd98d937222b080ad0963..cb10249b1125f0b0bc446cc9942545127d68935e 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) * runqueue. The context will be rescheduled on the proper node * if it is timesliced or preempted. */ - cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed); + cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); /* Save the current cpu id for spu interrupt routing.
*/ ctx->last_ran = raw_smp_processor_id(); diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index e7075aaff1bb62094f8d8f1df706c7d5157496e3..1580464a9d5b5e258a032aa062859b7df7c5ef24 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -752,8 +752,8 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev, } pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - res = wait_event_interruptible(dev->done.wait, - dev->done.done || kthread_should_stop()); + res = swait_event_interruptible_exclusive(dev->done.wait, + dev->done.done || kthread_should_stop()); if (kthread_should_stop()) res = -EINTR; if (res) { diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index b1a08cb760e0b6ab8c6c5b08a22d8c9e485babe9..8ef818ad83ecf0eccb8e1980dd8305776a807da6 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -212,6 +213,7 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, } static DEFINE_PER_CPU(__be64 *, tce_page); +static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock); static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, @@ -233,7 +235,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, direction, attrs); } - local_irq_save(flags); /* to protect tcep and the page behind it */ + /* to protect tcep and the page behind it */ + local_lock_irqsave(tcp_page_lock, flags); tcep = __this_cpu_read(tce_page); @@ -244,7 +247,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, tcep = (__be64 *)__get_free_page(GFP_ATOMIC); /* If allocation fails, fall back to the loop implementation */ if (!tcep) { - local_irq_restore(flags); + local_unlock_irqrestore(tcp_page_lock, flags); return tce_build_pSeriesLP(tbl->it_index, tcenum, tbl->it_page_shift, npages, uaddr, direction, attrs); @@ -279,7 +282,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, tcenum += limit; } while (npages > 0 && !rc); - local_irq_restore(flags); + local_unlock_irqrestore(tcp_page_lock, flags); if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { ret = (int)rc; @@ -450,13 +453,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, DMA_BIDIRECTIONAL, 0); } - local_irq_disable(); /* to protect tcep and the page behind it */ + /* to protect tcep and the page behind it */ + local_lock_irq(tcp_page_lock); tcep = __this_cpu_read(tce_page); if (!tcep) { tcep = (__be64 *)__get_free_page(GFP_ATOMIC); if (!tcep) { - local_irq_enable(); + local_unlock_irq(tcp_page_lock); return -ENOMEM; } __this_cpu_write(tce_page, tcep); @@ -502,7 +506,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, /* error cleanup: caller will clear whole range */ - local_irq_enable(); + local_unlock_irq(tcp_page_lock); return rc; } diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index cfed272e4fd592d9d5844485e64921c30a61ccc2..8e28e8176ec88a5defd6e1289739327488ac2933 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { int lock; } 
__attribute__ ((aligned (4))) arch_spinlock_t; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index e82369f286a20fee9f94f59db4a0f2a24326c537..22ca9a98bbb8bc34921b1b331283d2d5f0ca637c 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef __ASM_SH_SPINLOCK_TYPES_H #define __ASM_SH_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { volatile unsigned int lock; } arch_spinlock_t; diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 5717c7cbdd97a4b86e0f620a1bdc2e53773c37dd..66dd399b200766d5776020fc421520e62fc2368b 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -148,6 +148,7 @@ void irq_ctx_exit(int cpu) hardirq_ctx[cpu] = NULL; } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { struct thread_info *curctx; @@ -175,6 +176,7 @@ void do_softirq_own_stack(void) "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" ); } +#endif #else static inline void handle_one_irq(unsigned int irq) { diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 713670e6d13dccd18e4ff0ee17acadd910a1adae..5dfc715343f90d7ccad33c89e52fe2ba167a34bc 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) set_irq_regs(old_regs); } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { void *orig_sp, *sp = softirq_stack[smp_processor_id()]; @@ -868,6 +869,7 @@ void do_softirq_own_stack(void) __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); } +#endif #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index be4403a8e1b414362886fd0a6427c8d2b5cd319a..7299aeaa7cbbf0fdc834546110862b70a4775914 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -180,6 +180,7 @@ config X86 select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_RCU_TABLE_FREE if PARAVIRT select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API @@ -264,8 +265,11 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API +config RWSEM_GENERIC_SPINLOCK + def_bool PREEMPT_RT_FULL + config RWSEM_XCHGADD_ALGORITHM - def_bool y + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL config GENERIC_CALIBRATE_DELAY def_bool y @@ -935,7 +939,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL - select CPUMASK_OFFSTACK + select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL ---help--- Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. 
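The crypto hunks that follow (aesni-intel_glue.c, cast5_avx_glue.c, chacha20_glue.c and glue_helper.c) all make the same transformation: kernel_fpu_begin()/kernel_fpu_end() moves from around the whole skcipher walk into the loop body, so preemption is disabled for one walk step at a time instead of for the entire request. A minimal sketch of the resulting pattern, with encrypt_step() as a hypothetical stand-in for the per-step cipher primitive (it is not a function from this patch):

	static int ecb_encrypt_sketch(struct skcipher_request *req, void *ctx)
	{
		struct skcipher_walk walk;
		unsigned int nbytes;
		int err;

		err = skcipher_walk_virt(&walk, req, true);
		while ((nbytes = walk.nbytes)) {
			kernel_fpu_begin();	/* preempt-off section covers one step */
			encrypt_step(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				     nbytes & AES_BLOCK_MASK);
			kernel_fpu_end();	/* scheduler may run between steps */
			nbytes &= AES_BLOCK_SIZE - 1;
			err = skcipher_walk_done(&walk, nbytes);
		}
		return err;
	}

The cost is one FPU save/restore per step; the benefit on RT is a preempt-off region bounded by the step size regardless of request size.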
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 917f25e4d0a80411f5b24c082daca3b0d27e63cc..58d8c03fc32d84e87895ea7ec29f1e30fb2ac85b 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -434,14 +434,14 @@ static int ecb_encrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -456,14 +456,14 @@ static int ecb_decrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -478,14 +478,14 @@ static int cbc_encrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -500,14 +500,14 @@ static int cbc_decrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } - kernel_fpu_end(); return err; } @@ -557,18 +557,20 @@ static int ctr_crypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - kernel_fpu_begin(); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + kernel_fpu_begin(); aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = skcipher_walk_done(&walk, nbytes); } if (walk.nbytes) { + kernel_fpu_begin(); ctr_crypt_final(ctx, &walk); + kernel_fpu_end(); err = skcipher_walk_done(&walk, 0); } - kernel_fpu_end(); return err; } diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 41034745d6a2af2e9938c00dd103fdf5db71320a..d4bf7fc02ee751036913c30943d33f64f7ac0633 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -61,7 +61,7 @@ static inline void cast5_fpu_end(bool fpu_enabled) static int ecb_crypt(struct skcipher_request *req, bool enc) { - bool fpu_enabled = false; + bool fpu_enabled; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; @@ -76,7 +76,7 @@ static int ecb_crypt(struct skcipher_request *req, bool enc) u8 *wsrc = walk.src.virt.addr; u8 *wdst = walk.dst.virt.addr; - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); + fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); /* Process multi-block batch */ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { @@ -105,10 +105,9 @@ static int ecb_crypt(struct skcipher_request *req, bool enc) } while (nbytes >= bsize); done: + cast5_fpu_end(fpu_enabled); err = 
skcipher_walk_done(&walk, nbytes); } - - cast5_fpu_end(fpu_enabled); return err; } @@ -212,7 +211,7 @@ static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - bool fpu_enabled = false; + bool fpu_enabled; struct skcipher_walk walk; unsigned int nbytes; int err; @@ -220,12 +219,11 @@ static int cbc_decrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); + fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); nbytes = __cbc_decrypt(ctx, &walk); + cast5_fpu_end(fpu_enabled); err = skcipher_walk_done(&walk, nbytes); } - - cast5_fpu_end(fpu_enabled); return err; } @@ -292,7 +290,7 @@ static int ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - bool fpu_enabled = false; + bool fpu_enabled; struct skcipher_walk walk; unsigned int nbytes; int err; @@ -300,13 +298,12 @@ static int ctr_crypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); + fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); nbytes = __ctr_crypt(&walk, ctx); + cast5_fpu_end(fpu_enabled); err = skcipher_walk_done(&walk, nbytes); } - cast5_fpu_end(fpu_enabled); - if (walk.nbytes) { ctr_crypt_final(&walk, ctx); err = skcipher_walk_done(&walk, 0); diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c index dce7c5d39c2f26f8e1cf6d31c69e1ef627f6cb10..6194160b7fbc5f0876deb964983fbe5b8ec6f272 100644 --- a/arch/x86/crypto/chacha20_glue.c +++ b/arch/x86/crypto/chacha20_glue.c @@ -81,23 +81,24 @@ static int chacha20_simd(struct skcipher_request *req) crypto_chacha20_init(state, ctx, walk.iv); - kernel_fpu_begin(); - while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { + kernel_fpu_begin(); + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); + kernel_fpu_end(); err = skcipher_walk_done(&walk, walk.nbytes % CHACHA20_BLOCK_SIZE); } if (walk.nbytes) { + kernel_fpu_begin(); chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); + kernel_fpu_end(); err = skcipher_walk_done(&walk, 0); } - kernel_fpu_end(); - return err; } diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index a78ef99a9981b47e1ccbd2942295a5195801a75d..dac489a1c4dac872f20b2d105be015f909811fb3 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c @@ -38,7 +38,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); const unsigned int bsize = 128 / 8; struct skcipher_walk walk; - bool fpu_enabled = false; + bool fpu_enabled; unsigned int nbytes; int err; @@ -51,7 +51,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, unsigned int i; fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); + &walk, false, nbytes); for (i = 0; i < gctx->num_funcs; i++) { func_bytes = bsize * gctx->funcs[i].num_blocks; @@ -69,10 +69,9 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, if (nbytes < bsize) break; } + glue_fpu_end(fpu_enabled); err = skcipher_walk_done(&walk, nbytes); } - - glue_fpu_end(fpu_enabled); return err; } EXPORT_SYMBOL_GPL(glue_ecb_req_128bit); 
@@ -115,7 +114,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); const unsigned int bsize = 128 / 8; struct skcipher_walk walk; - bool fpu_enabled = false; + bool fpu_enabled; unsigned int nbytes; int err; @@ -129,7 +128,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, u128 last_iv; fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); + &walk, false, nbytes); /* Start of the last block. */ src += nbytes / bsize - 1; dst += nbytes / bsize - 1; @@ -161,10 +160,10 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, done: u128_xor(dst, dst, (u128 *)walk.iv); *(u128 *)walk.iv = last_iv; + glue_fpu_end(fpu_enabled); err = skcipher_walk_done(&walk, nbytes); } - glue_fpu_end(fpu_enabled); return err; } EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit); @@ -175,7 +174,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); const unsigned int bsize = 128 / 8; struct skcipher_walk walk; - bool fpu_enabled = false; + bool fpu_enabled; unsigned int nbytes; int err; @@ -189,7 +188,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, le128 ctrblk; fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, nbytes); + &walk, false, nbytes); be128_to_le128(&ctrblk, (be128 *)walk.iv); @@ -213,11 +212,10 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, } le128_to_be128((be128 *)walk.iv, &ctrblk); + glue_fpu_end(fpu_enabled); err = skcipher_walk_done(&walk, nbytes); } - glue_fpu_end(fpu_enabled); - if (nbytes) { le128 ctrblk; u128 tmp; @@ -278,7 +276,7 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx, { const unsigned int bsize = 128 / 8; struct skcipher_walk walk; - bool fpu_enabled = false; + bool fpu_enabled; unsigned int nbytes; int err; @@ -289,21 +287,24 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx, /* set minimum length to bsize, for tweak_fn */ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - &walk, fpu_enabled, + &walk, false, nbytes < bsize ? bsize : nbytes); /* calculate first value of T */ tweak_fn(tweak_ctx, walk.iv, walk.iv); while (nbytes) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, + &walk, fpu_enabled, + nbytes < bsize ? bsize : nbytes); nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk); + glue_fpu_end(fpu_enabled); + fpu_enabled = false; err = skcipher_walk_done(&walk, nbytes); nbytes = walk.nbytes; } - glue_fpu_end(fpu_enabled); - return err; } EXPORT_SYMBOL_GPL(glue_xts_req_128bit); diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 8353348ddeaffcb0e09c886069c34609325c16f0..3b5e41d9b29d1240bb31dea2bf4da39ca65861d4 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -134,7 +134,7 @@ static long syscall_trace_enter(struct pt_regs *regs) #define EXIT_TO_USERMODE_LOOP_FLAGS \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) + _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) { @@ -149,9 +149,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) /* We have work to do. 
*/ local_irq_enable(); - if (cached_flags & _TIF_NEED_RESCHED) + if (cached_flags & _TIF_NEED_RESCHED_MASK) schedule(); +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (unlikely(current->forced_info.si_signo)) { + struct task_struct *t = current; + force_sig_info(t->forced_info.si_signo, &t->forced_info, t); + t->forced_info.si_signo = 0; + } +#endif if (cached_flags & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 37d9016d476837355f204d963875462d4f09ac27..d8b40085c2b85e19108bd08010c0103d5a806e39 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -750,8 +750,25 @@ END(ret_from_exception) ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) .Lneed_resched: + # preempt count == 0 + NEED_RS set? cmpl $0, PER_CPU_VAR(__preempt_count) +#ifndef CONFIG_PREEMPT_LAZY jnz restore_all_kernel +#else + jz test_int_off + + # at least preempt count == 0 ? + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) + jne restore_all_kernel + + movl PER_CPU_VAR(current_task), %ebp + cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? + jnz restore_all_kernel + + testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp) + jz restore_all_kernel +test_int_off: +#endif testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all_kernel call preempt_schedule_irq diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index dfe26f3cfffc21d61150a1107160bf26f32021cd..94fccaa04bfa48bb2ea1bfe13423f96ba9528181 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -733,7 +733,23 @@ retint_kernel: btl $9, EFLAGS(%rsp) /* were interrupts off? */ jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) +#ifndef CONFIG_PREEMPT_LAZY jnz 1f +#else + jz do_preempt_schedule_irq + + # at least preempt count == 0 ? + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) + jnz 1f + + movq PER_CPU_VAR(current_task), %rcx + cmpl $0, TASK_TI_preempt_lazy_count(%rcx) + jnz 1f + + btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx) + jnc 1f +do_preempt_schedule_irq: +#endif call preempt_schedule_irq jmp 0b 1: @@ -1084,6 +1100,7 @@ bad_gs: jmp 2b .previous +#ifndef CONFIG_PREEMPT_RT_FULL /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp @@ -1094,6 +1111,7 @@ ENTRY(do_softirq_own_stack) leaveq ret ENDPROC(do_softirq_own_stack) +#endif #ifdef CONFIG_XEN idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index b56d504af6545b95726f623886de5fb8ba2b006b..e51c7094075d956c7b97516b161425834814726c 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -20,6 +20,7 @@ */ extern void kernel_fpu_begin(void); extern void kernel_fpu_end(void); +extern void kernel_fpu_resched(void); extern bool irq_fpu_usable(void); /* diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 7f2dbd91fc743a516a0ad8e094680b13832f61ef..afa0e42ccdd1233e4cbe65ca59ec04399141bdf2 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -86,17 +86,48 @@ static __always_inline void __preempt_count_sub(int val) * a decrement which hits zero means we have no preempt_count and should * reschedule. 
*/ -static __always_inline bool __preempt_count_dec_and_test(void) +static __always_inline bool ____preempt_count_dec_and_test(void) { GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); } +static __always_inline bool __preempt_count_dec_and_test(void) +{ + if (____preempt_count_dec_and_test()) + return true; +#ifdef CONFIG_PREEMPT_LAZY + if (preempt_count()) + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else + return false; +#endif +} + /* * Returns true when we need to resched and can (barring IRQ state). */ static __always_inline bool should_resched(int preempt_offset) { +#ifdef CONFIG_PREEMPT_LAZY + u32 tmp; + + tmp = raw_cpu_read_4(__preempt_count); + if (tmp == preempt_offset) + return true; + + /* preempt count == 0 ? */ + tmp &= ~PREEMPT_NEED_RESCHED; + if (tmp != preempt_offset) + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); +#endif } #ifdef CONFIG_PREEMPT diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 33d3c88a7225ff938d81c8912d5ec05457b07e60..c00e27af2205a8a0179d8405c7886463c4862917 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -28,6 +28,19 @@ typedef struct { #define SA_IA32_ABI 0x02000000u #define SA_X32_ABI 0x01000000u +/* + * Because some traps use the IST stack, we must keep preemption + * disabled while calling do_trap(), but do_trap() may call + * force_sig_info() which will grab the signal spin_locks for the + * task, which in PREEMPT_RT_FULL are mutexes. By defining + * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set + * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the + * trap. + */ +#if defined(CONFIG_PREEMPT_RT_FULL) +#define ARCH_RT_DELAYS_SIGNAL_SEND +#endif + #ifndef CONFIG_COMPAT typedef sigset_t compat_sigset_t; #endif diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 9c556ea2eaa728c959d159d6ed7671a47732e6df..b136992beb1bfdd4ae4cc7d67f29e17b96d42491 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -65,7 +65,7 @@ */ static __always_inline void boot_init_stack_canary(void) { - u64 canary; + u64 uninitialized_var(canary); u64 tsc; #ifdef CONFIG_X86_64 @@ -76,8 +76,14 @@ static __always_inline void boot_init_stack_canary(void) * of randomness. The TSC only matters for very early init, * there it already has some randomness on most systems. Later * on during the bootup the random pool has true entropy too. + * For preempt-rt we need to weaken the randomness a bit, as + * we can't call into the random generator from atomic context + * due to locking constraints. We just leave canary + * uninitialized and use the TSC based randomness on top of it. 
*/ +#ifndef CONFIG_PREEMPT_RT_FULL get_random_bytes(&canary, sizeof(canary)); +#endif tsc = rdtsc(); canary += tsc + (tsc << 32UL); canary &= CANARY_MASK; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index b5e4c357523e83f920560877f6d572a3a97ffc97..d311dc042ddd871a80365dee582597e32f775150 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -56,17 +56,24 @@ struct task_struct; struct thread_info { unsigned long flags; /* low level flags */ u32 status; /* thread synchronous flags */ + int preempt_lazy_count; /* 0 => lazy preemptable + <0 => BUG */ }; #define INIT_THREAD_INFO(tsk) \ { \ .flags = 0, \ + .preempt_lazy_count = 0, \ } #else /* !__ASSEMBLY__ */ #include +#define GET_THREAD_INFO(reg) \ + _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ + _ASM_SUB $(THREAD_SIZE),reg ; + #endif /* @@ -91,6 +98,7 @@ struct thread_info { #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ +#define TIF_NEED_RESCHED_LAZY 18 /* lazy rescheduling necessary */ #define TIF_NOHZ 19 /* in adaptive nohz mode */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ @@ -120,6 +128,7 @@ struct thread_info { #define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_NOHZ (1 << TIF_NOHZ) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) @@ -165,6 +174,8 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) + #define STACK_WARN (THREAD_SIZE/8) /* diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 677508baf95a06e686538d8220e7533ceecf1124..b1a7f453415f7d3892f57a02582068a9026ad6d7 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1732,7 +1732,7 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) return false; } -static inline bool ioapic_irqd_mask(struct irq_data *data) +static inline bool ioapic_prepare_move(struct irq_data *data) { /* If we are moving the IRQ we need to mask it */ if (unlikely(irqd_is_setaffinity_pending(data))) { @@ -1743,9 +1743,9 @@ static inline bool ioapic_irqd_mask(struct irq_data *data) return false; } -static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) +static inline void ioapic_finish_move(struct irq_data *data, bool moveit) { - if (unlikely(masked)) { + if (unlikely(moveit)) { /* Only migrate the irq if the ack has been received. 
* * On rare occasions the broadcast level triggered ack gets @@ -1780,11 +1780,11 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) } } #else -static inline bool ioapic_irqd_mask(struct irq_data *data) +static inline bool ioapic_prepare_move(struct irq_data *data) { return false; } -static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) +static inline void ioapic_finish_move(struct irq_data *data, bool moveit) { } #endif @@ -1793,11 +1793,11 @@ static void ioapic_ack_level(struct irq_data *irq_data) { struct irq_cfg *cfg = irqd_cfg(irq_data); unsigned long v; - bool masked; + bool moveit; int i; irq_complete_move(cfg); - masked = ioapic_irqd_mask(irq_data); + moveit = ioapic_prepare_move(irq_data); /* * It appears there is an erratum which affects at least version 0x11 @@ -1852,7 +1852,7 @@ static void ioapic_ack_level(struct irq_data *irq_data) eoi_ioapic_pin(cfg->vector, irq_data->chip_data); } - ioapic_irqd_unmask(irq_data, masked); + ioapic_finish_move(irq_data, moveit); } static void ioapic_ir_ack_level(struct irq_data *irq_data) diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 01de31db300d3cbf17de120b3a89011f170f65d3..ce1c5b9fbd8cab56c8deb92d7268ad31cf75130d 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -38,6 +38,7 @@ void common(void) { BLANK(); OFFSET(TASK_TI_flags, task_struct, thread_info.flags); + OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count); OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); BLANK(); @@ -94,6 +95,7 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); /* TLB state for the entry code */ OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c index a999a58ca33180ea3374b6ee570cd2426aea67f9..d6410d0740eaf3be1b480b561246b5a73889d2b4 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c @@ -1445,7 +1445,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) * may be scheduled elsewhere and invalidate entries in the * pseudo-locked region. 
*/ - if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) { + if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { mutex_unlock(&rdtgroup_mutex); return -EINVAL; } diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 2e5003fef51a9392ecca9d413f7f6558095ea9c7..768c53767bb23487b4d56e26418a71b78d1ee54b 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -136,6 +136,18 @@ void kernel_fpu_end(void) } EXPORT_SYMBOL_GPL(kernel_fpu_end); +void kernel_fpu_resched(void) +{ + WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); + + if (should_resched(PREEMPT_OFFSET)) { + kernel_fpu_end(); + cond_resched(); + kernel_fpu_begin(); + } +} +EXPORT_SYMBOL_GPL(kernel_fpu_resched); + /* * Save the FPU state (mark it for reload if necessary): * diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 86a231338bbfe571fc938b2ecddd02b1cbfb9c4b..e5c8d5245289cada51126f997d2fd7d212b72a86 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -351,10 +351,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); } + preempt_disable(); local_bh_disable(); fpu->initialized = 1; fpu__restore(fpu); local_bh_enable(); + preempt_enable(); /* Failure is already handled */ return err; diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 95600a99ae93652dbbd4143c9e538e808911bb24..9192d76085ba7f5ba1f7b6566760d30eacc02ff8 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -130,6 +130,7 @@ void irq_ctx_init(int cpu) cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { struct irq_stack *irqstk; @@ -146,6 +147,7 @@ void do_softirq_own_stack(void) call_on_stack(__do_softirq, isp); } +#endif bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) { diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 020efe0f9614f8198218e8dbba8fdb4d501a1ce6..5d0c975559ad81c66dc1378acf3f9a186e5d1558 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -205,6 +206,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); +#ifdef CONFIG_PREEMPT_RT_FULL +static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) +{ + int i; + + /* + * Clear @prev's kmap_atomic mappings + */ + for (i = 0; i < prev_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + pte_t *ptep = kmap_pte - idx; + + kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); + } + /* + * Restore @next_p's kmap_atomic mappings + */ + for (i = 0; i < next_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + + if (!pte_none(next_p->kmap_pte[i])) + set_pte(kmap_pte - idx, next_p->kmap_pte[i]); + } +} +#else +static inline void +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + /* * switch_to(x,y) should switch tasks from x to y. @@ -274,6 +304,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_to_extra(prev_p, next_p); + switch_kmaps(prev_p, next_p); + /* * Leave lazy mode, flushing any hypercalls made here. 
* This must be done before restoring TLS segments so diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 89d07312e58cfbc1e7daa0cbe6487b23cca0f66c..037dbe04fda2371602536a29a0af374039c3589b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2258,7 +2258,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS_PINNED); + HRTIMER_MODE_ABS_PINNED_HARD); apic->lapic_timer.timer.function = apic_timer_fn; /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 417abc9ba1ad4008e14c07fb3d5cfc64094d0503..9829e0fbdd41d8959d72000a7b84645c052ff7f6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6875,6 +6875,13 @@ int kvm_arch_init(void *opaque) goto out; } +#ifdef CONFIG_PREEMPT_RT_FULL + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { + printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); + return -EOPNOTSUPP; + } +#endif + r = kvm_mmu_module_init(); if (r) goto out_free_percpu; diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 6d18b70ed5a9bb808f2c976450671e7034e3169c..f752724c22e8a413cfdbd30f4bde215139074024 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap); */ void *kmap_atomic_prot(struct page *page, pgprot_t prot) { + pte_t pte = mk_pte(page, prot); unsigned long vaddr; int idx, type; - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); if (!PageHighMem(page)) @@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); - set_pte(kmap_pte-idx, mk_pte(page, prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_pte(kmap_pte-idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); arch_flush_lazy_mmu_mode(); @@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr) #endif pagefault_enable(); - preempt_enable(); + preempt_enable_nort(); } EXPORT_SYMBOL(__kunmap_atomic); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index b3294d36769d0da73bb68e6d33a56f52fa735a6f..c0ec8d430c028431b765897d4f07e9be3604b54d 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(iomap_free); void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { + pte_t pte = pfn_pte(pfn, prot); unsigned long vaddr; int idx, type; @@ -68,7 +69,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); + WARN_ON(!pte_none(*(kmap_pte - idx))); + +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_pte(kmap_pte - idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -119,6 +125,9 @@ iounmap_atomic(void __iomem *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. 
*/ +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); } diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 101f3ad0d6ad14714891fad928ce8c7471db7d43..0b0396261ca1006864fa68553a6a7c1d49c59c47 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -687,12 +687,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, pgprot_t ref_prot; spin_lock(&pgd_lock); + /* + * Keep preemption disabled after __flush_tlb_all() which expects not + * to be preempted during the flush of the local TLB. + */ + preempt_disable(); /* * Check for races, another CPU might have split this page * up for us already: */ tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) { + preempt_enable(); spin_unlock(&pgd_lock); return 1; } @@ -726,6 +732,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, break; default: + preempt_enable(); spin_unlock(&pgd_lock); return 1; } @@ -764,6 +771,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, * going on. */ __flush_tlb_all(); + preempt_enable(); spin_unlock(&pgd_lock); return 0; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 77d05b56089a2437492b9166e390efe6e577b422..cd7e4bfb43cf80834bad1cdf3876fba82da85a3c 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -619,18 +619,16 @@ void __init efi_dump_pagetable(void) /* * Makes the calling thread switch to/from efi_mm context. Can be used - * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well - * as during efi runtime calls i.e current->active_mm == current_mm. - * We are not mm_dropping()/mm_grabbing() any mm, because we are not - * losing/creating any references. + * in a kernel thread and user context. Preemption needs to remain disabled + * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm + * cannot change under us. + * It should be ensured that there are no concurrent calls to this function. 
*/ void efi_switch_mm(struct mm_struct *mm) { - task_lock(current); efi_scratch.prev_mm = current->active_mm; current->active_mm = mm; switch_mm(efi_scratch.prev_mm, mm, NULL); - task_unlock(current); } #ifdef CONFIG_EFI_MIXED diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h index bb1fe6c1816eb1e04733f4e4086c5e2604c1256f..8a22f1e7b6c9f5c810813b881bde1cd9223c6439 100644 --- a/arch/xtensa/include/asm/spinlock_types.h +++ b/arch/xtensa/include/asm/spinlock_types.h @@ -2,10 +2,6 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - typedef struct { volatile unsigned int slock; } arch_spinlock_t; diff --git a/block/blk-core.c b/block/blk-core.c index 80f3e729fdd4dbc228c4af401a32c92b14248432..ed6ae335756dd351a143f9dda2183757a1d0da09 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->timeout_list); +#ifdef CONFIG_PREEMPT_RT_FULL + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); +#endif rq->cpu = -1; rq->q = q; rq->__sector = (sector_t) -1; @@ -970,12 +973,21 @@ void blk_queue_exit(struct request_queue *q) percpu_ref_put(&q->q_usage_counter); } +static void blk_queue_usage_counter_release_wrk(struct work_struct *work) +{ + struct request_queue *q = + container_of(work, struct request_queue, mq_pcpu_wake); + + wake_up_all(&q->mq_freeze_wq); +} + static void blk_queue_usage_counter_release(struct percpu_ref *ref) { struct request_queue *q = container_of(ref, struct request_queue, q_usage_counter); - wake_up_all(&q->mq_freeze_wq); + if (wq_has_sleeper(&q->mq_freeze_wq)) + schedule_work(&q->mq_pcpu_wake); } static void blk_rq_timed_out_timer(struct timer_list *t) @@ -1075,6 +1087,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); + INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); /* * Init percpu_ref in atomic mode so that it's faster to shutdown. 
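The blk-core.c hunk above stops calling wake_up_all() directly from blk_queue_usage_counter_release(): percpu_ref release callbacks run in atomic context, and on PREEMPT_RT the wait-queue lock taken by wake_up_all() is a sleeping lock. The wakeup is therefore deferred to a work item. The two callbacks from the hunk, shown here as coherent code for readability (same names and fields as above):

	static void blk_queue_usage_counter_release_wrk(struct work_struct *work)
	{
		struct request_queue *q =
			container_of(work, struct request_queue, mq_pcpu_wake);

		/* Runs in kworker (process) context; sleeping locks are fine. */
		wake_up_all(&q->mq_freeze_wq);
	}

	static void blk_queue_usage_counter_release(struct percpu_ref *ref)
	{
		struct request_queue *q =
			container_of(ref, struct request_queue, q_usage_counter);

		/* Atomic context: only pay for the work item if someone waits. */
		if (wq_has_sleeper(&q->mq_freeze_wq))
			schedule_work(&q->mq_pcpu_wake);
	}

The wq_has_sleeper() check keeps the common case, where nobody is freezing the queue, free of workqueue overhead.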
diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 4c810969c3e2fb77657b895e740e1796a4e6c79e..48089f5bcc1140721f87710e104c9c5a0fe59f04 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "blk.h" @@ -119,7 +120,7 @@ static void ioc_release_fn(struct work_struct *work) spin_unlock(q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_relax(); + cpu_chill(); spin_lock_irqsave_nested(&ioc->lock, flags, 1); } } @@ -203,7 +204,7 @@ void put_io_context_active(struct io_context *ioc) spin_unlock(icq->q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_relax(); + cpu_chill(); goto retry; } } diff --git a/block/blk-mq.c b/block/blk-mq.c index ae70b4809bec4db5eaf1753f85756c9d32b636b3..0f8dcb68b8a8529c27798e6c77265f81b3cb7a21 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->extra_len = 0; rq->__deadline = 0; +#ifdef CONFIG_PREEMPT_RT_FULL + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); +#endif INIT_LIST_HEAD(&rq->timeout_list); rq->timeout = 0; @@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); +#ifdef CONFIG_PREEMPT_RT_FULL + +void __blk_mq_complete_request_remote_work(struct work_struct *work) +{ + struct request *rq = container_of(work, struct request, work); + + rq->q->softirq_done_fn(rq); +} + +#else + static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; rq->q->softirq_done_fn(rq); } +#endif static void __blk_mq_complete_request(struct request *rq) { @@ -570,19 +585,27 @@ static void __blk_mq_complete_request(struct request *rq) return; } - cpu = get_cpu(); + cpu = get_cpu_light(); if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) shared = cpus_share_cache(cpu, ctx->cpu); if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * We could force QUEUE_FLAG_SAME_FORCE, then we would not get in + * here. But we could try to invoke it on the CPU like this. 
+ */ + schedule_work_on(ctx->cpu, &rq->work); +#else rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; smp_call_function_single_async(ctx->cpu, &rq->csd); +#endif } else { rq->q->softirq_done_fn(rq); } - put_cpu(); + put_cpu_light(); } static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) @@ -1387,14 +1410,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { - int cpu = get_cpu(); + int cpu = get_cpu_light(); if (cpumask_test_cpu(cpu, hctx->cpumask)) { __blk_mq_run_hw_queue(hctx); - put_cpu(); + put_cpu_light(); return; } - put_cpu(); + put_cpu_light(); } kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, @@ -3139,10 +3162,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, kt = nsecs; mode = HRTIMER_MODE_REL; - hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode); + hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current); hrtimer_set_expires(&hs.timer, kt); - hrtimer_init_sleeper(&hs, current); do { if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) break; diff --git a/block/blk-mq.h b/block/blk-mq.h index 5ad9251627f80567e1fb459cf699f5d1a76a3342..5a96c97991b644eb8a0592ce5e1107b3996d4572 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -113,12 +113,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, */ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { - return __blk_mq_get_ctx(q, get_cpu()); + return __blk_mq_get_ctx(q, get_cpu_light()); } static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) { - put_cpu(); + put_cpu_light(); } struct blk_mq_alloc_data { diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 15c1f5e12eb89460bc42eb7f5807eaa03254e51d..1628277885a12dc3bafa448929f24e29b48b3a09 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -53,6 +53,7 @@ static void trigger_softirq(void *data) raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } /* @@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu) this_cpu_ptr(&blk_cpu_done)); raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); return 0; } @@ -143,6 +145,7 @@ void __blk_complete_request(struct request *req) goto do_local; local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(__blk_complete_request); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index e0c8e907b08674427649436fd5f769b7ca66cd11..e079f9a70201c2e8293f4ac8873b8a0dd46ae543 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); struct cryptd_cpu_queue { struct crypto_queue queue; struct work_struct work; + spinlock_t qlock; }; struct cryptd_queue { @@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue, cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, cryptd_queue_worker); + spin_lock_init(&cpu_queue->qlock); } pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); return 0; @@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, struct cryptd_cpu_queue *cpu_queue; atomic_t *refcnt; - cpu = get_cpu(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); + cpu_queue = raw_cpu_ptr(queue->cpu_queue); + spin_lock_bh(&cpu_queue->qlock); + cpu = smp_processor_id(); + err = crypto_enqueue_request(&cpu_queue->queue, 
request); refcnt = crypto_tfm_ctx(request->tfm); @@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, atomic_inc(refcnt); out_put_cpu: - put_cpu(); + spin_unlock_bh(&cpu_queue->qlock); return err; } @@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct cryptd_cpu_queue, work); /* * Only handle one request at a time to avoid hogging crypto workqueue. - * preempt_disable/enable is used to prevent being preempted by - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent - * cryptd_enqueue_request() being accessed from software interrupts. */ - local_bh_disable(); - preempt_disable(); + spin_lock_bh(&cpu_queue->qlock); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); - local_bh_enable(); + spin_unlock_bh(&cpu_queue->qlock); if (!req) return; diff --git a/crypto/scompress.c b/crypto/scompress.c index 968bbcf65c9411679d74f737a2550bddb5eb0e51..c2f0077e0801e165cb3ce096053eadaa341ccf04 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches; static void * __percpu *scomp_dst_scratches; static int scomp_scratch_users; static DEFINE_MUTEX(scomp_lock); +static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock); #ifdef CONFIG_NET static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) @@ -146,7 +148,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) void **tfm_ctx = acomp_tfm_ctx(tfm); struct crypto_scomp *scomp = *tfm_ctx; void **ctx = acomp_request_ctx(req); - const int cpu = get_cpu(); + const int cpu = local_lock_cpu(scomp_scratches_lock); u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); int ret; @@ -181,7 +183,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) 1); } out: - put_cpu(); + local_unlock_cpu(scomp_scratches_lock); return ret; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index dd1eea90f67f1c9c998fd00941b4cb6c9b2e7fef..fb91fe3d7a52d32570ca00f5045686016e1fd60d 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -338,7 +338,7 @@ config ACPI_CUSTOM_DSDT_FILE See Documentation/acpi/dsdt-override.txt Enter the full path name to the file which includes the AmlCode - declaration. + or dsdt_aml_code declaration. If unsure, don't enter a file name. 
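Both crypto hunks above replace implicit per-CPU exclusion (get_cpu()/put_cpu() plus BH/preempt disabling) with an explicit lock: cryptd gains a per-queue spinlock, and scompress wraps its per-CPU scratch buffers in a local_irq_lock. On a non-RT kernel the local lock compiles down to the old IRQ/preempt disabling; on RT it becomes a per-CPU sleeping lock, so the protected section stays preemptible while still being serialized. A sketch of the idiom under those assumptions (scratch_bufs and do_compress() are illustrative names, not part of the patch):

	static DEFINE_LOCAL_IRQ_LOCK(scratch_lock);
	static void * __percpu *scratch_bufs;

	static int use_scratch(const u8 *src, unsigned int len)
	{
		int cpu, ret;

		/* Serializes scratch users and keeps us on one CPU. */
		cpu = local_lock_cpu(scratch_lock);
		ret = do_compress(src, len, *per_cpu_ptr(scratch_bufs, cpu));
		local_unlock_cpu(scratch_lock);

		return ret;
	}

This mirrors how scomp_acomp_comp_decomp() uses scomp_scratches_lock in the hunk above.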
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 2664452fa112630d254106099bc8e3026a2ecb7b..b0ad5151f55ab6565bcaa6be81d5d1dfabed153a 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -162,6 +162,17 @@ static const struct apd_device_desc hip08_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 250000000, }; + +static const struct apd_device_desc phytium_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 200000000, +}; + +static const struct apd_device_desc phytium_pe220x_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 50000000, +}; + static const struct apd_device_desc thunderx2_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 125000000, @@ -234,6 +245,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "CAV9007", APD_ADDR(thunderx2_i2c_desc) }, { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, + { "PHYT0003", APD_ADDR(phytium_i2c_desc) }, + { "PHYT0038", APD_ADDR(phytium_pe220x_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 913613cf5c53f19a3ee2e8c1ffbf8190a9fe2d60..d0368bd63734eeb3ea2678593c1bc40b8c1f0160 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -91,6 +91,18 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); void acpi_scan_table_handler(u32 event, void *table, void *context); +#ifdef CONFIG_ACPI_GENERIC_GSI +int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, + int polarity, struct fwnode_handle *fwnode); +#else +static inline +int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, + int polarity, struct fwnode_handle *fwnode) +{ + return acpi_register_gsi(dev, hwirq, trigger, polarity); +} +#endif + /* -------------------------------------------------------------------------- Device Node Initialization / Removal -------------------------------------------------------------------------- */ diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 7c352cba052893bb59f135b597f797a345974cd4..fc9b52ea4ad531bbe64e588b458fbf468259c585 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -13,6 +13,8 @@ #include #include +#include "internal.h" + enum acpi_irq_model_id acpi_irq_model; static struct fwnode_handle *acpi_gsi_domain_id; @@ -41,6 +43,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); +int acpi_register_irq(struct device *dev, u32 hwirq, int trigger, + int polarity, struct fwnode_handle *fwnode) +{ + struct irq_fwspec fwspec; + + if (!fwnode) { + dev_warn(dev, "No registered irqchip for hwirq %d\n", hwirq); + return -EINVAL; + } + + fwspec.fwnode = fwnode; + fwspec.param[0] = hwirq; + fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); + fwspec.param_count = 2; + + return irq_create_fwspec_mapping(&fwspec); +} + /** * acpi_register_gsi() - Map a GSI to a linux IRQ number * @dev: device for which IRQ has to be mapped @@ -54,19 +74,7 @@ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { - struct irq_fwspec fwspec; - - if (WARN_ON(!acpi_gsi_domain_id)) { - pr_warn("GSI: No registered irqchip, giving up\n"); - return -EINVAL; - } - - fwspec.fwnode = acpi_gsi_domain_id; - fwspec.param[0] = gsi; - fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); - fwspec.param_count = 2; - - return irq_create_fwspec_mapping(&fwspec); + return acpi_register_irq(dev, 
gsi, trigger, polarity, acpi_gsi_domain_id); } EXPORT_SYMBOL_GPL(acpi_register_gsi); @@ -95,7 +103,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); * Return: * The referenced device fwhandle or NULL on failure */ -static struct fwnode_handle * +struct fwnode_handle * acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) { struct fwnode_handle *result; @@ -295,3 +303,29 @@ void __init acpi_set_irq_model(enum acpi_irq_model_id model, acpi_irq_model = model; acpi_gsi_domain_id = fwnode; } + +/** + * acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default + * GSI domain as its parent. + * @flags: Irq domain flags associated with the domain + * @size: Size of the domain. + * @fwnode: Optional fwnode of the interrupt controller + * @ops: Pointer to the interrupt domain callbacks + * @host_data: Controller private data pointer + */ +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, + DOMAIN_BUS_ANY); + + if (!d) + return NULL; + + return irq_domain_create_hierarchy(d, flags, size, fwnode, ops, + host_data); +} +EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 94ded9513c73b0bede043e85448251813b32806c..e926cddd9eba7eea10485a25a229cd4633818789 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -35,6 +35,8 @@ #include #include +#include "internal.h" + #define PREFIX "ACPI: " #define _COMPONENT ACPI_PCI_COMPONENT @@ -423,6 +425,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) char *link = NULL; char link_desc[16]; int rc; + struct fwnode_handle *rs_fwnode; pin = dev->pin; if (!pin) { @@ -451,7 +454,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev) gsi = acpi_pci_link_allocate_irq(entry->link, entry->index, &triggering, &polarity, - &link); + &link, + &rs_fwnode); else gsi = entry->index; } else @@ -475,7 +479,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) return 0; } - rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); + rc = acpi_register_irq(&dev->dev, gsi, triggering, polarity, rs_fwnode); if (rc < 0) { dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", pin_name(pin)); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index d5eec352a6e1293c13c70f421e4379e529793920..14010783eece54a8868ac085e407a06665575d3d 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -74,6 +74,7 @@ struct acpi_pci_link_irq { u8 resource_type; u8 possible_count; u32 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; + struct acpi_resource_source resource_source; u8 initialized:1; u8 reserved:7; }; @@ -135,6 +136,8 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, { struct acpi_resource_extended_irq *p = &resource->data.extended_irq; + struct acpi_resource_source *rs = + &link->irq.resource_source; if (!p || !p->interrupt_count) { printk(KERN_WARNING PREFIX "Blank _PRS EXT IRQ resource\n"); @@ -155,6 +158,12 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource, link->irq.triggering = p->triggering; link->irq.polarity = p->polarity; link->irq.resource_type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ; + if (p->resource_source.string_length) { + rs->index = p->resource_source.index; + rs->string_length = p->resource_source.string_length; + rs->string_ptr = kstrdup(p->resource_source.string_ptr, + GFP_KERNEL); + } break; } default: 
@@ -341,7 +350,8 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) resource->res.data.irq.sharable = ACPI_SHARED; resource->res.data.extended_irq.interrupt_count = 1; resource->res.data.extended_irq.interrupts[0] = irq; - /* ignore resource_source, it's optional */ + resource->res.data.extended_irq.resource_source = + link->irq.resource_source; break; default: printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type); @@ -627,7 +637,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) * failure: return -1 */ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, - int *polarity, char **name) + int *polarity, char **name, struct fwnode_handle **rs_fwnode) { int result; struct acpi_device *device; @@ -671,6 +681,9 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, *polarity = link->irq.polarity; if (name) *name = acpi_device_bid(link->device); + if (rs_fwnode) + *rs_fwnode = acpi_get_irq_source_fwhandle(&link->irq.resource_source); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link %s is referenced\n", acpi_device_bid(link->device))); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 041ee23819276e52ad1b169f9a8560f3b10e5baf..11caaa455f3e6bb6a746fdea37c25c5428994c3f 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -713,6 +713,11 @@ acpi_os_physical_table_override(struct acpi_table_header *existing_table, table_length); } +#ifdef CONFIG_ACPI_CUSTOM_DSDT +static void *amlcode __attribute__ ((weakref("AmlCode"))); +static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code"))); +#endif + acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, struct acpi_table_header **new_table) @@ -723,8 +728,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table, *new_table = NULL; #ifdef CONFIG_ACPI_CUSTOM_DSDT - if (strncmp(existing_table->signature, "DSDT", 4) == 0) - *new_table = (struct acpi_table_header *)AmlCode; + if (!strncmp(existing_table->signature, "DSDT", 4)) { + *new_table = (struct acpi_table_header *)&amlcode; + if (!(*new_table)) + *new_table = (struct acpi_table_header *)&dsdt_amlcode; + } #endif if (*new_table != NULL) acpi_table_taint(existing_table); diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 4ed0a78fdc09633d20add1d018bc2caf639926e3..eece022620003325a86dd17c15a540b6a014baf3 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -116,12 +116,20 @@ ssize_t zcomp_available_show(const char *comp, char *buf) struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { - return *get_cpu_ptr(comp->stream); + struct zcomp_strm *zstrm; + + zstrm = *get_local_ptr(comp->stream); + spin_lock(&zstrm->zcomp_lock); + return zstrm; } void zcomp_stream_put(struct zcomp *comp) { - put_cpu_ptr(comp->stream); + struct zcomp_strm *zstrm; + + zstrm = *this_cpu_ptr(comp->stream); + spin_unlock(&zstrm->zcomp_lock); + put_local_ptr(zstrm); } int zcomp_compress(struct zcomp_strm *zstrm, @@ -171,6 +179,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) pr_err("Can't allocate a compression stream\n"); return -ENOMEM; } + spin_lock_init(&zstrm->zcomp_lock); *per_cpu_ptr(comp->stream, cpu) = zstrm; return 0; } diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 41c1002a7d7dffc4ec0c458a0e0f1d68fb5f1a3a..d424eafcbf8ee872040584d2d70e8d83f27eb3ac 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -14,6 +14,7 @@ struct zcomp_strm { /* 
compression/decompression buffer */ void *buffer; struct crypto_comp *tfm; + spinlock_t zcomp_lock; }; /* dynamic per-device compression frontend */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 5e05bfcecd7b70a9a5801279ec6c881271406ffa..2919ad13bce94d9914e45fc3e9f43e8455a3f641 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -53,6 +53,40 @@ static size_t huge_class_size; static void zram_free_page(struct zram *zram, size_t index); +#ifdef CONFIG_PREEMPT_RT_BASE +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) +{ + size_t index; + + for (index = 0; index < num_pages; index++) + spin_lock_init(&zram->table[index].lock); +} + +static int zram_slot_trylock(struct zram *zram, u32 index) +{ + int ret; + + ret = spin_trylock(&zram->table[index].lock); + if (ret) + __set_bit(ZRAM_LOCK, &zram->table[index].value); + return ret; +} + +static void zram_slot_lock(struct zram *zram, u32 index) +{ + spin_lock(&zram->table[index].lock); + __set_bit(ZRAM_LOCK, &zram->table[index].value); +} + +static void zram_slot_unlock(struct zram *zram, u32 index) +{ + __clear_bit(ZRAM_LOCK, &zram->table[index].value); + spin_unlock(&zram->table[index].lock); +} + +#else +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } + static int zram_slot_trylock(struct zram *zram, u32 index) { return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value); @@ -67,6 +101,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index) { bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value); } +#endif static inline bool init_done(struct zram *zram) { @@ -902,6 +937,8 @@ static DEVICE_ATTR_RO(io_stat); static DEVICE_ATTR_RO(mm_stat); static DEVICE_ATTR_RO(debug_stat); + + static void zram_meta_free(struct zram *zram, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; @@ -932,6 +969,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) if (!huge_class_size) huge_class_size = zs_huge_class_size(zram->mem_pool); + zram_meta_init_table_locks(zram, num_pages); return true; } @@ -990,6 +1028,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, unsigned long handle; unsigned int size; void *src, *dst; + struct zcomp_strm *zstrm; if (zram_wb_enabled(zram)) { zram_slot_lock(zram, index); @@ -1024,6 +1063,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, size = zram_get_obj_size(zram, index); + zstrm = zcomp_stream_get(zram->comp); src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { dst = kmap_atomic(page); @@ -1031,14 +1071,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, kunmap_atomic(dst); ret = 0; } else { - struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); dst = kmap_atomic(page); ret = zcomp_decompress(zstrm, src, size, dst); kunmap_atomic(dst); - zcomp_stream_put(zram->comp); } zs_unmap_object(zram->mem_pool, handle); + zcomp_stream_put(zram->comp); zram_slot_unlock(zram, index); /* Should NEVER happen. Return bio error if it does. 
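 *
 * Editor's aside (hedged): the zcomp hunks above change the stream
 * discipline on PREEMPT_RT. zcomp_stream_get() now takes a per-stream
 * spinlock instead of relying on get_cpu_ptr()'s implicit
 * preempt-disable, so a stream must be held across the whole
 * map/decompress sequence and dropped on every path, e.g.:
 *
 *	zstrm = zcomp_stream_get(zram->comp);
 *	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 *	ret = zcomp_decompress(zstrm, src, size, dst);
 *	zs_unmap_object(zram->mem_pool, handle);
 *	zcomp_stream_put(zram->comp);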
*/ diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index d1095dfdffa81471196f4b008a5b373ea1834e3b..144e91061df84b4af7a4a6058981585ec374c838 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -61,6 +61,9 @@ struct zram_table_entry { unsigned long element; }; unsigned long value; +#ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t lock; +#endif #ifdef CONFIG_ZRAM_MEMORY_TRACKING ktime_t ac_time; #endif diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index ca300b1914ce24170a4862ea422b526acfeafd4d..dbb675e2dd3f9d732dbb3965491550a861d9d179 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -32,3 +32,4 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o + diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index dac895dc01b956882486b90e0ea75813b93ea1c7..f11db8b7e51ad671e5837bc7a428c5c38994138e 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -424,6 +424,19 @@ config HW_RANDOM_EXYNOS will be called exynos-trng. If unsure, say Y. + +config HW_RANDOM_PHYTIUM + tristate "Phytium Random Number Generator support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Phytium SoCs. + + To compile this driver as a module, choose M here: the + module will be called phytium-rng. + + If unsure, say Y. + endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index e35ec3ce3a20426afb7d464784242a7a22faebb0..d2797a27d6eba4a5282713c56506e8f902b9e573 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o +obj-$(CONFIG_HW_RANDOM_PHYTIUM) += phytium-rng.o diff --git a/drivers/char/hw_random/phytium-rng.c b/drivers/char/hw_random/phytium-rng.c new file mode 100644 index 0000000000000000000000000000000000000000..5b5e896516b934288ac9f69605069112f3a37fc4 --- /dev/null +++ b/drivers/char/hw_random/phytium-rng.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC RNG Driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRNG_CR 0x00 +#define TRNG_CR_RNGEN BIT(0) +#define TRNG_CR_ROSEN_MASK GENMASK(7, 4) +#define TRNG_CR_DIEN BIT(16) +#define TRNG_CR_ERIEN BIT(17) +#define TRNG_CR_IRQEN BIT(24) +#define TRNG_MSEL 0x04 +#define TRNG_MSEL_MSEL BIT(0) +#define TRNG_SR 0x08 +#define TRNG_SR_HTF BIT(0) +#define TRNG_SR_DRDY BIT(1) +#define TRNG_SR_ERERR BIT(3) +#define TRNG_DR 0x0C +#define TRNG_RESEED 0x40 +#define TRNG_RESEED_RSED BIT(0) + +#define DELAY 10 +#define TIMEOUT 100 + +static int msel; +module_param(msel, int, 0444); +MODULE_PARM_DESC(msel, "Phytium RNG mode selection: 0 - TRNG. 1 - PRNG."); + +struct phytium_rng { + struct hwrng rng; + void __iomem *base; +}; + +static int phytium_rng_init(struct hwrng *rng) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + u32 reg; + + /* Mode Selection */ + reg = msel ? 
TRNG_MSEL_MSEL : 0; + writel(reg, priv->base + TRNG_MSEL); + + /* If PRNG mode is on, do reseed operations */ + if (msel) + writel(TRNG_RESEED_RSED, priv->base + TRNG_RESEED); + + /* Clear status */ + writel(0x7, priv->base + TRNG_SR); + + /* Enable TRNG */ + reg = readl(priv->base + TRNG_CR) | TRNG_CR_ROSEN_MASK | TRNG_CR_RNGEN; + writel(reg, priv->base + TRNG_CR); + + return 0; +} + +static void phytium_rng_cleanup(struct hwrng *rng) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + + writel(0x7, priv->base + TRNG_SR); +} + +static int phytium_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct phytium_rng *priv = container_of(rng, struct phytium_rng, rng); + u32 reg; + int ret = 0; + + /* The TRNG delivers at most eight 32-bit words (32 bytes) per read */ + max = max > 32 ? 32 : max; + + reg = readl(priv->base + TRNG_SR); + if (!(reg & TRNG_SR_DRDY) && wait) { + ret = readl_poll_timeout(priv->base + TRNG_SR, reg, + reg & TRNG_SR_DRDY, DELAY, TIMEOUT); + if (ret) { + dev_err((struct device *)priv->rng.priv, + "%s: timeout %x!\n", __func__, reg); + return -EIO; + } + } + + while (max >= 4) { + *(u32 *)buf = readl(priv->base + TRNG_DR); + + ret += sizeof(u32); + buf += sizeof(u32); + max -= sizeof(u32); + } + + /* Clear DRDY by writing 1 */ + writel(reg | TRNG_SR_DRDY, priv->base + TRNG_SR); + + return ret; +} + +static int phytium_rng_probe(struct platform_device *pdev) +{ + struct phytium_rng *priv; + struct resource *mem; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->rng.name = pdev->name; + priv->rng.init = phytium_rng_init; + priv->rng.cleanup = phytium_rng_cleanup; + priv->rng.read = phytium_rng_read; + priv->rng.priv = (unsigned long)&pdev->dev; + priv->rng.quality = 1000; + + return devm_hwrng_register(&pdev->dev, &priv->rng); +} + +static const struct of_device_id phytium_rng_dt_ids[] = { + { .compatible = "phytium,rng" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_rng_dt_ids); + +static struct platform_driver phytium_rng_driver = { + .probe = phytium_rng_probe, + .driver = { + .name = "phytium-rng", + .of_match_table = of_match_ptr(phytium_rng_dt_ids), + } +}; +module_platform_driver(phytium_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium random number generator driver"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index c108441882ccde34311729361afc7f2876e5f182..9f9910799499f112cbfa75e23eb8f3d9304321da 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -103,6 +103,27 @@ config ASPEED_KCS_IPMI_BMC The driver implements the BMC side of the KCS contorller, it provides the access of KCS IO space for BMC side. +config PHYTIUM_KCS_IPMI_BMC + depends on ARCH_PHYTIUM + select IPMI_KCS_BMC + select REGMAP_MMIO + tristate "PHYTIUM KCS IPMI BMC driver" + help + Provides a driver for the KCS (Keyboard Controller Style) IPMI + interface found on Phytium SoCs. + + The driver implements the BMC side of the KCS controller; it + provides access to the KCS IO space for the BMC side.
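Editor's note on the phytium-rng driver added above: once the hwrng core selects it as the active source, its output is consumed through the generic /dev/hwrng character device, so no Phytium-specific API is involved. A minimal, hedged userspace sketch of a reader:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];	/* the driver refills at most 32 bytes per call */
	int fd = open("/dev/hwrng", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd random bytes\n", n);
	close(fd);
	return 0;
}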
+ +config PHYTIUM_BT_IPMI_BMC + depends on ARCH_PHYTIUM + depends on REGMAP && REGMAP_MMIO && MFD_SYSCON + tristate "PHYTIUM BT BMC driver" + help + Provides a driver for the BT (Block Transfer) interface found + on Phytium SoCs. The driver implements the BMC side of + the BT interface. + config NPCM7XX_KCS_IPMI_BMC depends on ARCH_NPCM7XX || COMPILE_TEST select IPMI_KCS_BMC diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 7a3baf301a8f7a15ff442fe84ba045dd209c0d84..e94565313102241a85730b6edc31358fb8afbf74 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -25,3 +25,5 @@ obj-$(CONFIG_IPMI_KCS_BMC) += kcs_bmc.o obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o obj-$(CONFIG_ASPEED_KCS_IPMI_BMC) += kcs_bmc_aspeed.o obj-$(CONFIG_NPCM7XX_KCS_IPMI_BMC) += kcs_bmc_npcm7xx.o +obj-$(CONFIG_PHYTIUM_KCS_IPMI_BMC) += kcs_bmc_phytium.o +obj-$(CONFIG_PHYTIUM_BT_IPMI_BMC) += bt_bmc_phytium.o diff --git a/drivers/char/ipmi/bt_bmc_phytium.c b/drivers/char/ipmi/bt_bmc_phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..a4d009ddf4a22e73637ef5d2b0f302df2469391b --- /dev/null +++ b/drivers/char/ipmi/bt_bmc_phytium.c @@ -0,0 +1,530 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021-2022 phytium + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This is a BMC device used to communicate with the host + */ +#define DEVICE_NAME "ipmi-bt-host" + +#define BT_IO_BASE 0xe4 +#define BT_IRQ 10 + +#define LPC_HICR0 0x00 +#define LPC_HICR0_LPC3E BIT(7) +#define LPC_HICR0_LPC2E BIT(6) +#define LPC_HICR0_LPC1E BIT(5) +#define LPC_HICR0_SDWNE BIT(3) /* 0:disable, 1:enable for HICR1_SDWNB */ +#define LPC_HICR0_PMEE BIT(2) /* 0:disable, 1:enable for HICR1_PMEB */ + +#define LPC_HICR2 0x08 +#define LPC_HICR2_IBFIF3 BIT(3) /* 0:normal, 1:enable IRQ 3 */ + +#define LPC_HICR4 0x10 +#define LPC_HICR4_LADR12AS BIT(7) +#define LPC_HICR4_KCSENBL BIT(2) +#define LPC_BTENABLE BIT(0) /* shared BT channel enable, 0:disable, 1:enable */ +#define LPC_LADR3H_BT 0x014 +#define LPC_LADR3L_BT 0x018 + +#define BT_CR1 0x4C +#define BT_CR1_IRQ_H2B 0x04 +#define BT_CR1_IRQ_HWRST 0x40 + +#define BT_CSR1 0x54 +#define BT_CSR1_IRQ_H2B 0x04 +#define BT_CSR1_IRQ_HWRST 0x40 + +#define BT_CTRL 0x58 +#define BT_CTRL_B_BUSY 0x80 +#define BT_CTRL_H_BUSY 0x40 +#define BT_CTRL_OEM0 0x20 +#define BT_CTRL_SMS_ATN 0x10 +#define BT_CTRL_B2H_ATN 0x08 +#define BT_CTRL_H2B_ATN 0x04 +#define BT_CTRL_CLR_RD_PTR 0x02 +#define BT_CTRL_CLR_WR_PTR 0x01 +#define BT_BMC2HOST 0x5C +#define BT_HOST2BMC 0x98 +#define BT_INTMASK 0x60 +#define BT_INTMASK_B2H_IRQEN 0x01 +#define BT_INTMASK_B2H_IRQ 0x02 +#define BT_INTMASK_BMC_HWRST 0x80 + +#define BT_BMC_BUFFER_SIZE 256 + +struct bt_bmc { + struct device dev; + struct miscdevice miscdev; + struct regmap *map; + int irq; + wait_queue_head_t queue; + struct timer_list poll_timer; + struct mutex mutex; +}; + +static atomic_t open_count = ATOMIC_INIT(0); + +static const struct regmap_config bt_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static u8 bt_inb(struct bt_bmc *bt_bmc, int reg) +{ + uint32_t val = 0; + int rc; + + rc = regmap_read(bt_bmc->map, reg, &val); + WARN(rc != 0, "regmap_read() failed: %d\n", rc); + + return rc == 0 ?
(u8) val : 0; +} + +static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg) +{ + int rc; + + rc = regmap_write(bt_bmc->map, reg, data); + WARN(rc != 0, "regmap_write() failed: %d\n", rc); +} + +static void clr_rd_ptr(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL); +} + +static void clr_wr_ptr(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL); +} + +static void clr_h2b_atn(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, 0, BT_CTRL); +} + +static void set_b_busy(struct bt_bmc *bt_bmc) +{ + if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)) + bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL); +} + +static void clr_b_busy(struct bt_bmc *bt_bmc) +{ + if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY) + bt_outb(bt_bmc, 0, BT_CTRL); +} + +static void set_b2h_atn(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL); +} + +static u8 bt_read(struct bt_bmc *bt_bmc) +{ + return bt_inb(bt_bmc, BT_HOST2BMC); +} + +static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n) +{ + int i; + + for (i = 0; i < n; i++) + buf[i] = bt_read(bt_bmc); + return n; +} + +static void bt_write(struct bt_bmc *bt_bmc, u8 c) +{ + bt_outb(bt_bmc, c, BT_BMC2HOST); +} + +static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n) +{ + int i; + + for (i = 0; i < n; i++) + bt_write(bt_bmc, buf[i]); + return n; +} + +static void set_sms_atn(struct bt_bmc *bt_bmc) +{ + bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL); +} + +static struct bt_bmc *file_bt_bmc(struct file *file) +{ + return container_of(file->private_data, struct bt_bmc, miscdev); +} + +static int bt_bmc_open(struct inode *inode, struct file *file) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + + if (atomic_inc_return(&open_count) == 1) { + clr_b_busy(bt_bmc); + return 0; + } + + atomic_dec(&open_count); + + return -EBUSY; +} + +/* + * The BT (Block Transfer) interface means that entire messages are + * buffered by the host before a notification is sent to the BMC that + * there is data to be read. The first byte is the length and the + * message data follows. The read operation just tries to capture the + * whole message before returning it to userspace. + * + * BT Message format: + * + * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N + * Length NetFn/LUN Seq Cmd Data + * + */ +static ssize_t bt_bmc_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + u8 len; + int len_byte = 1; + u8 kbuffer[BT_BMC_BUFFER_SIZE]; + ssize_t ret = 0; + ssize_t nread; + + WARN_ON(*ppos); + + if (wait_event_interruptible(bt_bmc->queue, + bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN)) + return -ERESTARTSYS; + + mutex_lock(&bt_bmc->mutex); + + if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) { + ret = -EIO; + goto out_unlock; + } + + set_b_busy(bt_bmc); + clr_h2b_atn(bt_bmc); + clr_rd_ptr(bt_bmc); + + /* + * The BT frames start with the message length, which does not + * include the length byte.
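 *
 * Editor's illustration (not part of the patch): a host "Get Device ID"
 * request (NetFn App = 0x06, LUN 0, cmd 0x01) would arrive here as the
 * four bytes 0x03 0x18 <seq> 0x01, where the leading 0x03 counts the
 * NetFn/LUN, Seq and Cmd bytes but not itself.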
+ */ + kbuffer[0] = bt_read(bt_bmc); + len = kbuffer[0]; + + /* We pass the length back to userspace as well */ + if (len + 1 > count) + len = count - 1; + + while (len) { + nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte); + + bt_readn(bt_bmc, kbuffer + len_byte, nread); + + if (copy_to_user(buf, kbuffer, nread + len_byte)) { + ret = -EFAULT; + break; + } + len -= nread; + buf += nread + len_byte; + ret += nread + len_byte; + len_byte = 0; + } + + clr_b_busy(bt_bmc); + +out_unlock: + mutex_unlock(&bt_bmc->mutex); + return ret; +} + +/* + * BT Message response format : + * + * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N + * Length NetFn/LUN Seq Cmd Code Data + */ +static ssize_t bt_bmc_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + u8 kbuffer[BT_BMC_BUFFER_SIZE]; + ssize_t ret = 0; + ssize_t nwritten; + + /* + * send a minimum response size + */ + if (count < 5) + return -EINVAL; + + WARN_ON(*ppos); + + /* + * There's no interrupt for clearing bmc busy so we have to + * poll + */ + if (wait_event_interruptible(bt_bmc->queue, + !(bt_inb(bt_bmc, BT_CTRL) & + (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))) + return -ERESTARTSYS; + + mutex_lock(&bt_bmc->mutex); + + if (unlikely(bt_inb(bt_bmc, BT_CTRL) & + (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) { + ret = -EIO; + goto out_unlock; + } + + clr_wr_ptr(bt_bmc); + + while (count) { + nwritten = min_t(ssize_t, count, sizeof(kbuffer)); + if (copy_from_user(&kbuffer, buf, nwritten)) { + ret = -EFAULT; + break; + } + + bt_writen(bt_bmc, kbuffer, nwritten); + + count -= nwritten; + buf += nwritten; + ret += nwritten; + } + + set_b2h_atn(bt_bmc); + +out_unlock: + mutex_unlock(&bt_bmc->mutex); + return ret; +} + +static long bt_bmc_ioctl(struct file *file, unsigned int cmd, + unsigned long param) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + + switch (cmd) { + case BT_BMC_IOCTL_SMS_ATN: + set_sms_atn(bt_bmc); + return 0; + } + return -EINVAL; +} + +static int bt_bmc_release(struct inode *inode, struct file *file) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + + atomic_dec(&open_count); + set_b_busy(bt_bmc); + return 0; +} + +static __poll_t bt_bmc_poll(struct file *file, poll_table *wait) +{ + struct bt_bmc *bt_bmc = file_bt_bmc(file); + __poll_t mask = 0; + u8 ctrl; + + poll_wait(file, &bt_bmc->queue, wait); + + ctrl = bt_inb(bt_bmc, BT_CTRL); + + if (ctrl & BT_CTRL_H2B_ATN) + mask |= EPOLLIN; + + if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) + mask |= EPOLLOUT; + + return mask; +} + +static const struct file_operations bt_bmc_fops = { + .owner = THIS_MODULE, + .open = bt_bmc_open, + .read = bt_bmc_read, + .write = bt_bmc_write, + .release = bt_bmc_release, + .poll = bt_bmc_poll, + .unlocked_ioctl = bt_bmc_ioctl, +}; + +static void poll_timer(struct timer_list *t) +{ + struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer); + + bt_bmc->poll_timer.expires += msecs_to_jiffies(500); + wake_up(&bt_bmc->queue); + add_timer(&bt_bmc->poll_timer); +} + +static irqreturn_t bt_bmc_irq(int irq, void *arg) +{ + struct bt_bmc *bt_bmc = arg; + u32 reg, intmsk; + int rc; + + rc = regmap_read(bt_bmc->map, BT_CR1, ®); + if (rc) + return IRQ_NONE; + + reg &= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HWRST; + if (!reg) + return IRQ_NONE; + + /* ack pending IRQs */ + regmap_write(bt_bmc->map, BT_CR1, 0); + + if (reg & BT_CR1_IRQ_HWRST) { + regmap_read(bt_bmc->map, BT_INTMASK, &intmsk); + if (intmsk & BT_INTMASK_BMC_HWRST) + regmap_write(bt_bmc->map, BT_INTMASK, 0); + } + + wake_up(&bt_bmc->queue); + 
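/*
 * Editor's sketch (hedged, not part of the patch) of the userspace side
 * of this misc device: a BMC daemon blocks in read() until the host
 * raises H2B_ATN, receives one length-prefixed request, and replies via
 * write() with a length-prefixed response of at least 5 bytes:
 *
 *	int fd = open("/dev/ipmi-bt-host", O_RDWR);
 *	unsigned char req[256], rsp[256];
 *	ssize_t n = read(fd, req, sizeof(req));	// req[0] holds the length
 *	// build rsp[]: Length, NetFn/LUN, Seq, Cmd, completion code, data
 *	write(fd, rsp, rsp[0] + 1);
 */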
return IRQ_HANDLED; +} + +static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int rc; + + bt_bmc->irq = platform_get_irq(pdev, 0); + if (!bt_bmc->irq) + return -ENODEV; + + rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED, + DEVICE_NAME, bt_bmc); + if (rc < 0) { + dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq); + bt_bmc->irq = 0; + return rc; + } + + /* + * Configure IRQs on the bmc clearing the H2B and HBUSY bits; + * H2B will be asserted when the bmc has data for us; HBUSY + * will be cleared (along with B2H) when we can write the next + * message to the BT buffer + */ + rc = regmap_update_bits(bt_bmc->map, BT_CSR1, + BT_CSR1_IRQ_H2B | BT_CSR1_IRQ_HWRST, + BT_CSR1_IRQ_H2B | BT_CSR1_IRQ_HWRST); + + return rc; +} + +static int bt_bmc_probe(struct platform_device *pdev) +{ + struct bt_bmc *bt_bmc; + struct device *dev; + int rc; + + if (!pdev || !pdev->dev.of_node) + return -ENODEV; + + dev = &pdev->dev; + dev_info(dev, "Found bt bmc device\n"); + + bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL); + if (!bt_bmc) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, bt_bmc); + + bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node); + + mutex_init(&bt_bmc->mutex); + init_waitqueue_head(&bt_bmc->queue); + + bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR, + bt_bmc->miscdev.name = DEVICE_NAME, + bt_bmc->miscdev.fops = &bt_bmc_fops, + bt_bmc->miscdev.parent = dev; + rc = misc_register(&bt_bmc->miscdev); + if (rc) { + dev_err(dev, "Unable to register misc device\n"); + return rc; + } + + bt_bmc_config_irq(bt_bmc, pdev); + + if (bt_bmc->irq) { + dev_info(dev, "Using IRQ %d\n", bt_bmc->irq); + } else { + dev_info(dev, "No IRQ; using timer\n"); + timer_setup(&bt_bmc->poll_timer, poll_timer, 0); + bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10); + add_timer(&bt_bmc->poll_timer); + } + + regmap_update_bits(bt_bmc->map, LPC_HICR0, LPC_HICR0_LPC3E, LPC_HICR0_LPC3E); + regmap_update_bits(bt_bmc->map, LPC_HICR4, LPC_BTENABLE, LPC_BTENABLE); + regmap_write(bt_bmc->map, LPC_LADR3H_BT, BT_IO_BASE >> 8); + regmap_write(bt_bmc->map, LPC_LADR3L_BT, BT_IO_BASE); + regmap_update_bits(bt_bmc->map, LPC_HICR2, LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3); + clr_b_busy(bt_bmc); + + return 0; +} + +static int bt_bmc_remove(struct platform_device *pdev) +{ + struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev); + + misc_deregister(&bt_bmc->miscdev); + if (!bt_bmc->irq) + del_timer_sync(&bt_bmc->poll_timer); + + return 0; +} + +static const struct of_device_id bt_bmc_match[] = { + { .compatible = "phytium,bt-bmc" }, + { }, +}; +MODULE_DEVICE_TABLE(of, bt_bmc_match); + +static struct platform_driver bt_bmc_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = bt_bmc_match, + }, + .probe = bt_bmc_probe, + .remove = bt_bmc_remove, +}; + +module_platform_driver(bt_bmc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcs_bmc.h" + +#define DEVICE_NAME "phytium-kcs-bmc" + +#define KCS_CHANNEL_MAX 4 + +/* mapped to lpc-bmc@0 IO space */ +#define LPC_HICR0 0x000 +#define LPC_HICR0_LPC3E BIT(7) +#define LPC_HICR0_LPC2E BIT(6) +#define LPC_HICR0_LPC1E BIT(5) +#define LPC_HICR0_SDWNE BIT(3) /* 0:disable, 1:enable for HICR1_SDWNB */ +#define LPC_HICR0_PMEE BIT(2) /* 0:disable, 1:enable for HICR1_PMEB */ +#define LPC_HICR1 0x004 +#define LPC_HICR1_LPCBSY BIT(7) /*
0:idle, 1:transfer busy */ +#define LPC_HICR1_IRQBSY BIT(5) /* 0:idle, 1:transferring data */ +#define LPC_HICR1_LPCSWRST BIT(4) /* 0:normal, 1:reset */ +#define LPC_HICR1_SDWNB BIT(3) /* 0:normal, 1:software shutdown */ +#define LPC_HICR1_PMEB BIT(2) /* 0:LPCPD low, 1:LPCPD high */ +#define LPC_HICR2 0x008 +#define LPC_LPCSWRST_ERRIRQ BIT(6) /* 0:normal, 1:reset IRQ */ +#define LPC_SDWN_ERRIRQ BIT(5) /* 0:normal, 1:LPCPD IRQ */ +#define LPC_ABRT_ERRIRQ BIT(4) /* 0:normal, 1:LFRAME low when busy */ +#define LPC_HICR2_IBFIF3 BIT(3) /* 0:normal, 1:enable IRQ 3 */ +#define LPC_HICR2_IBFIF2 BIT(2) +#define LPC_HICR2_IBFIF1 BIT(1) +/* 0:normal, 1:enable error IRQs (reset, power down, abort) */ +#define LPC_HICR2_ERRIE BIT(0) +#define LPC_HICR3 0x00C +#define LPC_LFRAME_STATUS BIT(7) /* R */ +#define LPC_SERIRQ_STATUS BIT(5) /* R */ +#define LPC_LREST_STATUS BIT(4) /* R */ +#define LPC_LPCPD_STATUS BIT(3) /* R */ +#define LPC_PME_STATUS BIT(2) /* R */ +#define LPC_HICR4 0x010 +#define LPC_HICR4_LADR12AS BIT(7) +#define LPC_HICR4_KCSENBL BIT(2) +#define LPC_BTENABLE BIT(0) /* shared BT channel enable, 0:disable, 1:enable */ +#define LPC_LADR3H 0x014 +#define LPC_LADR3L 0x018 +#define LPC_LADR12H 0x01C +#define LPC_LADR12L 0x020 +#define LPC_IDR1 0x024 +#define LPC_IDR2 0x028 +#define LPC_IDR3 0x02C +#define LPC_ODR1 0x030 +#define LPC_ODR2 0x034 +#define LPC_ODR3 0x038 +#define LPC_STR1 0x03C +#define LPC_STR2 0x040 +#define LPC_STR3 0x044 + +#define LPC_HICRB 0x80 +#define LPC_HICRB_IBFIF4 BIT(1) +#define LPC_HICRB_LPC4E BIT(0) +#define LPC_LADR4 0x88 +#define LPC_IDR4 0x8c +#define LPC_ODR4 0x90 +#define LPC_STR4 0x94 + +struct phytium_kcs_bmc { + struct regmap *map; +}; + +static u8 phytium_kcs_inb(struct kcs_bmc *kcs_bmc, u32 reg) +{ + struct phytium_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); + u32 val = 0; + int rc; + + rc = regmap_read(priv->map, reg, &val); + WARN(rc != 0, "regmap_read() failed: %d\n", rc); + + return rc == 0 ? (u8) val : 0; +} + +static void phytium_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data) +{ + struct phytium_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); + int rc; + + rc = regmap_write(priv->map, reg, data); + WARN(rc != 0, "regmap_write() failed: %d\n", rc); +} + +/* + * Background: + * we write D for Data and C for Cmd/Status; the default address rules are + * A. KCS1 / KCS2 ( D / C:X / X+4 ) + * D / C : CA0h / CA4h + * D / C : CA8h / CACh + * B. KCS3 ( D / C:XX2h / XX3h ) + * D / C : CA2h / CA3h + * D / C : CB2h / CB3h -use + * C.
KCS4 + * D / C : CA4h / CA5h + * D / C : CB0h / CB1h -use + */ +static void phytium_kcs_set_address(struct kcs_bmc *kcs_bmc, u16 addr) +{ + struct phytium_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); + + switch (kcs_bmc->channel) { + case 1: + regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0); + regmap_write(priv->map, LPC_LADR12H, addr >> 8); + regmap_write(priv->map, LPC_LADR12L, addr & 0xFF); + break; + case 2: + regmap_update_bits(priv->map, LPC_HICR4, + LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS); + regmap_write(priv->map, LPC_LADR12H, addr >> 8); + regmap_write(priv->map, LPC_LADR12L, addr & 0xFF); + break; + case 3: + regmap_write(priv->map, LPC_LADR3H, addr >> 8); + regmap_write(priv->map, LPC_LADR3L, addr & 0xFF); + break; + case 4: + regmap_write(priv->map, LPC_LADR4, ((addr + 1) << 16) | + addr); + break; + default: + break; + } +} + +static void phytium_kcs_enable_channel(struct kcs_bmc *kcs_bmc, bool enable) +{ + struct phytium_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); + + switch (kcs_bmc->channel) { + case 1: + if (enable) { + regmap_update_bits(priv->map, LPC_HICR2, + LPC_HICR2_IBFIF1, LPC_HICR2_IBFIF1); + regmap_update_bits(priv->map, LPC_HICR0, + LPC_HICR0_LPC1E, LPC_HICR0_LPC1E); + } else { + regmap_update_bits(priv->map, LPC_HICR0, + LPC_HICR0_LPC1E, 0); + regmap_update_bits(priv->map, LPC_HICR2, + LPC_HICR2_IBFIF1, 0); + } + break; + case 2: + if (enable) { + regmap_update_bits(priv->map, LPC_HICR2, + LPC_HICR2_IBFIF2, LPC_HICR2_IBFIF2); + regmap_update_bits(priv->map, LPC_HICR0, + LPC_HICR0_LPC2E, LPC_HICR0_LPC2E); + } else { + regmap_update_bits(priv->map, LPC_HICR0, + LPC_HICR0_LPC2E, 0); + regmap_update_bits(priv->map, LPC_HICR2, + LPC_HICR2_IBFIF2, 0); + } + break; + case 3: + if (enable) { + regmap_update_bits(priv->map, LPC_HICR2, + LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3); + regmap_update_bits(priv->map, LPC_HICR0, + LPC_HICR0_LPC3E, LPC_HICR0_LPC3E); + regmap_update_bits(priv->map, LPC_HICR4, + LPC_HICR4_KCSENBL, LPC_HICR4_KCSENBL); + } else { + regmap_update_bits(priv->map, LPC_HICR0, + LPC_HICR0_LPC3E, 0); + regmap_update_bits(priv->map, LPC_HICR4, + LPC_HICR4_KCSENBL, 0); + regmap_update_bits(priv->map, LPC_HICR2, + LPC_HICR2_IBFIF3, 0); + } + break; + case 4: + if (enable) + regmap_update_bits(priv->map, LPC_HICRB, + LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E, + LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E); + else + regmap_update_bits(priv->map, LPC_HICRB, + LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E, + 0); + break; + default: + break; + } +} + +static irqreturn_t phytium_kcs_irq(int irq, void *arg) +{ + struct kcs_bmc *kcs_bmc = arg; + + if (!kcs_bmc_handle_event(kcs_bmc)) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static int phytium_kcs_config_irq(struct kcs_bmc *kcs_bmc, struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + return devm_request_irq(dev, irq, phytium_kcs_irq, IRQF_SHARED, + dev_name(dev), kcs_bmc); +} + +static const struct kcs_ioreg phytium_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = { + { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 }, + { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 }, + { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 }, + { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 }, +}; + +static int phytium_kcs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_kcs_bmc *priv; + struct kcs_bmc *kcs_bmc; + u32 chan, addr; + int rc; + + rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan); + if ((rc != 0) || 
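/*
 * Editor's sketch (hedged) of a devicetree fragment matching the probe
 * routine that follows; the property names are taken from the code, the
 * address follows the KCS3 "D / C : CA2h / CA3h" rule from the comment
 * above, and the parent is expected to be the syscon/LPC node whose
 * regmap this driver uses:
 *
 *	kcs3: kcs@3 {
 *		compatible = "phytium,kcs-bmc";
 *		kcs_chan = <3>;
 *		kcs_addr = <0xca2>;
 *	};
 */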
(chan == 0 || chan > KCS_CHANNEL_MAX)) { + dev_err(dev, "no valid 'kcs_chan' configured\n"); + return -ENODEV; + } + + rc = of_property_read_u32(dev->of_node, "kcs_addr", &addr); + if (rc) { + dev_err(dev, "no valid 'kcs_addr' configured\n"); + return -ENODEV; + } + + kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan); + if (!kcs_bmc) + return -ENOMEM; + + priv = kcs_bmc_priv(kcs_bmc); + priv->map = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(priv->map)) { + dev_err(dev, "Couldn't get regmap\n"); + return -ENODEV; + } + + kcs_bmc->ioreg = phytium_kcs_bmc_ioregs[chan - 1]; + kcs_bmc->io_inputb = phytium_kcs_inb; + kcs_bmc->io_outputb = phytium_kcs_outb; + + dev_set_drvdata(dev, kcs_bmc); + + phytium_kcs_set_address(kcs_bmc, addr); + phytium_kcs_enable_channel(kcs_bmc, true); + rc = phytium_kcs_config_irq(kcs_bmc, pdev); + if (rc) + return rc; + + rc = misc_register(&kcs_bmc->miscdev); + if (rc) { + dev_err(dev, "Unable to register device\n"); + return rc; + } + + pr_info("channel=%u addr=0x%x idr=0x%x odr=0x%x str=0x%x\n", + chan, addr, kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str); + + return 0; +} + +static int phytium_kcs_remove(struct platform_device *pdev) +{ + struct kcs_bmc *kcs_bmc = dev_get_drvdata(&pdev->dev); + + misc_deregister(&kcs_bmc->miscdev); + + return 0; +} + +static const struct of_device_id phytium_kcs_bmc_match[] = { + { .compatible = "phytium,kcs-bmc" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_kcs_bmc_match); + +static struct platform_driver phytium_kcs_bmc_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = phytium_kcs_bmc_match, + }, + .probe = phytium_kcs_probe, + .remove = phytium_kcs_remove, +}; +module_platform_driver(phytium_kcs_bmc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium device interface to the KCS BMC device"); diff --git a/drivers/char/random.c b/drivers/char/random.c index 1127343781465aac47169636224925f93778afbe..5e5aaa837348a1feddc3031699e7a49ccc75aba5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1237,28 +1237,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) return *ptr; } -void add_interrupt_randomness(int irq, int irq_flags) +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) { struct entropy_store *r; struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); - struct pt_regs *regs = get_irq_regs(); unsigned long now = jiffies; cycles_t cycles = random_get_entropy(); __u32 c_high, j_high; - __u64 ip; unsigned long seed; int credit = 0; if (cycles == 0) - cycles = get_reg(fast_pool, regs); + cycles = get_reg(fast_pool, NULL); c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; j_high = (sizeof(now) > 4) ? now >> 32 : 0; fast_pool->pool[0] ^= cycles ^ j_high ^ irq; fast_pool->pool[1] ^= now ^ c_high; - ip = regs ? instruction_pointer(regs) : _RET_IP_; + if (!ip) + ip = _RET_IP_; fast_pool->pool[2] ^= ip; fast_pool->pool[3] ^= (sizeof(ip) > 4) ? 
ip >> 32 : - get_reg(fast_pool, regs); + get_reg(fast_pool, NULL); fast_mix(fast_pool); add_interrupt_bench(cycles); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 5a3a4f095391046cbfe32d4001b6f179060e5a44..5ecca65850498f1d76194d846290fe7374d52117 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -54,6 +54,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da return container_of(data, struct tpm_tis_tcg_phy, priv); } +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * Flushes previous write operations to the chip so that subsequent + * ioread*()s won't stall the CPU. + */ +static inline void tpm_tis_flush(void __iomem *iobase) +{ + ioread8(iobase + TPM_ACCESS(0)); +} +#else +#define tpm_tis_flush(iobase) do { } while (0) +#endif + +static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr) +{ + iowrite8(b, iobase + addr); + tpm_tis_flush(iobase); +} + +static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) +{ + iowrite32(b, iobase + addr); + tpm_tis_flush(iobase); +} + static int interrupts = -1; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); @@ -173,7 +198,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); while (len--) - iowrite8(*value++, phy->iobase + addr); + tpm_tis_iowrite8(*value++, phy->iobase, addr); return 0; } @@ -200,7 +225,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - iowrite32(value, phy->iobase + addr); + tpm_tis_iowrite32(value, phy->iobase, addr); return 0; } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 06504384c3765190ab8e1b310b5099e1e7b9b1f3..1dbd40f6c01ddf98a5e65aaeb81be219aab79b06 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -405,8 +405,11 @@ config ARMV7M_SYSTICK This options enables support for the ARMv7M system timer unit config ATMEL_PIT + bool "Microchip ARM Periodic Interval Timer (PIT)" if COMPILE_TEST select TIMER_OF if OF - def_bool SOC_AT91SAM9 || SOC_SAMA5 + help + This enables the build of the clocksource and clockevent driver for + the integrated PIT in Microchip ARM SoCs. config ATMEL_ST bool "Atmel ST timer support" if COMPILE_TEST @@ -416,6 +419,14 @@ help Support for the Atmel ST timer. +config ATMEL_ARM_TCB_CLKSRC + bool "Microchip ARM TC Block" if COMPILE_TEST + select REGMAP_MMIO + depends on GENERIC_CLOCKEVENTS + help + This enables the build of the clocksource and clockevent driver for + the integrated Timer Counter Blocks in Microchip ARM SoCs.
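Editor's aside on the tpm_tis hunk above (hedged): the flush-after-write idiom generalizes to other MMIO drivers on PREEMPT_RT, where a queue of posted writes issued from a preemptible section can stall the next reader on the bus. A generic sketch, with hypothetical names:

#include <linux/io.h>

static inline void example_mmio_write_flushed(void __iomem *base,
					      u32 offset, u32 val)
{
	writel(val, base + offset);	/* posted MMIO write */
	(void)readl(base + offset);	/* read-back forces the write to complete */
	/*
	 * NB: pick a side-effect-free register to read back, as the TPM
	 * code does with TPM_ACCESS(0).
	 */
}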
+ config CLKSRC_EXYNOS_MCT bool "Exynos multi core timer driver" if COMPILE_TEST depends on ARM || ARM64 diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index db51b2427e8a64e9bfe20bbb41a9363069b101f4..0df9384a1230d2a8cdb7250f9a3a82052c253928 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -3,7 +3,8 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o obj-$(CONFIG_TIMER_PROBE) += timer-probe.o obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o -obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o +obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o +obj-$(CONFIG_ATMEL_ARM_TCB_CLKSRC) += timer-atmel-tcb.o obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 43f4d5c4d6fa4fdb8f4581d71010e4b54ecd18da..ba15242a6066553c93a78ffd4cdac94650da5ea3 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -25,8 +25,7 @@ * this 32 bit free-running counter. the second channel is not used. * * - The third channel may be used to provide a 16-bit clockevent - * source, used in either periodic or oneshot mode. This runs - * at 32 KiHZ, and can handle delays of up to two seconds. + * source, used in either periodic or oneshot mode. * * A boot clocksource and clockevent source are also currently needed, * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so @@ -126,6 +125,8 @@ static struct clocksource clksrc = { struct tc_clkevt_device { struct clock_event_device clkevt; struct clk *clk; + bool clk_enabled; + u32 freq; void __iomem *regs; }; @@ -134,15 +135,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) return container_of(clkevt, struct tc_clkevt_device, clkevt); } -/* For now, we always use the 32K clock ... this optimizes for NO_HZ, - * because using one of the divided clocks would usually mean the - * tick rate can never be less than several dozen Hz (vs 0.5 Hz). - * - * A divided clock could be good for high resolution timers, since - * 30.5 usec resolution can seem "low". 
- */ static u32 timer_clock; +static void tc_clk_disable(struct clock_event_device *d) +{ + struct tc_clkevt_device *tcd = to_tc_clkevt(d); + + clk_disable(tcd->clk); + tcd->clk_enabled = false; +} + +static void tc_clk_enable(struct clock_event_device *d) +{ + struct tc_clkevt_device *tcd = to_tc_clkevt(d); + + if (tcd->clk_enabled) + return; + clk_enable(tcd->clk); + tcd->clk_enabled = true; +} + static int tc_shutdown(struct clock_event_device *d) { struct tc_clkevt_device *tcd = to_tc_clkevt(d); @@ -150,8 +162,14 @@ static int tc_shutdown(struct clock_event_device *d) writel(0xff, regs + ATMEL_TC_REG(2, IDR)); writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); + return 0; +} + +static int tc_shutdown_clk_off(struct clock_event_device *d) +{ + tc_shutdown(d); if (!clockevent_state_detached(d)) - clk_disable(tcd->clk); + tc_clk_disable(d); return 0; } @@ -164,9 +182,9 @@ static int tc_set_oneshot(struct clock_event_device *d) if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) tc_shutdown(d); - clk_enable(tcd->clk); + tc_clk_enable(d); - /* slow clock, count up to RC, then irq and stop */ + /* count up to RC, then irq and stop */ writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); @@ -186,12 +204,12 @@ static int tc_set_periodic(struct clock_event_device *d) /* By not making the gentime core emulate periodic mode on top * of oneshot, we get lower overhead and improved accuracy. */ - clk_enable(tcd->clk); + tc_clk_enable(d); - /* slow clock, count up to RC, then irq and restart */ + /* count up to RC, then irq and restart */ writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); /* Enable clock and interrupts on RC compare */ writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); @@ -218,9 +236,13 @@ static struct tc_clkevt_device clkevt = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, /* Should be lower than at91rm9200's system timer */ +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK .rating = 125, +#else + .rating = 200, +#endif .set_next_event = tc_next_event, - .set_state_shutdown = tc_shutdown, + .set_state_shutdown = tc_shutdown_clk_off, .set_state_periodic = tc_set_periodic, .set_state_oneshot = tc_set_oneshot, }, @@ -240,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle) return IRQ_NONE; } -static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) { + unsigned divisor = atmel_tc_divisors[divisor_idx]; int ret; struct clk *t2_clk = tc->clk[2]; int irq = tc->irq[2]; @@ -262,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) clkevt.regs = tc->regs; clkevt.clk = t2_clk; - timer_clock = clk32k_divisor_idx; + timer_clock = divisor_idx; + if (!divisor) + clkevt.freq = 32768; + else + clkevt.freq = clk_get_rate(t2_clk) / divisor; clkevt.clkevt.cpumask = cpumask_of(0); @@ -273,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) return ret; } - clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); + clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); return ret; } @@ -410,7 +437,11 @@ static int __init tcb_clksrc_init(void) goto err_disable_t1; /* channel 2: periodic and 
oneshot timer support */ +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ret = setup_clkevents(tc, clk32k_divisor_idx); +#else + ret = setup_clkevents(tc, best_divisor_idx); +#endif if (ret) goto err_unregister_clksrc; diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c new file mode 100644 index 0000000000000000000000000000000000000000..63ce3b69338a08543a53ac7c91c8a2860c668a0f --- /dev/null +++ b/drivers/clocksource/timer-atmel-tcb.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct atmel_tcb_clksrc { + struct clocksource clksrc; + struct clock_event_device clkevt; + struct regmap *regmap; + void __iomem *base; + struct clk *clk[2]; + char name[20]; + int channels[2]; + int bits; + int irq; + struct { + u32 cmr; + u32 imr; + u32 rc; + bool clken; + } cache[2]; + u32 bmr_cache; + bool registered; + bool clk_enabled; +}; + +static struct atmel_tcb_clksrc tc, tce; + +static struct clk *tcb_clk_get(struct device_node *node, int channel) +{ + struct clk *clk; + char clk_name[] = "t0_clk"; + + clk_name[1] += channel; + clk = of_clk_get_by_name(node->parent, clk_name); + if (!IS_ERR(clk)) + return clk; + + return of_clk_get_by_name(node->parent, "t0_clk"); +} + +/* + * Clockevent device using its own channel + */ + +static void tc_clkevt2_clk_disable(struct clock_event_device *d) +{ + clk_disable(tce.clk[0]); + tce.clk_enabled = false; +} + +static void tc_clkevt2_clk_enable(struct clock_event_device *d) +{ + if (tce.clk_enabled) + return; + clk_enable(tce.clk[0]); + tce.clk_enabled = true; +} + +static int tc_clkevt2_stop(struct clock_event_device *d) +{ + writel(0xff, tce.base + ATMEL_TC_IDR(tce.channels[0])); + writel(ATMEL_TC_CCR_CLKDIS, tce.base + ATMEL_TC_CCR(tce.channels[0])); + + return 0; +} + +static int tc_clkevt2_shutdown(struct clock_event_device *d) +{ + tc_clkevt2_stop(d); + if (!clockevent_state_detached(d)) + tc_clkevt2_clk_disable(d); + + return 0; +} + +/* For now, we always use the 32K clock ... this optimizes for NO_HZ, + * because using one of the divided clocks would usually mean the + * tick rate can never be less than several dozen Hz (vs 0.5 Hz). + * + * A divided clock could be good for high resolution timers, since + * 30.5 usec resolution can seem "low". + */ +static int tc_clkevt2_set_oneshot(struct clock_event_device *d) +{ + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_clkevt2_stop(d); + + tc_clkevt2_clk_enable(d); + + /* slow clock, count up to RC, then irq and stop */ + writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_CPCSTOP | + ATMEL_TC_CMR_WAVE | ATMEL_TC_CMR_WAVESEL_UPRC, + tce.base + ATMEL_TC_CMR(tce.channels[0])); + writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0])); + + return 0; +} + +static int tc_clkevt2_set_periodic(struct clock_event_device *d) +{ + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_clkevt2_stop(d); + + /* By not making the gentime core emulate periodic mode on top + * of oneshot, we get lower overhead and improved accuracy. 
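 *
 * Editor's worked example (assuming HZ=100): the RC value programmed
 * below is (32768 + 50) / 100 = 328, so the 32.768 kHz channel fires
 * every 328 counts, i.e. about 10.01 ms, as close to the 10 ms tick as
 * the slow clock can get.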
+ */ + tc_clkevt2_clk_enable(d); + + /* slow clock, count up to RC, then irq and restart */ + writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_WAVE | + ATMEL_TC_CMR_WAVESEL_UPRC, + tce.base + ATMEL_TC_CMR(tce.channels[0])); + writel((32768 + HZ / 2) / HZ, tce.base + ATMEL_TC_RC(tce.channels[0])); + + /* Enable clock and interrupts on RC compare */ + writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0])); + writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, + tce.base + ATMEL_TC_CCR(tce.channels[0])); + + return 0; +} + +static int tc_clkevt2_next_event(unsigned long delta, + struct clock_event_device *d) +{ + writel(delta, tce.base + ATMEL_TC_RC(tce.channels[0])); + writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, + tce.base + ATMEL_TC_CCR(tce.channels[0])); + + return 0; +} + +static irqreturn_t tc_clkevt2_irq(int irq, void *handle) +{ + unsigned int sr; + + sr = readl(tce.base + ATMEL_TC_SR(tce.channels[0])); + if (sr & ATMEL_TC_CPCS) { + tce.clkevt.event_handler(&tce.clkevt); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void tc_clkevt2_suspend(struct clock_event_device *d) +{ + tce.cache[0].cmr = readl(tce.base + ATMEL_TC_CMR(tce.channels[0])); + tce.cache[0].imr = readl(tce.base + ATMEL_TC_IMR(tce.channels[0])); + tce.cache[0].rc = readl(tce.base + ATMEL_TC_RC(tce.channels[0])); + tce.cache[0].clken = !!(readl(tce.base + ATMEL_TC_SR(tce.channels[0])) & + ATMEL_TC_CLKSTA); +} + +static void tc_clkevt2_resume(struct clock_event_device *d) +{ + /* Restore registers for the channel, RA and RB are not used */ + writel(tce.cache[0].cmr, tc.base + ATMEL_TC_CMR(tce.channels[0])); + writel(tce.cache[0].rc, tc.base + ATMEL_TC_RC(tce.channels[0])); + writel(0, tc.base + ATMEL_TC_RA(tce.channels[0])); + writel(0, tc.base + ATMEL_TC_RB(tce.channels[0])); + /* Disable all the interrupts */ + writel(0xff, tc.base + ATMEL_TC_IDR(tce.channels[0])); + /* Reenable interrupts that were enabled before suspending */ + writel(tce.cache[0].imr, tc.base + ATMEL_TC_IER(tce.channels[0])); + + /* Start the clock if it was used */ + if (tce.cache[0].clken) + writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, + tc.base + ATMEL_TC_CCR(tce.channels[0])); +} + +static int __init tc_clkevt_register(struct device_node *node, + struct regmap *regmap, void __iomem *base, + int channel, int irq, int bits) +{ + int ret; + struct clk *slow_clk; + + tce.regmap = regmap; + tce.base = base; + tce.channels[0] = channel; + tce.irq = irq; + + slow_clk = of_clk_get_by_name(node->parent, "slow_clk"); + if (IS_ERR(slow_clk)) + return PTR_ERR(slow_clk); + + ret = clk_prepare_enable(slow_clk); + if (ret) + return ret; + + tce.clk[0] = tcb_clk_get(node, tce.channels[0]); + if (IS_ERR(tce.clk[0])) { + ret = PTR_ERR(tce.clk[0]); + goto err_slow; + } + + snprintf(tce.name, sizeof(tce.name), "%s:%d", + kbasename(node->parent->full_name), channel); + tce.clkevt.cpumask = cpumask_of(0); + tce.clkevt.name = tce.name; + tce.clkevt.set_next_event = tc_clkevt2_next_event, + tce.clkevt.set_state_shutdown = tc_clkevt2_shutdown, + tce.clkevt.set_state_periodic = tc_clkevt2_set_periodic, + tce.clkevt.set_state_oneshot = tc_clkevt2_set_oneshot, + tce.clkevt.suspend = tc_clkevt2_suspend, + tce.clkevt.resume = tc_clkevt2_resume, + tce.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + tce.clkevt.rating = 140; + + /* try to enable clk to avoid future errors in mode change */ + ret = clk_prepare_enable(tce.clk[0]); + if (ret) + goto err_slow; + clk_disable(tce.clk[0]); + + clockevents_config_and_register(&tce.clkevt, 32768, 
1, + CLOCKSOURCE_MASK(bits)); + + ret = request_irq(tce.irq, tc_clkevt2_irq, IRQF_TIMER | IRQF_SHARED, + tce.clkevt.name, &tce); + if (ret) + goto err_clk; + + tce.registered = true; + + return 0; + +err_clk: + clk_unprepare(tce.clk[0]); +err_slow: + clk_disable_unprepare(slow_clk); + + return ret; +} + +/* + * Clocksource and clockevent using the same channel(s) + */ +static u64 tc_get_cycles(struct clocksource *cs) +{ + u32 lower, upper; + + do { + upper = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1])); + lower = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0])); + } while (upper != readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1]))); + + return (upper << 16) | lower; +} + +static u64 tc_get_cycles32(struct clocksource *cs) +{ + return readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0])); +} + +static u64 notrace tc_sched_clock_read(void) +{ + return tc_get_cycles(&tc.clksrc); +} + +static u64 notrace tc_sched_clock_read32(void) +{ + return tc_get_cycles32(&tc.clksrc); +} + +static int tcb_clkevt_next_event(unsigned long delta, + struct clock_event_device *d) +{ + u32 old, next, cur; + + old = readl(tc.base + ATMEL_TC_CV(tc.channels[0])); + next = old + delta; + writel(next, tc.base + ATMEL_TC_RC(tc.channels[0])); + cur = readl(tc.base + ATMEL_TC_CV(tc.channels[0])); + + /* check whether the delta elapsed while setting the register */ + if ((next < old && cur < old && cur > next) || + (next > old && (cur < old || cur > next))) { + /* + * Clear the CPCS bit in the status register to avoid + * generating a spurious interrupt next time a valid + * timer event is configured. + */ + old = readl(tc.base + ATMEL_TC_SR(tc.channels[0])); + return -ETIME; + } + + writel(ATMEL_TC_CPCS, tc.base + ATMEL_TC_IER(tc.channels[0])); + + return 0; +} + +static irqreturn_t tc_clkevt_irq(int irq, void *handle) +{ + unsigned int sr; + + sr = readl(tc.base + ATMEL_TC_SR(tc.channels[0])); + if (sr & ATMEL_TC_CPCS) { + tc.clkevt.event_handler(&tc.clkevt); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int tcb_clkevt_oneshot(struct clock_event_device *dev) +{ + if (clockevent_state_oneshot(dev)) + return 0; + + /* + * Because both clockevent devices may share the same IRQ, we don't want + * the less likely one to stay requested + */ + return request_irq(tc.irq, tc_clkevt_irq, IRQF_TIMER | IRQF_SHARED, + tc.name, &tc); +} + +static int tcb_clkevt_shutdown(struct clock_event_device *dev) +{ + writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[0])); + if (tc.bits == 16) + writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[1])); + + if (!clockevent_state_detached(dev)) + free_irq(tc.irq, &tc); + + return 0; +} + +static void __init tcb_setup_dual_chan(struct atmel_tcb_clksrc *tc, + int mck_divisor_idx) +{ + /* first channel: waveform mode, input mclk/8, clock TIOA on overflow */ + writel(mck_divisor_idx /* likely divide-by-8 */ + | ATMEL_TC_CMR_WAVE + | ATMEL_TC_CMR_WAVESEL_UP /* free-run */ + | ATMEL_TC_CMR_ACPA(SET) /* TIOA rises at 0 */ + | ATMEL_TC_CMR_ACPC(CLEAR), /* (duty cycle 50%) */ + tc->base + ATMEL_TC_CMR(tc->channels[0])); + writel(0x0000, tc->base + ATMEL_TC_RA(tc->channels[0])); + writel(0x8000, tc->base + ATMEL_TC_RC(tc->channels[0])); + writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */ + writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0])); + + /* second channel: waveform mode, input TIOA */ + writel(ATMEL_TC_CMR_XC(tc->channels[1]) /* input: TIOA */ + | ATMEL_TC_CMR_WAVE + | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */ + tc->base + 
ATMEL_TC_CMR(tc->channels[1])); + writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[1])); /* no irqs */ + writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[1])); + + /* chain the two channels: the second channel counts TIOA pulses from the first */ + regmap_write(tc->regmap, ATMEL_TC_BMR, + ATMEL_TC_BMR_TCXC(1 + tc->channels[1], tc->channels[1])); + /* then reset all the timers */ + regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); +} + +static void __init tcb_setup_single_chan(struct atmel_tcb_clksrc *tc, + int mck_divisor_idx) +{ + /* channel 0: waveform mode, input mclk/8 */ + writel(mck_divisor_idx /* likely divide-by-8 */ + | ATMEL_TC_CMR_WAVE + | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */ + tc->base + ATMEL_TC_CMR(tc->channels[0])); + writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */ + writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0])); + + /* then reset all the timers */ + regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); +} + +static void tc_clksrc_suspend(struct clocksource *cs) +{ + int i; + + for (i = 0; i < 1 + (tc.bits == 16); i++) { + tc.cache[i].cmr = readl(tc.base + ATMEL_TC_CMR(tc.channels[i])); + tc.cache[i].imr = readl(tc.base + ATMEL_TC_IMR(tc.channels[i])); + tc.cache[i].rc = readl(tc.base + ATMEL_TC_RC(tc.channels[i])); + tc.cache[i].clken = !!(readl(tc.base + + ATMEL_TC_SR(tc.channels[i])) & + ATMEL_TC_CLKSTA); + } + + if (tc.bits == 16) + regmap_read(tc.regmap, ATMEL_TC_BMR, &tc.bmr_cache); +} + +static void tc_clksrc_resume(struct clocksource *cs) +{ + int i; + + for (i = 0; i < 1 + (tc.bits == 16); i++) { + /* Restore registers for the channel, RA and RB are not used */ + writel(tc.cache[i].cmr, tc.base + ATMEL_TC_CMR(tc.channels[i])); + writel(tc.cache[i].rc, tc.base + ATMEL_TC_RC(tc.channels[i])); + writel(0, tc.base + ATMEL_TC_RA(tc.channels[i])); + writel(0, tc.base + ATMEL_TC_RB(tc.channels[i])); + /* Disable all the interrupts */ + writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[i])); + /* Reenable interrupts that were enabled before suspending */ + writel(tc.cache[i].imr, tc.base + ATMEL_TC_IER(tc.channels[i])); + + /* Start the clock if it was used */ + if (tc.cache[i].clken) + writel(ATMEL_TC_CCR_CLKEN, tc.base + + ATMEL_TC_CCR(tc.channels[i])); + } + + /* in case of dual channel, chain channels */ + if (tc.bits == 16) + regmap_write(tc.regmap, ATMEL_TC_BMR, tc.bmr_cache); + /* Finally, trigger all the channels */ + regmap_write(tc.regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); +} + +static int __init tcb_clksrc_register(struct device_node *node, + struct regmap *regmap, void __iomem *base, + int channel, int channel1, int irq, + int bits) +{ + u32 rate, divided_rate = 0; + int best_divisor_idx = -1; + int i, err = -1; + u64 (*tc_sched_clock)(void); + + tc.regmap = regmap; + tc.base = base; + tc.channels[0] = channel; + tc.channels[1] = channel1; + tc.irq = irq; + tc.bits = bits; + + tc.clk[0] = tcb_clk_get(node, tc.channels[0]); + if (IS_ERR(tc.clk[0])) + return PTR_ERR(tc.clk[0]); + err = clk_prepare_enable(tc.clk[0]); + if (err) { + pr_debug("can't enable T0 clk\n"); + goto err_clk; + } + + /* How fast will we be counting? Pick something over 5 MHz.
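 *
 * Editor's worked example (hedged, assuming a 132 MHz peripheral
 * clock): the loop below walks the divided clocks {mck/2, mck/8,
 * mck/32, mck/128}; 132/32 = 4.125 MHz falls under the 5 MHz floor, so
 * the selection settles on mck/8 = 16.5 MHz, the slowest tap that is
 * still fast enough.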
*/ + rate = (u32)clk_get_rate(tc.clk[0]); + for (i = 0; i < 5; i++) { + unsigned int divisor = atmel_tc_divisors[i]; + unsigned int tmp; + + if (!divisor) + continue; + + tmp = rate / divisor; + pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp); + if (best_divisor_idx > 0) { + if (tmp < 5 * 1000 * 1000) + continue; + } + divided_rate = tmp; + best_divisor_idx = i; + } + + if (tc.bits == 32) { + tc.clksrc.read = tc_get_cycles32; + tcb_setup_single_chan(&tc, best_divisor_idx); + tc_sched_clock = tc_sched_clock_read32; + snprintf(tc.name, sizeof(tc.name), "%s:%d", + kbasename(node->parent->full_name), tc.channels[0]); + } else { + tc.clk[1] = tcb_clk_get(node, tc.channels[1]); + if (IS_ERR(tc.clk[1])) + goto err_disable_t0; + + err = clk_prepare_enable(tc.clk[1]); + if (err) { + pr_debug("can't enable T1 clk\n"); + goto err_clk1; + } + tc.clksrc.read = tc_get_cycles, + tcb_setup_dual_chan(&tc, best_divisor_idx); + tc_sched_clock = tc_sched_clock_read; + snprintf(tc.name, sizeof(tc.name), "%s:%d,%d", + kbasename(node->parent->full_name), tc.channels[0], + tc.channels[1]); + } + + pr_debug("%s at %d.%03d MHz\n", tc.name, + divided_rate / 1000000, + ((divided_rate + 500000) % 1000000) / 1000); + + tc.clksrc.name = tc.name; + tc.clksrc.suspend = tc_clksrc_suspend; + tc.clksrc.resume = tc_clksrc_resume; + tc.clksrc.rating = 200; + tc.clksrc.mask = CLOCKSOURCE_MASK(32); + tc.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + + err = clocksource_register_hz(&tc.clksrc, divided_rate); + if (err) + goto err_disable_t1; + + sched_clock_register(tc_sched_clock, 32, divided_rate); + + tc.registered = true; + + /* Set up and register clockevents */ + tc.clkevt.name = tc.name; + tc.clkevt.cpumask = cpumask_of(0); + tc.clkevt.set_next_event = tcb_clkevt_next_event; + tc.clkevt.set_state_oneshot = tcb_clkevt_oneshot; + tc.clkevt.set_state_shutdown = tcb_clkevt_shutdown; + tc.clkevt.features = CLOCK_EVT_FEAT_ONESHOT; + tc.clkevt.rating = 125; + + clockevents_config_and_register(&tc.clkevt, divided_rate, 1, + BIT(tc.bits) - 1); + + return 0; + +err_disable_t1: + if (tc.bits == 16) + clk_disable_unprepare(tc.clk[1]); + +err_clk1: + if (tc.bits == 16) + clk_put(tc.clk[1]); + +err_disable_t0: + clk_disable_unprepare(tc.clk[0]); + +err_clk: + clk_put(tc.clk[0]); + + pr_err("%s: unable to register clocksource/clockevent\n", + tc.clksrc.name); + + return err; +} + +static int __init tcb_clksrc_init(struct device_node *node) +{ + const struct of_device_id *match; + struct regmap *regmap; + void __iomem *tcb_base; + u32 channel; + int irq, err, chan1 = -1; + unsigned bits; + + if (tc.registered && tce.registered) + return -ENODEV; + + /* + * The regmap has to be used to access registers that are shared + * between channels on the same TCB but we keep direct IO access for + * the counters to avoid the impact on performance + */ + regmap = syscon_node_to_regmap(node->parent); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + tcb_base = of_iomap(node->parent, 0); + if (!tcb_base) { + pr_err("%s +%d %s\n", __FILE__, __LINE__, __func__); + return -ENXIO; + } + + match = of_match_node(atmel_tcb_dt_ids, node->parent); + bits = (uintptr_t)match->data; + + err = of_property_read_u32_index(node, "reg", 0, &channel); + if (err) + return err; + + irq = of_irq_get(node->parent, channel); + if (irq < 0) { + irq = of_irq_get(node->parent, 0); + if (irq < 0) + return irq; + } + + if (tc.registered) + return tc_clkevt_register(node, regmap, tcb_base, channel, irq, + bits); + + if (bits == 16) { + 
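/*
 * Editor's sketch (hedged) of the devicetree shape this init path
 * expects: an "atmel,tcb-timer" child of the TCB node, listing one
 * channel in "reg" for 32-bit TCs or two chained channels for 16-bit
 * TCs, e.g.:
 *
 *	timer@0 {
 *		compatible = "atmel,tcb-timer";
 *		reg = <0>, <1>;		// two 16-bit channels -> one clocksource
 *	};
 */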
of_property_read_u32_index(node, "reg", 1, &chan1); + if (chan1 == -1) { + if (tce.registered) { + pr_err("%s: clocksource needs two channels\n", + node->parent->full_name); + return -EINVAL; + } else { + return tc_clkevt_register(node, regmap, + tcb_base, channel, + irq, bits); + } + } + } + + return tcb_clksrc_register(node, regmap, tcb_base, channel, chan1, irq, + bits); +} +TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index ad48fd52cb5373d6758e5483d442138de7a18bf5..c5264b3ee0b068fbab9f2c4cd5c8134793b0ee88 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -32,6 +32,7 @@ #include #include +#include /* * Size of a cn_msg followed by a proc_event structure. Since the @@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; /* proc_event_counts is used as the sequence number of the netlink message */ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; +static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock); static inline void send_msg(struct cn_msg *msg) { - preempt_disable(); + local_lock(send_msg_lock); msg->seq = __this_cpu_inc_return(proc_event_counts) - 1; ((struct proc_event *)msg->data)->cpu = smp_processor_id(); @@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg) */ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); - preempt_enable(); + local_unlock(send_msg_lock); } void proc_fork_connector(struct task_struct *task) diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 35f71825b7f3020980555612bdf13db7d095aef4..bb4a6160d0f790fa3c825e9bc7c25c76baf25626 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI config X86_POWERNOW_K8 tristate "AMD Opteron/Athlon64 PowerNow!" - depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ + depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE help This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. Support for K10 and newer processors is now in acpi-cpufreq. diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 67f7f8c42c93202af160109fe3e05c26b33450fa..b84e6c8b1e138226ec4feebf66167738da6a10df 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -83,13 +83,6 @@ EXPORT_SYMBOL(caam_congested); static u64 times_congested; #endif -/* - * CPU from where the module initialised. This is required because QMan driver - * requires CGRs to be removed from same CPU from where they were originally - * allocated. - */ -static int mod_init_cpu; - /* * This is a a cache of buffers, from which the users of CAAM QI driver * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than @@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx) } EXPORT_SYMBOL(caam_drv_ctx_rel); -int caam_qi_shutdown(struct device *qidev) +void caam_qi_shutdown(struct device *qidev) { - int i, ret; + int i; struct caam_qi_priv *priv = dev_get_drvdata(qidev); const cpumask_t *cpus = qman_affine_cpus(); - struct cpumask old_cpumask = current->cpus_allowed; for_each_cpu(i, cpus) { struct napi_struct *irqtask; @@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev) dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); } - /* - * QMan driver requires CGRs to be deleted from same CPU from where they - * were instantiated. Hence we get the module removal execute from the - * same CPU from where it was originally inserted. 
- */ - set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); - - ret = qman_delete_cgr(&priv->cgr); - if (ret) - dev_err(qidev, "Deletion of CGR failed: %d\n", ret); - else - qman_release_cgrid(priv->cgr.cgrid); + qman_delete_cgr_safe(&priv->cgr); + qman_release_cgrid(priv->cgr.cgrid); kmem_cache_destroy(qi_cache); - /* Now that we're done with the CGRs, restore the cpus allowed mask */ - set_cpus_allowed_ptr(current, &old_cpumask); - platform_device_unregister(priv->qi_pdev); - return ret; } static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) @@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev) struct device *ctrldev = &caam_pdev->dev, *qidev; struct caam_drv_private *ctrlpriv; const cpumask_t *cpus = qman_affine_cpus(); - struct cpumask old_cpumask = current->cpus_allowed; static struct platform_device_info qi_pdev_info = { .name = "caam_qi", .id = PLATFORM_DEVID_NONE }; - /* - * QMAN requires CGRs to be removed from same CPU+portal from where it - * was originally allocated. Hence we need to note down the - * initialisation CPU and use the same CPU for module exit. - * We select the first CPU to from the list of portal owning CPUs. - * Then we pin module init to this CPU. - */ - mod_init_cpu = cpumask_first(cpus); - set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); - qi_pdev_info.parent = ctrldev; qi_pdev_info.dma_mask = dma_get_mask(ctrldev); qi_pdev = platform_device_register_full(&qi_pdev_info); @@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev) return -ENOMEM; } - /* Done with the CGRs; restore the cpus allowed mask */ - set_cpus_allowed_ptr(current, &old_cpumask); #ifdef CONFIG_DEBUG_FS debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, ×_congested, &caam_fops_u64_ro); diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h index 357b69f570725bb50749ca92449afaa066f475d7..b6c8acc308536ffd84c96a362fd0b1b808fa300e 100644 --- a/drivers/crypto/caam/qi.h +++ b/drivers/crypto/caam/qi.h @@ -174,7 +174,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc); void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx); int caam_qi_init(struct platform_device *pdev); -int caam_qi_shutdown(struct device *dev); +void caam_qi_shutdown(struct device *dev); /** * qi_cache_alloc - Allocate buffers from CAAM-QI cache diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 69842145c223fd73f8228f37ab668774ac55c112..4c3ef46e71493913295e48405efa3b2e7e82a5d2 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -179,7 +179,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) return 0; retry: - seq = read_seqcount_begin(&resv->seq); + seq = read_seqbegin(&resv->seq); rcu_read_lock(); fobj = rcu_dereference(resv->fence); @@ -188,7 +188,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) else shared_count = 0; fence_excl = rcu_dereference(resv->fence_excl); - if (read_seqcount_retry(&resv->seq, seq)) { + if (read_seqretry(&resv->seq, seq)) { rcu_read_unlock(); goto retry; } @@ -1046,12 +1046,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) robj = buf_obj->resv; while (true) { - seq = read_seqcount_begin(&robj->seq); + seq = read_seqbegin(&robj->seq); rcu_read_lock(); fobj = rcu_dereference(robj->fence); shared_count = fobj ? 
fobj->shared_count : 0; fence = rcu_dereference(robj->fence_excl); - if (!read_seqcount_retry(&robj->seq, seq)) + if (!read_seqretry(&robj->seq, seq)) break; rcu_read_unlock(); } diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 49ab09468ba15c12ec80e3e34b1656e450f1fa70..f11d58492216beb35230d442271c2d5c4dd07cdb 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -109,8 +109,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, dma_fence_get(fence); - preempt_disable(); - write_seqcount_begin(&obj->seq); + write_seqlock(&obj->seq); for (i = 0; i < fobj->shared_count; ++i) { struct dma_fence *old_fence; @@ -121,8 +120,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, if (old_fence->context == fence->context) { /* memory barrier is added by write_seqcount_begin */ RCU_INIT_POINTER(fobj->shared[i], fence); - write_seqcount_end(&obj->seq); - preempt_enable(); + write_sequnlock(&obj->seq); dma_fence_put(old_fence); return; @@ -146,8 +144,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, fobj->shared_count++; } - write_seqcount_end(&obj->seq); - preempt_enable(); + write_sequnlock(&obj->seq); dma_fence_put(signaled); } @@ -191,15 +188,13 @@ reservation_object_add_shared_replace(struct reservation_object *obj, fobj->shared_count++; done: - preempt_disable(); - write_seqcount_begin(&obj->seq); + write_seqlock(&obj->seq); /* * RCU_INIT_POINTER can be used here, * seqcount provides the necessary barriers */ RCU_INIT_POINTER(obj->fence, fobj); - write_seqcount_end(&obj->seq); - preempt_enable(); + write_sequnlock(&obj->seq); if (!old) return; @@ -259,14 +254,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, if (fence) dma_fence_get(fence); - preempt_disable(); - write_seqcount_begin(&obj->seq); - /* write_seqcount_begin provides the necessary memory barrier */ + write_seqlock(&obj->seq); RCU_INIT_POINTER(obj->fence_excl, fence); if (old) old->shared_count = 0; - write_seqcount_end(&obj->seq); - preempt_enable(); + write_sequnlock(&obj->seq); /* inplace update, no shared fences */ while (i--) @@ -349,13 +341,10 @@ int reservation_object_copy_fences(struct reservation_object *dst, src_list = reservation_object_get_list(dst); old = reservation_object_get_excl(dst); - preempt_disable(); - write_seqcount_begin(&dst->seq); - /* write_seqcount_begin provides the necessary memory barrier */ + write_seqlock(&dst->seq); RCU_INIT_POINTER(dst->fence_excl, new); RCU_INIT_POINTER(dst->fence, dst_list); - write_seqcount_end(&dst->seq); - preempt_enable(); + write_sequnlock(&dst->seq); if (src_list) kfree_rcu(src_list, rcu); @@ -396,7 +385,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, shared_count = i = 0; rcu_read_lock(); - seq = read_seqcount_begin(&obj->seq); + seq = read_seqbegin(&obj->seq); fence_excl = rcu_dereference(obj->fence_excl); if (fence_excl && !dma_fence_get_rcu(fence_excl)) @@ -445,7 +434,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, } } - if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { + if (i != shared_count || read_seqretry(&obj->seq, seq)) { while (i--) dma_fence_put(shared[i]); dma_fence_put(fence_excl); @@ -494,7 +483,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, retry: shared_count = 0; - seq = read_seqcount_begin(&obj->seq); + seq = read_seqbegin(&obj->seq); rcu_read_lock(); i = -1; @@ -541,7 +530,7 @@ long 
reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 	rcu_read_unlock();
 
 	if (fence) {
-		if (read_seqcount_retry(&obj->seq, seq)) {
+		if (read_seqretry(&obj->seq, seq)) {
 			dma_fence_put(fence);
 			goto retry;
 		}
@@ -597,7 +586,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 retry:
 	ret = true;
 	shared_count = 0;
-	seq = read_seqcount_begin(&obj->seq);
+	seq = read_seqbegin(&obj->seq);
 
 	if (test_all) {
 		unsigned i;
@@ -618,7 +607,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 			break;
 		}
 
-		if (read_seqcount_retry(&obj->seq, seq))
+		if (read_seqretry(&obj->seq, seq))
 			goto retry;
 	}
 
@@ -631,7 +620,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 		if (ret < 0)
 			goto retry;
 
-		if (read_seqcount_retry(&obj->seq, seq))
+		if (read_seqretry(&obj->seq, seq))
 			goto retry;
 	}
 }
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e5f31af65aabf3037202389d5f5e1834b888ccc4..510a48184f12b54151ea2804223240720d5dcef2 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -620,7 +620,6 @@ config ZX_DMA
 	help
 	  Support the DMA engine for ZTE ZX family platform devices.
 
-
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 1c419e4cea8397521b3d8cdc0c946b58caf9ce66..4f019be9de0cd43df351162e65c3f6375cb8d346 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -40,6 +40,18 @@ config ARM_SCMI_PROTOCOL
 	  This protocol library provides interface for all the client
 	  drivers making use of the features offered by the SCMI.
 
+if ARM_SCMI_PROTOCOL
+config ARM_SCMI_TRANSPORT_FORCE_POLLING
+	bool "Support force polling mode for SCMI Mailbox"
+	help
+	  Support force polling mode for SCMI Mailbox transports.
+
+	  If you answer Y, the SCMI Mailbox transport can be forced to use
+	  polling on the TX path, ignoring any completion IRQ facility even
+	  when one is available; polling is then requested at boot time via
+	  a kernel parameter. If unsure, say N.
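+
+	  With this option enabled, pass "scmi.force_polling=1" on the
+	  kernel command line to force polling (handled by the
+	  early_param() in drivers/firmware/arm_scmi/driver.c).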
+
+endif #ARM_SCMI_PROTOCOL
+
 config ARM_SCMI_POWER_DOMAIN
 	tristate "SCMI power domain driver"
 	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index e8cd66705ad7a76933804152e83ce740e7cbdde2..9bcb60d1855381742e652255604af9a612694a7d 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -84,6 +84,10 @@ struct scmi_desc {
 	int max_msg_size;
 };
 
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_FORCE_POLLING
+static bool scmi_force_polling;
+#endif
+
 /**
  * struct scmi_chan_info - Structure representing a SCMI channel informfation
  *
@@ -272,6 +276,15 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
 	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
 	struct scmi_shared_mem __iomem *mem = cinfo->payload;
 
+#ifdef CONFIG_ARCH_PHYTIUM
+	/* the callee does not mark the channel free at init; the first caller does */
+	static int is_init;
+	if (unlikely(!is_init)) {
+		iowrite32(0x1, &mem->channel_status);
+		is_init = 1;
+	}
+#endif
+
 	/*
 	 * Ideally channel must be free by now unless OS timeout last
 	 * request and platform continued to process the same, wait
@@ -380,6 +393,14 @@ static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
 	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
 }
 
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_FORCE_POLLING
+static int __init scmi_set_force_polling(char *str)
+{
+	return kstrtobool(str, &scmi_force_polling);
+}
+early_param("scmi.force_polling", scmi_set_force_polling);
+#endif
+
 /**
  * scmi_do_xfer() - Do one transfer
  *
@@ -402,6 +423,11 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	if (unlikely(!cinfo))
 		return -EINVAL;
 
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_FORCE_POLLING
+	if (scmi_force_polling)
+		xfer->hdr.poll_completion = true;
+#endif
+
 	ret = mbox_send_message(cinfo->chan, xfer);
 	if (ret < 0) {
 		dev_dbg(dev, "mbox send fail %d\n", ret);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7098744f9276aa7b97fc65b1975098af31861058..da1aa1c211314af0a6919b0b9ecd8eb01e244470 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,7 +52,8 @@ struct efi __read_mostly efi = {
 	.properties_table	= EFI_INVALID_TABLE_ADDR,
 	.mem_attr_table		= EFI_INVALID_TABLE_ADDR,
 	.rng_seed		= EFI_INVALID_TABLE_ADDR,
-	.tpm_log		= EFI_INVALID_TABLE_ADDR
+	.tpm_log		= EFI_INVALID_TABLE_ADDR,
+	.mem_reserve		= EFI_INVALID_TABLE_ADDR,
 };
 
 EXPORT_SYMBOL(efi);
@@ -87,7 +88,7 @@ struct mm_struct efi_mm = {
 
 struct workqueue_struct *efi_rts_wq;
 
-static bool disable_runtime;
+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE);
 static int __init setup_noefi(char *arg)
 {
 	disable_runtime = true;
@@ -113,6 +114,9 @@ static int __init parse_efi_cmdline(char *str)
 	if (parse_option_str(str, "noruntime"))
 		disable_runtime = true;
 
+	if (parse_option_str(str, "runtime"))
+		disable_runtime = false;
+
 	return 0;
 }
 early_param("efi", parse_efi_cmdline);
@@ -489,6 +493,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
 	{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+	{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
 	{NULL_GUID, NULL, NULL},
 };
 
@@ -596,6 +601,41 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 		early_memunmap(tbl, sizeof(*tbl));
 	}
 
+	if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
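+		/*
+		 * The MEMRESERVE table is a singly linked list of pages,
+		 * each holding a small header (next link, size, count)
+		 * followed by base/size pairs; every entry is handed to
+		 * memblock below so the reservations persist across kexec.
+		 */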
+ unsigned long prsv = efi.mem_reserve; + + while (prsv) { + struct linux_efi_memreserve *rsv; + u8 *p; + int i; + + /* + * Just map a full page: that is what we will get + * anyway, and it permits us to map the entire entry + * before knowing its size. + */ + p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE), + PAGE_SIZE); + if (p == NULL) { + pr_err("Could not map UEFI memreserve entry!\n"); + return -ENOMEM; + } + + rsv = (void *)(p + prsv % PAGE_SIZE); + + /* reserve the entry itself */ + memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size)); + + for (i = 0; i < atomic_read(&rsv->count); i++) { + memblock_reserve(rsv->entry[i].base, + rsv->entry[i].size); + } + + prsv = rsv->next; + early_memunmap(p, PAGE_SIZE); + } + } + return 0; } @@ -942,6 +982,109 @@ bool efi_is_table_address(unsigned long phys_addr) return false; } +static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); +static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; + +static int __init efi_memreserve_map_root(void) +{ + if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR) + return -ENODEV; + + efi_memreserve_root = memremap(efi.mem_reserve, + sizeof(*efi_memreserve_root), + MEMREMAP_WB); + if (WARN_ON_ONCE(!efi_memreserve_root)) + return -ENOMEM; + return 0; +} + +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? request_resource(parent, res) : 0; +} + +int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) +{ + struct linux_efi_memreserve *rsv; + unsigned long prsv; + int rc, index; + + if (efi_memreserve_root == (void *)ULONG_MAX) + return -ENODEV; + + if (!efi_memreserve_root) { + rc = efi_memreserve_map_root(); + if (rc) + return rc; + } + + /* first try to find a slot in an existing linked list entry */ + for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { + rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); + index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); + if (index < rsv->size) { + rsv->entry[index].base = addr; + rsv->entry[index].size = size; + + memunmap(rsv); + return efi_mem_reserve_iomem(addr, size); + } + memunmap(rsv); + } + + /* no slot found - allocate a new linked list entry */ + rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC); + if (!rsv) + return -ENOMEM; + + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + + /* + * The memremap() call above assumes that a linux_efi_memreserve entry + * never crosses a page boundary, so let's ensure that this remains true + * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by + * using SZ_4K explicitly in the size calculation below. 
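+	 * (With that choice, rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K) is
+	 * simply the number of entry[] records that fit in one 4K page
+	 * after the linux_efi_memreserve header.)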
+ */ + rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K); + atomic_set(&rsv->count, 1); + rsv->entry[0].base = addr; + rsv->entry[0].size = size; + + spin_lock(&efi_mem_reserve_persistent_lock); + rsv->next = efi_memreserve_root->next; + efi_memreserve_root->next = __pa(rsv); + spin_unlock(&efi_mem_reserve_persistent_lock); + + return efi_mem_reserve_iomem(addr, size); +} + +static int __init efi_memreserve_root_init(void) +{ + if (efi_memreserve_root) + return 0; + if (efi_memreserve_map_root()) + efi_memreserve_root = (void *)ULONG_MAX; + return 0; +} +early_initcall(efi_memreserve_root_init); + #ifdef CONFIG_KEXEC static int update_efi_random_seed(struct notifier_block *nb, unsigned long code, void *unused) diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 6c09644d620e052d01f640c8df84e8d16bb05440..296b3211f689dd46598d8335d1b6bcb85a56bf81 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -69,6 +69,31 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) return si; } +void install_memreserve_table(efi_system_table_t *sys_table_arg) +{ + struct linux_efi_memreserve *rsv; + efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; + efi_status_t status; + + status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), + (void **)&rsv); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); + return; + } + + rsv->next = 0; + rsv->size = 0; + atomic_set(&rsv->count, 0); + + status = efi_call_early(install_configuration_table, + &memreserve_table_guid, + rsv); + if (status != EFI_SUCCESS) + pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); +} + + /* * This function handles the architcture specific differences between arm and * arm64 regarding where the kernel image must be loaded and any memory that @@ -235,6 +260,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, } } + install_memreserve_table(sys_table); + new_fdt_addr = fdt_addr; status = allocate_new_fdt_and_exit_boot(sys_table, handle, &new_fdt_addr, efi_get_max_fdt_addr(dram_base), diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d855c20de663cfbd38f0af1cbb8217d9a37f4206..2d573acb53f8f94a9fb0e4a7f046263f8904e08c 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -63,6 +63,7 @@ struct psci_operations psci_ops = { .conduit = PSCI_CONDUIT_NONE, .smccc_version = SMCCC_VERSION_1_0, }; +EXPORT_SYMBOL(psci_ops); enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void) { diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 2c34e9537f9e4b4f5f1a72c716f2292241d3652b..c32ae93eaa31e7d3b5b898e41428abc44c5420cb 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -79,6 +79,10 @@ config GPIO_GENERIC # put drivers in the right section, in alphabetical order +# This symbol is selected by both MMIO and PCI expanders +config GPIO_PHYTIUM_CORE + tristate + # This symbol is selected by both I2C and SPI expanders config GPIO_MAX730X tristate @@ -404,6 +408,26 @@ config GPIO_OMAP help Say yes here to enable GPIO support for TI OMAP SoCs. +config GPIO_PHYTIUM_PLAT + tristate "Phytium GPIO Platform support" + default y if ARCH_PHYTIUM + depends on ARM64 + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say yes here to enable GPIO support for Phytium SoCs. 
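+
+	  To compile this driver as a module, choose M here: the module
+	  will be called gpio-phytium-platform.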
+ +config GPIO_PHYTIUM_SGPIO + tristate "Phytium SGPIO support" + default y if ARCH_PHYTIUM + depends on ARM64 + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + help + Say yes here to enable SGPIO support for Phytium SoCs. + config GPIO_PL061 bool "PrimeCell PL061 GPIO support" depends on ARM_AMBA @@ -1308,6 +1332,19 @@ config GPIO_PCIE_IDIO_24 Input filter control is not supported by this driver, and the input filters are deactivated by this driver. +config GPIO_PHYTIUM_PCI + tristate "Phytium GPIO PCI support" + select GPIO_PHYTIUM_CORE + select IRQ_DOMAIN + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say Y here to support Phytium PCI GPIO controller on Px210 chipset. + An interrupt is generated when any of the inputs change state + (low to high or high to low). + + This driver can be used for Phytium Px210. + config GPIO_RDC321X tristate "RDC R-321x GPIO support" select MFD_CORE diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index c256aff66a6567e22ff8e80cfc73fffe224ac1d1..ae1730e175a48436e64585aa363fadb156e47eee 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -95,6 +95,10 @@ obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o +obj-$(CONFIG_GPIO_PHYTIUM_CORE) += gpio-phytium-core.o +obj-$(CONFIG_GPIO_PHYTIUM_PCI) += gpio-phytium-pci.o +obj-$(CONFIG_GPIO_PHYTIUM_PLAT) += gpio-phytium-platform.o +obj-$(CONFIG_GPIO_PHYTIUM_SGPIO) += gpio-phytium-sgpio.o obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o obj-$(CONFIG_GPIO_PCH) += gpio-pch.o diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c new file mode 100644 index 0000000000000000000000000000000000000000..ea0bfb2961556676ae64d837efaa7632bfda6803 --- /dev/null +++ b/drivers/gpio/gpio-phytium-core.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static int get_pin_location(struct phytium_gpio *gpio, unsigned int offset, + struct pin_loc *pl) +{ + int ret; + + if (offset < gpio->ngpio[0]) { + pl->port = 0; + pl->offset = offset; + ret = 0; + } else if (offset < (gpio->ngpio[0] + gpio->ngpio[1])) { + pl->port = 1; + pl->offset = offset - gpio->ngpio[0]; + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void phytium_gpio_toggle_trigger(struct phytium_gpio *gpio, + unsigned int offset) +{ + struct gpio_chip *gc; + u32 pol; + int val; + + /* Only port A can provide interrupt source */ + if (offset >= gpio->ngpio[0]) + return; + + gc = &gpio->gc; + + pol = readl(gpio->regs + GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offset); + if (val) + pol &= ~BIT(offset); + else + pol |= BIT(offset); + + writel(pol, gpio->regs + GPIO_INT_POLARITY); +} + +int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dat; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + + dat = gpio->regs + GPIO_EXT_PORTA + (loc.port * GPIO_PORT_STRIDE); + + return !!(readl(dat) & BIT(loc.offset)); +} +EXPORT_SYMBOL_GPL(phytium_gpio_get); + +void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + void __iomem *dr; + unsigned long flags; + u32 mask; + + if (get_pin_location(gpio, offset, &loc)) + return; + dr = gpio->regs + GPIO_SWPORTA_DR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + if (value) + mask = readl(dr) | BIT(loc.offset); + else + mask = readl(dr) & ~BIT(loc.offset); + + writel(mask, dr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_set); + +int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) & ~(BIT(loc.offset)), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_input); + +int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct pin_loc loc; + unsigned long flags; + void __iomem *ddr; + + if (get_pin_location(gpio, offset, &loc)) + return -EINVAL; + ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(readl(ddr) | BIT(loc.offset), ddr); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + phytium_gpio_set(gc, offset, value); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_direction_output); + +void phytium_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val = BIT(irqd_to_hwirq(d)); + + raw_spin_lock(&gpio->lock); + + writel(val, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_ack); + +void phytium_gpio_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc 
= irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_mask); + +void phytium_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock(&gpio->lock); + + val = readl(gpio->regs + GPIO_INTMASK); + val &= ~BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTMASK); + + raw_spin_unlock(&gpio->lock); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_unmask); + +int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + int hwirq = irqd_to_hwirq(d); + unsigned long flags, lvl, pol; + + if (hwirq < 0 || hwirq >= gpio->ngpio[0]) + return -EINVAL; + + if ((flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) && + (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) { + dev_err(gc->parent, + "trying to configure line %d for both level and edge detection, choose one!\n", + hwirq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&gpio->lock, flags); + + lvl = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + pol = readl(gpio->regs + GPIO_INT_POLARITY); + + switch (flow_type) { + case IRQ_TYPE_EDGE_BOTH: + lvl |= BIT(hwirq); + phytium_gpio_toggle_trigger(gpio, hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on both edges\n", hwirq); + break; + case IRQ_TYPE_EDGE_RISING: + lvl |= BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on RISING edge\n", hwirq); + break; + case IRQ_TYPE_EDGE_FALLING: + lvl |= BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_edge_irq); + dev_dbg(gc->parent, "line %d: IRQ on FALLING edge\n", hwirq); + break; + case IRQ_TYPE_LEVEL_HIGH: + lvl &= ~BIT(hwirq); + pol |= BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on HIGH level\n", hwirq); + break; + case IRQ_TYPE_LEVEL_LOW: + lvl &= ~BIT(hwirq); + pol &= ~BIT(hwirq); + irq_set_handler_locked(d, handle_level_irq); + dev_dbg(gc->parent, "line %d: IRQ on LOW level\n", hwirq); + break; + } + + writel(lvl, gpio->regs + GPIO_INTTYPE_LEVEL); + if (flow_type != IRQ_TYPE_EDGE_BOTH) + writel(pol, gpio->regs + GPIO_INT_POLARITY); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_type); + +void phytium_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; + u32 val; + + /* Only port A can provide interrupt source */ + if (irqd_to_hwirq(d) >= gpio->ngpio[0]) + return; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + val = readl(gpio->regs + GPIO_INTEN); + val |= BIT(irqd_to_hwirq(d)); + writel(val, gpio->regs + GPIO_INTEN); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_enable); + +void phytium_gpio_irq_disable(struct irq_data *d) +{ + struct 
gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct phytium_gpio *gpio = gpiochip_get_data(gc);
+	unsigned long flags;
+	u32 val;
+
+	/* Only port A can provide interrupt source */
+	if (irqd_to_hwirq(d) >= gpio->ngpio[0])
+		return;
+
+	raw_spin_lock_irqsave(&gpio->lock, flags);
+
+	val = readl(gpio->regs + GPIO_INTEN);
+	val &= ~BIT(irqd_to_hwirq(d));
+	writel(val, gpio->regs + GPIO_INTEN);
+
+	raw_spin_unlock_irqrestore(&gpio->lock, flags);
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_disable);
+
+void phytium_gpio_irq_handler(struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct phytium_gpio *gpio = gpiochip_get_data(gc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned long pending;
+	int offset;
+
+	chained_irq_enter(irqchip, desc);
+
+	pending = readl(gpio->regs + GPIO_INTSTATUS);
+	if (pending) {
+		for_each_set_bit(offset, &pending, gpio->ngpio[0]) {
+			int gpio_irq = irq_find_mapping(gc->irq.domain,
+							offset);
+			generic_handle_irq(gpio_irq);
+
+			if ((irq_get_trigger_type(gpio_irq) &
+			    IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+				phytium_gpio_toggle_trigger(gpio, offset);
+		}
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_handler);
+
+int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	struct phytium_gpio *gpio = gpiochip_get_data(gc);
+	struct pin_loc loc;
+	void __iomem *ddr;
+
+	if (get_pin_location(gpio, offset, &loc))
+		return -EINVAL;
+	ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE);
+
+	return !(readl(ddr) & BIT(loc.offset));
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_get_direction);
+
+#ifdef CONFIG_SMP
+int
+phytium_gpio_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
+{
+	struct gpio_chip *chip_data = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_get_chip(chip_data->irq.parent_irq);
+	struct irq_data *data = irq_get_irq_data(chip_data->irq.parent_irq);
+
+	if (chip && chip->irq_set_affinity)
+		return chip->irq_set_affinity(data, mask_val, force);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_affinity);
+#endif
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Phytium GPIO Controller core");
diff --git a/drivers/gpio/gpio-phytium-core.h b/drivers/gpio/gpio-phytium-core.h
new file mode 100644
index 0000000000000000000000000000000000000000..a308a8aedba34c705c05ff701f991b321eacd10b
--- /dev/null
+++ b/drivers/gpio/gpio-phytium-core.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019-2023 Phytium Technology Co., Ltd.
+ */
+
+#ifndef _GPIO_PHYTIUM_H
+#define _GPIO_PHYTIUM_H
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+
+#include "gpiolib.h"
+
+#define GPIO_SWPORTA_DR		0x00 /* WR Port A Output Data Register */
+#define GPIO_SWPORTA_DDR	0x04 /* WR Port A Data Direction Register */
+#define GPIO_EXT_PORTA		0x08 /* RO Port A Input Data Register */
+#define GPIO_SWPORTB_DR		0x0c /* WR Port B Output Data Register */
+#define GPIO_SWPORTB_DDR	0x10 /* WR Port B Data Direction Register */
+#define GPIO_EXT_PORTB		0x14 /* RO Port B Input Data Register */
+
+#define GPIO_INTEN		0x18 /* WR Port A Interrupt Enable Register */
+#define GPIO_INTMASK		0x1c /* WR Port A Interrupt Mask Register */
+#define GPIO_INTTYPE_LEVEL	0x20 /* WR Port A Interrupt Level Register */
+#define GPIO_INT_POLARITY	0x24 /* WR Port A Interrupt Polarity Register */
+#define GPIO_INTSTATUS		0x28 /* RO Port A Interrupt Status Register */
+#define GPIO_RAW_INTSTATUS	0x2c /* RO Port A Raw Interrupt Status Register */
+#define GPIO_LS_SYNC		0x30 /* WR Level-sensitive Synchronization Enable Register */
+#define GPIO_DEBOUNCE		0x34 /* WR Debounce Enable Register */
+#define GPIO_PORTA_EOI		0x38 /* WO Port A Clear Interrupt Register */
+
+#define MAX_NPORTS		2
+#define NGPIO_DEFAULT		8
+#define NGPIO_MAX		32
+#define GPIO_PORT_STRIDE	(GPIO_EXT_PORTB - GPIO_EXT_PORTA)
+
+struct pin_loc {
+	unsigned int port;
+	unsigned int offset;
+};
+
+#ifdef CONFIG_PM_SLEEP
+struct phytium_gpio_ctx {
+	u32 swporta_dr;
+	u32 swporta_ddr;
+	u32 ext_porta;
+	u32 swportb_dr;
+	u32 swportb_ddr;
+	u32 ext_portb;
+	u32 inten;
+	u32 intmask;
+	u32 inttype_level;
+	u32 int_polarity;
+	u32 intstatus;
+	u32 raw_intstatus;
+	u32 ls_sync;
+	u32 debounce;
+};
+#endif
+
+struct phytium_gpio {
+	raw_spinlock_t lock;
+	void __iomem *regs;
+	struct gpio_chip gc;
+	struct irq_chip irq_chip;
+	unsigned int ngpio[2];
+	int irq[32];
+#ifdef CONFIG_PM_SLEEP
+	struct phytium_gpio_ctx ctx;
+#endif
+};
+
+int phytium_gpio_get(struct gpio_chip *gc, unsigned int offset);
+void phytium_gpio_set(struct gpio_chip *gc, unsigned int offset, int value);
+
+int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset);
+int phytium_gpio_direction_input(struct gpio_chip *gc, unsigned int offset);
+int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value);
+
+void phytium_gpio_irq_ack(struct irq_data *d);
+void phytium_gpio_irq_mask(struct irq_data *d);
+void phytium_gpio_irq_unmask(struct irq_data *d);
+int phytium_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type);
+void phytium_gpio_irq_enable(struct irq_data *d);
+void phytium_gpio_irq_disable(struct irq_data *d);
+void phytium_gpio_irq_handler(struct irq_desc *desc);
+int phytium_gpio_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force);
+#endif /* _GPIO_PHYTIUM_H */
diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..838d4f33f8763d768e9b4f2ce2cff958628560d8
--- /dev/null
+++ b/drivers/gpio/gpio-phytium-pci.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+
+#include "gpio-phytium-core.h"
+
+static int phytium_gpio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct device *dev = &pdev->dev;
+	struct phytium_gpio *gpio;
+	struct gpio_irq_chip *girq;
+	int err;
+
+	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+	if (!gpio)
+		return -ENOMEM;
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device: err %d\n", err);
+		goto out;
+	}
+
+	err = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+	if (err) {
+		dev_err(dev, "Failed to iomap PCI device: err %d\n", err);
+		goto out;
+	}
+
+	gpio->regs = pcim_iomap_table(pdev)[0];
+	if (!gpio->regs) {
+		dev_err(dev, "Cannot map PCI resource\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = pci_enable_msi(pdev);
+	if (err < 0)
+		goto out;
+
+	gpio->irq[0] = pdev->irq;
+	if (gpio->irq[0] < 0)
+		dev_warn(dev, "no irq is found.\n");
+
+	/* There is only one group of Pins at the moment. */
+	gpio->ngpio[0] = NGPIO_MAX;
+
+	/* irq_chip support */
+	gpio->irq_chip.name = dev_name(dev);
+	gpio->irq_chip.irq_ack = phytium_gpio_irq_ack;
+	gpio->irq_chip.irq_mask = phytium_gpio_irq_mask;
+	gpio->irq_chip.irq_unmask = phytium_gpio_irq_unmask;
+	gpio->irq_chip.irq_set_type = phytium_gpio_irq_set_type;
+	gpio->irq_chip.irq_enable = phytium_gpio_irq_enable;
+	gpio->irq_chip.irq_disable = phytium_gpio_irq_disable;
+
+	raw_spin_lock_init(&gpio->lock);
+
+	gpio->gc.base = -1;
+	gpio->gc.get_direction = phytium_gpio_get_direction;
+	gpio->gc.direction_input = phytium_gpio_direction_input;
+	gpio->gc.direction_output = phytium_gpio_direction_output;
+	gpio->gc.get = phytium_gpio_get;
+	gpio->gc.set = phytium_gpio_set;
+	gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1];
+	gpio->gc.label = dev_name(dev);
+	gpio->gc.parent = dev;
+	gpio->gc.owner = THIS_MODULE;
+
+	girq = &gpio->gc.irq;
+	girq->handler = handle_bad_irq;
+	girq->default_type = IRQ_TYPE_NONE;
+
+	girq->num_parents = 1;
+	girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+				     sizeof(*girq->parents), GFP_KERNEL);
+	if (!girq->parents)
+		return -ENOMEM;
+	girq->parents[0] = gpio->irq[0];
+	girq->parent_handler = phytium_gpio_irq_handler;
+
+	girq->chip = &gpio->irq_chip;
+
+	err = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+	if (err)
+		goto out;
+
+	dev_info(dev, "Phytium PCI GPIO controller @%pa registered\n",
+		 &gpio->regs);
+
+	pci_set_drvdata(pdev, gpio);
+
+out:
+	return err;
+}
+
+static const struct pci_device_id phytium_gpio_pci_ids[] = {
+	{ PCI_DEVICE(0x1DB7, 0xDC31) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, phytium_gpio_pci_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int phytium_gpio_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct phytium_gpio *gpio = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&gpio->lock, flags);
+
+	gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR);
+	gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR);
+	gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA);
+	gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR);
+	gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR);
+	gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB);
+
+	gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN);
+	gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK);
+	gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL);
+	gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY);
+
gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} + +static int phytium_gpio_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_gpio *gpio = pci_get_drvdata(pdev); + unsigned long flags; + + raw_spin_lock_irqsave(&gpio->lock, flags); + + writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR); + writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR); + writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA); + writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR); + writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR); + writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB); + + writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN); + writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(gpio->ctx.int_polarity, gpio->regs + GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pci_pm_ops, + phytium_gpio_pci_suspend, + phytium_gpio_pci_resume); + +static struct pci_driver phytium_gpio_pci_driver = { + .name = "gpio-phytium-pci", + .id_table = phytium_gpio_pci_ids, + .probe = phytium_gpio_pci_probe, + .driver = { + .pm = &phytium_gpio_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_gpio_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); diff --git a/drivers/gpio/gpio-phytium-platform.c b/drivers/gpio/gpio-phytium-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..eebed6019185b7a5e817fd9ae0a93d4077ee5387 --- /dev/null +++ b/drivers/gpio/gpio-phytium-platform.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. + * Derived from drivers/gpio/gpio-pl061.c + * Copyright (C) 2008, 2009 Provigent Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpio-phytium-core.h" + +static const struct of_device_id phytium_gpio_of_match[] = { + { .compatible = "phytium,gpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_gpio_of_match); + +static const struct acpi_device_id phytium_gpio_acpi_match[] = { + { "PHYT0001", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_gpio_acpi_match); + +static int phytium_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct phytium_gpio *gpio; + struct gpio_irq_chip *girq; + struct fwnode_handle *fwnode; + int err, irq_count; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + if (!device_get_child_node_count(dev)) + return -ENODEV; + + device_for_each_child_node(dev, fwnode) { + int idx; + + if (fwnode_property_read_u32(fwnode, "reg", &idx) || + idx >= MAX_NPORTS) { + dev_err(dev, "missing/invalid port index\n"); + fwnode_handle_put(fwnode); + return -EINVAL; + } + + if (fwnode_property_read_u32(fwnode, "nr-gpios", + &gpio->ngpio[idx])) { + dev_info(dev, + "failed to get number of gpios for Port%c\n", + idx ? 
'B' : 'A');
+			gpio->ngpio[idx] = NGPIO_DEFAULT;
+		}
+	}
+
+	/* irq_chip support */
+	gpio->irq_chip.name = dev_name(dev);
+	gpio->irq_chip.irq_ack = phytium_gpio_irq_ack;
+	gpio->irq_chip.irq_mask = phytium_gpio_irq_mask;
+	gpio->irq_chip.irq_unmask = phytium_gpio_irq_unmask;
+	gpio->irq_chip.irq_set_type = phytium_gpio_irq_set_type;
+	gpio->irq_chip.irq_enable = phytium_gpio_irq_enable;
+	gpio->irq_chip.irq_disable = phytium_gpio_irq_disable;
+#ifdef CONFIG_SMP
+	gpio->irq_chip.irq_set_affinity = phytium_gpio_irq_set_affinity;
+#endif
+	raw_spin_lock_init(&gpio->lock);
+
+	gpio->gc.base = -1;
+	gpio->gc.get_direction = phytium_gpio_get_direction;
+	gpio->gc.direction_input = phytium_gpio_direction_input;
+	gpio->gc.direction_output = phytium_gpio_direction_output;
+	gpio->gc.get = phytium_gpio_get;
+	gpio->gc.set = phytium_gpio_set;
+	gpio->gc.ngpio = gpio->ngpio[0] + gpio->ngpio[1];
+	gpio->gc.label = dev_name(dev);
+	gpio->gc.parent = dev;
+	gpio->gc.owner = THIS_MODULE;
+
+	girq = &gpio->gc.irq;
+	girq->handler = handle_bad_irq;
+	girq->default_type = IRQ_TYPE_NONE;
+
+	for (irq_count = 0; irq_count < gpio->ngpio[0]; irq_count++) {
+		gpio->irq[irq_count] = platform_get_irq(pdev, irq_count);
+		if (gpio->irq[irq_count] < 0) {
+			dev_warn(dev, "no irq is found.\n");
+			break;
+		}
+	}
+
+	girq->num_parents = irq_count;
+	girq->parents = gpio->irq;
+	girq->parent_handler = phytium_gpio_irq_handler;
+
+	girq->chip = &gpio->irq_chip;
+
+	err = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+	if (err)
+		return err;
+
+	platform_set_drvdata(pdev, gpio);
+	dev_info(dev, "Phytium GPIO controller @%pa registered\n",
+		 &res->start);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int phytium_gpio_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct phytium_gpio *gpio = platform_get_drvdata(pdev);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&gpio->lock, flags);
+
+	gpio->ctx.swporta_dr = readl(gpio->regs + GPIO_SWPORTA_DR);
+	gpio->ctx.swporta_ddr = readl(gpio->regs + GPIO_SWPORTA_DDR);
+	gpio->ctx.ext_porta = readl(gpio->regs + GPIO_EXT_PORTA);
+	gpio->ctx.swportb_dr = readl(gpio->regs + GPIO_SWPORTB_DR);
+	gpio->ctx.swportb_ddr = readl(gpio->regs + GPIO_SWPORTB_DDR);
+	gpio->ctx.ext_portb = readl(gpio->regs + GPIO_EXT_PORTB);
+
+	gpio->ctx.inten = readl(gpio->regs + GPIO_INTEN);
+	gpio->ctx.intmask = readl(gpio->regs + GPIO_INTMASK);
+	gpio->ctx.inttype_level = readl(gpio->regs + GPIO_INTTYPE_LEVEL);
+	gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY);
+	gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE);
+
+	raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+	return 0;
+}
+
+static int phytium_gpio_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct phytium_gpio *gpio = platform_get_drvdata(pdev);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&gpio->lock, flags);
+
+	writel(gpio->ctx.swporta_dr, gpio->regs + GPIO_SWPORTA_DR);
+	writel(gpio->ctx.swporta_ddr, gpio->regs + GPIO_SWPORTA_DDR);
+	writel(gpio->ctx.ext_porta, gpio->regs + GPIO_EXT_PORTA);
+	writel(gpio->ctx.swportb_dr, gpio->regs + GPIO_SWPORTB_DR);
+	writel(gpio->ctx.swportb_ddr, gpio->regs + GPIO_SWPORTB_DDR);
+	writel(gpio->ctx.ext_portb, gpio->regs + GPIO_EXT_PORTB);
+
+	writel(gpio->ctx.inten, gpio->regs + GPIO_INTEN);
+	writel(gpio->ctx.intmask, gpio->regs + GPIO_INTMASK);
+	writel(gpio->ctx.inttype_level, gpio->regs + GPIO_INTTYPE_LEVEL);
+	writel(gpio->ctx.int_polarity, gpio->regs +
GPIO_INT_POLARITY); + writel(gpio->ctx.debounce, gpio->regs + GPIO_DEBOUNCE); + + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_gpio_pm_ops, phytium_gpio_suspend, + phytium_gpio_resume); + +static struct platform_driver phytium_gpio_driver = { + .driver = { + .name = "gpio-phytium-platform", + .pm = &phytium_gpio_pm_ops, + .of_match_table = of_match_ptr(phytium_gpio_of_match), + .acpi_match_table = ACPI_PTR(phytium_gpio_acpi_match), + }, + .probe = phytium_gpio_probe, +}; + +module_platform_driver(phytium_gpio_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium GPIO driver"); diff --git a/drivers/gpio/gpio-phytium-sgpio.c b/drivers/gpio/gpio-phytium-sgpio.c new file mode 100644 index 0000000000000000000000000000000000000000..e7d7bb1c83761c5ca4c31d0413ccedd4220f8124 --- /dev/null +++ b/drivers/gpio/gpio-phytium-sgpio.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SGPIO Support + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SGPIO_CTL0_REG 0x00 +#define SGPIO_CTL0_REG_ENABLE BIT(0) +#define SGPIO_CTL0_REG_RX_DISABLE BIT(1) +#define SGPIO_CTL0_REG_L3_L0 GENMASK(11, 8) +#define SGPIO_CTL0_REG_CLK_DIV_NUM GENMASK(31, 12) +#define SGPIO_CTL1_REG 0x04 +#define SGPIO_CTL1_REG_READY BIT(0) +#define SGPIO_CTL1_REG_W_UPDATA BIT(1) +#define SGPIO_CTL1_REG_OP_MODE BIT(2) +#define SGPIO_CTL1_REG_OP_STATE BIT(3) +#define SGPIO_CTL1_REG_BIT_NUM GENMASK(14, 8) +#define SGPIO_CTL1_REG_INTERVAL_TIMER GENMASK(31, 16) +#define SGPIO_SOFT_RESET_REG 0x08 +#define SGPIO_SOFT_RESET_REG_MASK BIT(0) +#define SGPIO_IRQ_REG 0x0c +#define SGPIO_IRQ_REG_MASK BIT(0) +#define SGPIO_IRQ_M_REG 0x10 +#define SGPIO_IRQ_M_REG_MASK BIT(0) +#define SGPIO_WDATA0_REG 0x14 +#define SGPIO_WDATA_REG(x) (SGPIO_WDATA0_REG + ((x) == 2 ? 3 : (x))* 4) // 0x14, 0x18, 0x20 +#define SGPIO_RDATA0_REG 0x24 +#define SGPIO_RDATA_REG(x) (SGPIO_RDATA0_REG + (x) * 4) + +#define DEFAULT_L3_L0 0 + +#define GPIO_GROUP(x) ((x) >> 6) +#define GPIO_OFFSET(x) ((x) & GENMASK(5, 0)) +#define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1) + +struct phytium_sgpio { + struct gpio_chip gc; + void __iomem *regs; + unsigned int ngpios; + struct clk *pclk; + + struct mutex lock; + struct completion completion; +}; + +static bool phytium_sgpio_is_input(unsigned int offset) +{ + return !(offset % 2); +} + +static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) +{ + struct phytium_sgpio *gpio = gpiochip_get_data(gc); + u32 reg; + int rc = 0; + + if (phytium_sgpio_is_input(offset)) + return -EINVAL; + + reinit_completion(&gpio->completion); + + /* + * Since this is an output, read the cached value from rdata, + * then update value. + */ + /* cached data from wdata? 
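+	 * (The cached output state lives in SGPIO_WDATA_REG: it holds the
+	 * last value latched for transmission, and is what the
+	 * read-modify-write below must operate on.)
+	 */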
+	reg = readl(gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset)));
+	if (val)
+		reg |= GPIO_BIT(offset);
+	else
+		reg &= ~GPIO_BIT(offset);
+	writel(reg, gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset)));
+
+	/* Start transmission and wait for completion */
+	writel(readl(gpio->regs + SGPIO_CTL1_REG) | SGPIO_CTL1_REG_W_UPDATA,
+	       gpio->regs + SGPIO_CTL1_REG);
+	if (!wait_for_completion_timeout(&gpio->completion, msecs_to_jiffies(1000)))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static int phytium_sgpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+	return phytium_sgpio_is_input(offset) ? 0 : -EINVAL;
+}
+
+static int phytium_sgpio_direction_output(struct gpio_chip *gc, unsigned int offset, int val)
+{
+	struct phytium_sgpio *gpio = gpiochip_get_data(gc);
+	int rc;
+
+	mutex_lock(&gpio->lock);
+
+	/*
+	 * No special action is required for setting the direction; we'll
+	 * error-out in sgpio_set_value if this isn't an output GPIO
+	 */
+	rc = sgpio_set_value(&gpio->gc, offset, val);
+
+	mutex_unlock(&gpio->lock);
+
+	return rc;
+}
+
+static int phytium_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	return !!phytium_sgpio_is_input(offset);
+}
+
+static int phytium_sgpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+	struct phytium_sgpio *gpio = gpiochip_get_data(gc);
+	int rc = 0;
+	u32 val, ctl0;
+
+	mutex_lock(&gpio->lock);
+
+	if (!phytium_sgpio_is_input(offset)) {
+		val = readl(gpio->regs + SGPIO_WDATA_REG(GPIO_GROUP(offset)));
+		rc = !!(val & GPIO_BIT(offset));
+		mutex_unlock(&gpio->lock);
+		return rc;
+	}
+
+	reinit_completion(&gpio->completion);
+
+	/* Enable Rx */
+	ctl0 = readl(gpio->regs + SGPIO_CTL0_REG);
+	writel(ctl0 & ~SGPIO_CTL0_REG_RX_DISABLE, gpio->regs + SGPIO_CTL0_REG);
+
+	/* Start reading transaction and wait for completion */
+	writel(readl(gpio->regs + SGPIO_CTL1_REG) | SGPIO_CTL1_REG_W_UPDATA,
+	       gpio->regs + SGPIO_CTL1_REG);
+	if (!wait_for_completion_timeout(&gpio->completion, msecs_to_jiffies(1000))) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	val = readl(gpio->regs + SGPIO_RDATA_REG(GPIO_GROUP(offset)));
+	rc = !!(val & GPIO_BIT(offset));
+
+err:
+	/* Disable Rx to hold the value */
+	writel(ctl0 | SGPIO_CTL0_REG_RX_DISABLE, gpio->regs + SGPIO_CTL0_REG);
+	mutex_unlock(&gpio->lock);
+
+	return rc;
+}
+
+static void phytium_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+	struct phytium_sgpio *gpio = gpiochip_get_data(gc);
+
+	mutex_lock(&gpio->lock);
+
+	sgpio_set_value(gc, offset, val);
+
+	mutex_unlock(&gpio->lock);
+}
+
+static irqreturn_t phytium_sgpio_irq_handler(int irq, void *data)
+{
+	struct phytium_sgpio *gpio = data;
+
+	if (!readl(gpio->regs + SGPIO_IRQ_REG))
+		return IRQ_NONE;
+
+	/* Clear the interrupt */
+	writel(0, gpio->regs + SGPIO_IRQ_REG);
+
+	/* Check if tx/rx has been done */
+	if (!(readl(gpio->regs + SGPIO_CTL1_REG) & SGPIO_CTL1_REG_OP_STATE))
+		complete(&gpio->completion);
+
+	return IRQ_HANDLED;
+}
+
+static int phytium_sgpio_probe(struct platform_device *pdev)
+{
+	u32 pclk_freq, sclk_freq, clk_div;
+	struct phytium_sgpio *gpio;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	int rc;
+
+	gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+	if (!gpio)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	gpio->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(gpio->regs))
+		return PTR_ERR(gpio->regs);
+
+	if (devm_request_irq(dev, platform_get_irq(pdev, 0),
+			     phytium_sgpio_irq_handler,
+			     IRQF_SHARED, dev_name(dev), gpio)) {
+		dev_err(dev, "failed
to request IRQ\n"); + return -ENOENT; + } + + rc = fwnode_property_read_u32(dev_fwnode(dev), "ngpios", &gpio->ngpios); + if (rc < 0) { + dev_err(dev, "Could not read ngpios property\n"); + return -EINVAL; + } else if (gpio->ngpios % 32) { + dev_err(&pdev->dev, "Number of GPIOs not multiple of 32: %d\n", + gpio->ngpios); + return -EINVAL; + } + + rc = fwnode_property_read_u32(dev_fwnode(dev), "bus-frequency", &sclk_freq); + if (rc < 0) { + dev_err(dev, "Could not read bus-frequency property\n"); + return -EINVAL; + } + + gpio->pclk = devm_clk_get(dev, NULL); + if (IS_ERR(gpio->pclk)) { + dev_err(dev, "Could not get the APB clock property\n"); + return PTR_ERR(gpio->pclk); + } + rc = clk_prepare_enable(gpio->pclk); + if (rc) { + dev_err(dev, "failed to enable pclk: %d\n", rc); + return rc; + } + pclk_freq = clk_get_rate(gpio->pclk); + + /* + * From the datasheet: + * (pclk / 2) / (clk_div + 1) = sclk + */ + if (sclk_freq == 0) { + dev_err(dev, "SCLK should not be 0\n"); + return -EINVAL; + } + + clk_div = (pclk_freq / (sclk_freq * 2)) - 1; + if (clk_div > (1 << 20) - 1) { + dev_err(dev, "clk_div is overflow\n"); + return -EINVAL; + } + + writel(FIELD_PREP(SGPIO_CTL0_REG_CLK_DIV_NUM, clk_div) | + FIELD_PREP(SGPIO_CTL0_REG_L3_L0, DEFAULT_L3_L0) | + SGPIO_CTL0_REG_RX_DISABLE | SGPIO_CTL0_REG_ENABLE, + gpio->regs + SGPIO_CTL0_REG); + + writel(FIELD_PREP(SGPIO_CTL1_REG_BIT_NUM, gpio->ngpios) | + SGPIO_CTL1_REG_READY, gpio->regs + SGPIO_CTL1_REG); + + mutex_init(&gpio->lock); + init_completion(&gpio->completion); + platform_set_drvdata(pdev, gpio); + + gpio->gc.parent = dev; + gpio->gc.base = -1; + gpio->gc.ngpio = gpio->ngpios * 2; + gpio->gc.label = dev_name(dev); + gpio->gc.direction_input = phytium_sgpio_direction_input; + gpio->gc.direction_output = phytium_sgpio_direction_output; + gpio->gc.get_direction = phytium_sgpio_get_direction; + gpio->gc.get = phytium_sgpio_get; + gpio->gc.set = phytium_sgpio_set; + + return devm_gpiochip_add_data(dev, &gpio->gc, gpio); +} + +static const struct of_device_id phytium_sgpio_of_match[] = { + { .compatible = "phytium,sgpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_sgpio_of_match); + +static struct platform_driver phytium_sgpio_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(phytium_sgpio_of_match), + }, + .probe = phytium_sgpio_probe, +}; +module_platform_driver(phytium_sgpio_driver); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium SGPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a050a9aa9a5e5c3d44379b99e15644dc58e11506..b269688581903f1a637944f12c74cc4c51d36939 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -336,6 +336,8 @@ source "drivers/gpu/drm/tve200/Kconfig" source "drivers/gpu/drm/xen/Kconfig" +source "drivers/gpu/drm/phytium/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a6771cef85e25d74b73f1a2ff29bd34110605488..003ad888722984908b95c658e215a0ab322f2b56 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -106,4 +106,5 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ obj-$(CONFIG_DRM_XEN) += xen/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 
4488aad64643bbe1635e00e063b68415e8e39f5d..f22feb25eba9e3c6a2620ddecb8834724b56e32e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -261,11 +261,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, } /* Install the new fence list, seqcount provides the barriers */ - preempt_disable(); - write_seqcount_begin(&resv->seq); + write_seqlock(&resv->seq); RCU_INIT_POINTER(resv->fence, new); - write_seqcount_end(&resv->seq); - preempt_enable(); + write_sequnlock(&resv->seq); /* Drop the references to the removed fences or move them to ef_list */ for (i = j, k = 0; i < old->shared_count; ++i) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5b0d6d8b3ab8ed34b6fd61a649f9fb94ff8c515e..3fe4a1e93a1a55aa30955729d6a3e31088fe4116 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -516,7 +516,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, long timeout, struct intel_rps_client *rps_client) { - unsigned int seq = __read_seqcount_begin(&resv->seq); + unsigned int seq = read_seqbegin(&resv->seq); struct dma_fence *excl; bool prune_fences = false; @@ -569,9 +569,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, * signaled and that the reservation object has not been changed (i.e. * no new fences have been added). */ - if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { + if (prune_fences && !read_seqretry(&resv->seq, seq)) { if (reservation_object_trylock(resv)) { - if (!__read_seqcount_retry(&resv->seq, seq)) + if (!read_seqretry(&resv->seq, seq)) reservation_object_add_excl_fence(resv, NULL); reservation_object_unlock(resv); } @@ -4696,7 +4696,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * */ retry: - seq = raw_read_seqcount(&obj->resv->seq); + seq = read_seqbegin(&obj->resv->seq); /* Translate the exclusive fence to the READ *and* WRITE engine */ args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); @@ -4714,7 +4714,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, } } - if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) + if (args->busy && read_seqretry(&obj->resv->seq, seq)) goto retry; err = 0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b7c3982321369b6b1700be0828ad5ad57a528c32..eed1ba3009c645a3dbc0e64c585a0ddc82a76484 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ + preempt_disable_rt(); /* Get optional system timestamp before query. */ if (stime) @@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/ + preempt_enable_rt(); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5c2c93cbab12f8ebff29507a00953a24a9a877c6..7124510b9131ca6570da66236531b36c1d2916c5 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -356,9 +356,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, GEM_BUG_ON(!i915_request_completed(rq)); - local_irq_disable(); - - spin_lock(&engine->timeline.lock); + spin_lock_irq(&engine->timeline.lock); GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests)); list_del_init(&rq->link); spin_unlock(&engine->timeline.lock); @@ -372,9 +370,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); atomic_dec(&rq->i915->gt_pm.rps.num_waiters); } - spin_unlock(&rq->lock); - - local_irq_enable(); + spin_unlock_irq(&rq->lock); /* * The backing object for the context is done after switching to the diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b50c6b829715e220c9f3edede3dfa0e83497a804..33028d8f470e524810d12bee7a6cc864055a5243 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -2,6 +2,10 @@ #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _I915_TRACE_H_ +#ifdef CONFIG_PREEMPT_RT_BASE +#define NOTRACE +#endif + #include #include #include @@ -679,7 +683,7 @@ DEFINE_EVENT(i915_request, i915_request_add, TP_ARGS(rq) ); -#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) +#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE) DEFINE_EVENT(i915_request, i915_request_submit, TP_PROTO(struct i915_request *rq), TP_ARGS(rq) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index f7026e887fa9bd65d996c1afa851a92be28e6e6a..07e4ddebdd80aab2484cd75f40ca8669bd7c19f4 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "intel_drv.h" #include "intel_frontbuffer.h" #include @@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, #define VBLANK_EVASION_TIME_US 100 #endif +static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock); + /** * intel_pipe_update_start() - start update of a set of display registers * @new_crtc_state: the new crtc state @@ -107,7 +110,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) if (intel_psr_wait_for_idle(new_crtc_state)) DRM_ERROR("PSR idle timed out, atomic update may fail\n"); - local_irq_disable(); + local_lock_irq(pipe_update_lock); crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; @@ -131,11 +134,11 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) break; } - local_irq_enable(); + local_unlock_irq(pipe_update_lock); timeout = schedule_timeout(timeout); - local_irq_disable(); + local_lock_irq(pipe_update_lock); } finish_wait(wq, &wait); @@ -168,7 +171,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) return; irq_disable: - local_irq_disable(); + local_lock_irq(pipe_update_lock); } /** @@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) new_crtc_state->base.event = NULL; } - local_irq_enable(); + local_unlock_irq(pipe_update_lock); if (intel_vgpu_active(dev_priv)) return; diff --git a/drivers/gpu/drm/phytium/Kconfig 
b/drivers/gpu/drm/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..e3024feb69d0e02d3f6c3f2a5257d21f367aeaaa --- /dev/null +++ b/drivers/gpu/drm/phytium/Kconfig @@ -0,0 +1,7 @@ +config DRM_PHYTIUM + tristate "DRM Support for Phytium Graphics Card" + depends on DRM + select DRM_KMS_HELPER + help + Choose this option if you have a Phytium graphics card. + This driver provides kernel mode setting and buffer management to userspace. diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..2dc7ea1118cd4cbf141215a7f377e258ae2593ba --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,18 @@ +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + px210_dp.o \ + phytium_panel.o \ + px210_dc.o \ + phytium_pci.o \ + pe220x_dp.o \ + pe220x_dc.o \ + phytium_platform.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/pe220x_dc.c b/drivers/gpu/drm/phytium/pe220x_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..7fa323d65c853a69b9621e8669372e0d4976312c --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium PE220X display controller DRM driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +void pe220x_dc_hw_disable(struct drm_crtc *crtc); + +static const unsigned int pe220x_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static uint64_t pe220x_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int pe220x_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); +} +
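+/* + * The DC command register is a small mailbox to the SoC firmware: the + * driver writes a request word with FLAG_REQUEST set and + * phytium_wait_cmd_done() waits for the firmware to acknowledge it with + * FLAG_REPLY (behaviour inferred from the call signature, not from a + * datasheet). The pixel clock, panel power and backlight requests in + * this file all use this handshake. + */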
+void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + 0, PE220X_DC_CMD_REGISTER(phys_pipe)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(phys_pipe), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void pe220x_dc_hw_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int core_reset; + int phys_pipe = phytium_crtc->phys_pipe; + + /* disable pixel clock for bmc mode */ + if (phys_pipe == 0) + pe220x_dc_hw_disable(crtc); + + core_reset = (phys_pipe == 0) ? DC0_CORE_RESET : DC1_CORE_RESET; + + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + + /* assert the core, AXI and AHB resets in turn, then release them in reverse order */ + phytium_writel_reg(priv, config | core_reset, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | core_reset | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | core_reset | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | core_reset | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | core_reset, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); +} + +void pe220x_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* clear framebuffer */ + phytium_writel_reg(priv, CLEAR_VALUE_BLACK, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + /* disable cursor */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + config = ((config & (~CURSOR_FORMAT_MASK)) | CURSOR_FORMAT_DISABLED); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + mdelay(20); + + /* reset pix clock */ + pe220x_dc_hw_config_pix_clock(crtc, 0); + + if (phys_pipe == 0) { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC0_CORE_RESET, 0, 
PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC0_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } else { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC1_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC1_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } + udelay(20); +} + +int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + if (mode_cmd->modifier[count] != DRM_FORMAT_MOD_LINEAR) { + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + } + + return ret; +} + +void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_primary_formats_modifiers; + *formats = pe220x_primary_formats; + *format_count = ARRAY_SIZE(pe220x_primary_formats); +} + +void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = NULL; + *formats = pe220x_cursor_formats; + *format_count = ARRAY_SIZE(pe220x_cursor_formats); +} + +void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[1] >> U_PREFIX_SHIFT) & U_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[2] >> V_PREFIX_SHIFT) & V_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS); +} + +void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + config = ((iova >> CURSOR_PREFIX_SHIFT) & CURSOR_PREFIX_MASK); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PE220X_DC_CURSOR_HI_ADDRESS); +} diff --git a/drivers/gpu/drm/phytium/pe220x_dc.h b/drivers/gpu/drm/phytium/pe220x_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..744ab74e4cc461f384f03053fe86e3ec895514c4 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium PE220X display controller DRM driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PE220X_DC_H__ +#define __PE220X_DC_H__ + +#define PE220X_DC_PIX_CLOCK_MAX (594000) +#define PE220X_DC_HDISPLAY_MAX 3840 +#define PE220X_DC_VDISPLAY_MAX 2160 +#define PE220X_DC_ADDRESS_MASK 0x3f + +extern void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void pe220x_dc_hw_disable(struct drm_crtc *crtc); +extern int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +extern void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova); +extern void pe220x_dc_hw_reset(struct drm_crtc *crtc); +#endif /* __PE220X_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_dp.c b/drivers/gpu/drm/phytium/pe220x_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..19f38fc01106cc3ae33f42f9ee37407df13b3c4f --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display port DRM driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_dp.h" +#include "pe220x_dp.h" + +static uint8_t pe220x_dp_source_lane_count[2] = {1, 1}; + +/* [reg][link_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, /* CP_PADJ */ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, /* CP_IADJ */ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, /* FILT_PADJ */ + {0x0061, 0x006C, 0x006C, 0x0051}, /* INTDIV */ + {0x3333, 0x0000, 0x0000, 0x0000}, /* FRACDIVL */ + {0x0000, 0x0000, 0x0000, 0x0000}, /* FRACDIVH */ + {0x0042, 0x0048, 0x0048, 0x0036}, /* HIGH_THR */ + {0x0002, 0x0002, 0x0002, 0x0002}, /* PDIAG_CTRL */ + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, /* VCOCAL_PLLCNT_START */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PEFCNT */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PLLCNT_START */ + {0x0005, 0x0005, 0x0005, 0x0005}, /* LOCK_PLLCNT_THR */ +}; + +/* [link_rate][swing][emphasis] */ +static int mgnfs_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x001f, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +/* [link_rate][swing][emphasis] */ +static int cpost_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x0005, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; +
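+/* + * Indexing example for the tables above: a 5.4 Gbps link with + * voltage-swing level 1 and pre-emphasis level 1 is programmed with + * mgnfs_val[2][1][1] = 0x006 and cpost_val[2][1][1] = 0x0013 by + * pe220x_dp_hw_set_phy_lane_setting() below. + */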
+static int pe220x_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + int port = phytium_dp->port % 2; + int i = 0, data, tmp, tmp1, index = 0, mask = 0; + int timeout = 500, ret = 0; + + /* set pma powerdown */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A3_POWERDOWN3 << (i * A3_POWERDOWN3_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* lane pll disable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + mask |= (((1<source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* set pma power active */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + DRM_ERROR("dp(%d) phy pll lock failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_set_phy_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + int port = phytium_dp->port % 2; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + default: + voltage_swing = 0; + break; + } + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + default: + pre_emphasis = 0; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not supported\n", link_rate); + link_rate_index = 2; + break; + } + + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), LOCK); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_TXCC_CTRL(port), TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DRV(port), TX_DRV); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_MGNFS(port), + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_CPOST(port), + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK); +} + +static int pe220x_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + phytium_phy_writel(phytium_dp, 
PE220X_PHY_APB_RESET(port), APB_RESET); + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET); + + /* config lane to dp mode */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (LANE_BIT << (i * LANE_BIT_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_MODE(port), data); + + /* pll clock enable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* config input 20 bit */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (BIT_20 << (i * BIT_20_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA_WIDTH(port), data); + + /* config lane active power state */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* link reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_LINK_RESET(port), LINK_RESET); + + phytium_phy_writel(phytium_dp, PE220X_PHY_SGMII_DPSEL_INIT(port), DP_SEL); + + /* config single link */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_CFG(port), SINGLE_LINK); + + /* pipe reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET_DEASSERT); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + DRM_ERROR("reset dp(%d) phy failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void pe220x_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void pe220x_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void pe220x_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = 
phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t pe220x_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int pe220x_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0; + int ret = 0; + + if (level > PE220X_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); +out: + return ret; +} + +bool pe220x_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + return false; +} + +int pe220x_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, DP_RESET, group_offset, PE220X_DP_CONTROLLER_RESET); + udelay(500); + phytium_writel_reg(priv, AUX_CLK_DIVIDER_100, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); + phytium_writel_reg(priv, SUPPORT_EDP_1_4, group_offset, PHYTIUM_EDP_CRC_ENABLE); + + return 0; +} + +uint8_t pe220x_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return pe220x_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func pe220x_dp_funcs = { + .dp_hw_get_source_lane_count = pe220x_dp_hw_get_source_lane_count, + .dp_hw_reset = pe220x_dp_hw_reset, + .dp_hw_spread_is_enable = pe220x_dp_hw_spread_is_enable, + .dp_hw_set_backlight = pe220x_dp_hw_set_backlight, + .dp_hw_get_backlight = pe220x_dp_hw_get_backlight, + .dp_hw_disable_backlight = pe220x_dp_hw_disable_backlight, + .dp_hw_enable_backlight = pe220x_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = pe220x_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = pe220x_dp_hw_poweron_panel, + .dp_hw_init_phy = pe220x_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = pe220x_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = pe220x_dp_hw_set_phy_lane_and_rate, +}; + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &pe220x_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/pe220x_dp.h b/drivers/gpu/drm/phytium/pe220x_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..a79bf5b5e3ab8d70ccd1d2f683e916a96fee2ae7 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium display port DRM driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PE220X_DP_H__ +#define __PE220X_DP_H__ + +#define PE220X_DP_BACKLIGHT_MAX 100 + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PE220X_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_reg.h b/drivers/gpu/drm/phytium/pe220x_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..7ec9620f5cbff628ee985faf34af682737e3a7b5 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_reg.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium PE220X display engine register + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_REG_H__ +#define __PE220X_REG_H__ + +#include "phytium_reg.h" + +/* dc register */ +#define PE220X_DC_CLOCK_CONTROL 0x0000 +#define DC1_CORE_RESET (1<<18) +#define DC0_CORE_RESET (1<<17) +#define AXI_RESET (1<<16) +#define AHB_RESET (1<<12) + +#define PE220X_DC_CMD_REGISTER(pipe) (PE220X_DC_BASE(0) + 0x00F0 + 0x4*(pipe)) +#define FLAG_REPLY (1<<31) +#define FLAG_REQUEST (1<<30) +#define CMD_PIXEL_CLOCK (0x0 << 28) +#define CMD_BACKLIGHT (0x1 << 28) +#define CMD_DC_DP_RESET (0x3 << 28) +#define BACKLIGHT_SHIFT 21 +#define BACKLIGHT_MASK 0x7f +#define BACKLIGHT_MAX 100 +#define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +#define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +#define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +#define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +#define PIXEL_CLOCK_MASK (0x1fffff) + +#define PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS 0x1404 +#define PREFIX_MASK 0xff +#define PREFIX_SHIFT 32 + +#define PE220X_DC_CURSOR_HI_ADDRESS 0x1490 +#define CURSOR_PREFIX_MASK 0xff +#define CURSOR_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS 0x1534 +#define U_PREFIX_MASK 0xff +#define U_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS 0x153c +#define V_PREFIX_MASK 0xff +#define V_PREFIX_SHIFT 32 + +/* dp register */ +#define PE220X_DP_CONTROLLER_RESET 0x0850 +#define DP_RESET 0x1 + +/* address transform register */ +#define PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define SRC_ADDR_OFFSET 22 +#define SRC_ADDR_MASK 0xffffffffff + +#define PE220X_DC_ADDRESS_TRANSFORM_SIZE 0x4 +#define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +#define SIZE_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR 0x8 +#define DST_ADDR_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define DC_DP_RESET_STATUS(pipe) (1 << pipe) +#define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) + +#define PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define BACKLIGHT_VALUE_MASK (0x7f) +#define BACKLIGHT_VALUE_SHIFT 16 + +/* phy register start */ +#define PE220X_PHY_BASE(pipe) (0x100000*pipe) + +#define PE220X_PHY_PIPE_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40254) +#define RESET 0x0 +#define RESET_DEASSERT 0x1 + +#define PE220X_PHY_MODE(pipe) (PE220X_PHY_BASE(pipe) + 0x40034) +#define LANE_BIT (0x3) +#define LANE_BIT_SHIFT 0x2 + +#define PE220X_PHY_LINK_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x40044) +#define LANE_MASTER 0x1 +#define LANE_MASTER_SHIFT 1 + +#define PE220X_PHY_PLL_EN(pipe) (PE220X_PHY_BASE(pipe) + 0x40214) +#define PLL_EN 0x1 +#define PLL_EN_SHIFT 1 + +#define PE220X_PHY_PMA_WIDTH(pipe) (PE220X_PHY_BASE(pipe) + 0x4021c) +#define BIT_20 0x5 +#define BIT_20_SHIFT 4 + +#define PE220X_PHY_PLL_SOURCE_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x4004C) + +#define PE220X_PHY_PMA0_POWER(pipe) (PE220X_PHY_BASE(pipe) + 0x402bc) +#define A0_ACTIVE 0x1 +#define A0_ACTIVE_SHIFT 8 +#define A3_POWERDOWN3 0x8 +#define A3_POWERDOWN3_SHIFT 8 + +#define 
PE220X_PHY_LINK_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40258) +#define LINK_RESET 0x1 +#define LINK_RESET_MASK 0x1 +#define LINTK_RESET_SHIFT 0x1 + +#define PE220X_PHY_SGMII_DPSEL_INIT(pipe) (PE220X_PHY_BASE(pipe) + 0x40260) +#define DP_SEL 0x1 + +#define PE220X_PHY_APB_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40250) +#define APB_RESET 0x1 + +/* phy origin register */ +#define PE220X_PHY_PLL_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x30038) +#define SINGLE_LINK 0x0 + +#define PE220X_PHY_PMA_CONTROL(pipe) (PE220X_PHY_BASE(pipe) + 0x3800c) +#define CONTROL_ENABLE 0x1 +#define CONTROL_ENABLE_MASK 0x1 +#define CONTROL_ENABLE_SHIFT 0x1 + +#define PE220X_PHY_PMA_CONTROL2(pipe) (PE220X_PHY_BASE(pipe) + 0x38004) +#define PLL0_LOCK_DONE (0x1 << 6) + +#define PE220X_PHY_PLL0_CLK_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0X684) +#define PLL_LINK_RATE_162000 0xf01 +#define PLL_LINK_RATE_270000 0x701 +#define PLL_LINK_RATE_540000 0x301 +#define PLL_LINK_RATE_810000 0x200 + +#define PE220X_PHY_HSCLK0_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x18398) +#define HSCLK_LINK_0 0x0 +#define HSCLK_LINK_1 0x1 + +#define PE220X_PHY_HSCLK0_DIV(pipe) (PE220X_PHY_BASE(pipe) + 0x1839c) +#define HSCLK_LINK_RATE_162000 0x2 +#define HSCLK_LINK_RATE_270000 0x1 +#define HSCLK_LINK_RATE_540000 0x0 +#define HSCLK_LINK_RATE_810000 0x0 + +#define PE220X_PHY_PLLDRC0_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18394) +#define PLLDRC_LINK0 0x1 +#define PLLDRC_LINK1 0x9 + +#define PE220X_PHY_PLL0_DSM_M0(pipe) (PE220X_PHY_BASE(pipe) + 0x250) +#define PLL0_DSM_M0 0x4 +#define PE220X_PHY_PLL0_VCOCAL_START(pipe) (PE220X_PHY_BASE(pipe) + 0x218) +#define PLL0_VCOCAL_START 0xc5e +#define PE220X_PHY_PLL0_VCOCAL_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x208) +#define PLL0_VCOCAL_CTRL 0x3 + +#define PE220X_PHY_PLL0_CP_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x690) +#define PE220X_PHY_PLL0_CP_IADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x694) +#define PE220X_PHY_PLL0_CP_FILT_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x698) +#define PE220X_PHY_PLL0_INTDIV(pipe) (PE220X_PHY_BASE(pipe) + 0x240) +#define PE220X_PHY_PLL0_FRACDIVL(pipe) (PE220X_PHY_BASE(pipe) + 0x244) +#define PE220X_PHY_PLL0_FRACDIVH(pipe) (PE220X_PHY_BASE(pipe) + 0x248) +#define PE220X_PHY_PLL0_HIGH_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x24c) +#define PE220X_PHY_PLL0_PDIAG_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x680) +#define PE220X_PHY_PLL0_VCOCAL_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x220) +#define PE220X_PHY_PLL0_LOCK_PEFCNT(pipe) (PE220X_PHY_BASE(pipe) + 0x270) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x278) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x27c) + +#define PE220X_PHY_PLL0_TX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x18400) +#define PLL0_TX_PSC_A0 0xfb +#define PE220X_PHY_PLL0_TX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x18408) +#define PLL0_TX_PSC_A2 0x4aa +#define PE220X_PHY_PLL0_TX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x1840c) +#define PLL0_TX_PSC_A3 0x4aa +#define PE220X_PHY_PLL0_RX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x28000) +#define PLL0_RX_PSC_A0 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x28008) +#define PLL0_RX_PSC_A2 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x2800C) +#define PLL0_RX_PSC_A3 0x0 +#define PE220X_PHY_PLL0_RX_PSC_CAL(pipe) (PE220X_PHY_BASE(pipe) + 0x28018) +#define PLL0_RX_PSC_CAL 0x0 + +#define PE220X_PHY_PLL0_XCVR_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x183a8) +#define PLL0_XCVR_CTRL 0xf + +#define PE220X_PHY_PLL0_RX_GCSM1_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28420) 
+#define PLL0_RX_GCSM1_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_GCSM2_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28440) +#define PLL0_RX_GCSM2_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_PERGCSM_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28460) +#define PLL0_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PE220X_PHY_PLL0_TX_DIAG_ACYA(pipe) (PE220X_PHY_BASE(pipe) + 0x1879c) +#define LOCK 1 +#define UNLOCK 0 + +#define PE220X_PHY_PLL0_TX_TXCC_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18100) +#define TX_TXCC_CTRL 0x8a4 + +#define PE220X_PHY_PLL0_TX_DRV(pipe) (PE220X_PHY_BASE(pipe) + 0x18318) +#define TX_DRV 0x3 + +#define PE220X_PHY_PLL0_TX_MGNFS(pipe) (PE220X_PHY_BASE(pipe) + 0x18140) + +#define PE220X_PHY_PLL0_TX_CPOST(pipe) (PE220X_PHY_BASE(pipe) + 0x18130) + +#endif /* __PE220X_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c new file mode 100644 index 0000000000000000000000000000000000000000..21ffb6587dac99cc2e51b5b5c3fe2a87cd98279b --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -0,0 +1,735 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_dp.h" +#include "px210_dc.h" +#include "pe220x_dc.h" +#include "phytium_reg.h" + +#define MAXKERNELSIZE 9 +#define SUBPIXELINDEXBITS 5 +#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) +#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) +#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) +#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) +#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) +#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) +#define PHYPI 3.14159265358979323846f + +#define MATH_Add(X, Y) (float)((X) + (Y)) +#define MATH_Multiply(X, Y) (float)((X) * (Y)) +#define MATH_Divide(X, Y) (float)((X) / (Y)) +#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +#define MATH_I2Float(X) (float)(X) + +struct filter_blit_array { + uint8_t kernelSize; + uint32_t scaleFactor; + uint32_t *kernelStates; +}; + +static uint32_t dc_scaling_get_factor(uint32_t src_size, uint32_t dst_size) +{ + uint32_t factor = 0; + + factor = ((src_size - 1) << SCALE_FACTOR_SRC_OFFSET) / (dst_size - 1); + + return factor; +} + +static float dc_sint(float x) +{ + const float B = 1.2732395477; + const float C = -0.4052847346; + const float P = 0.2310792853; + float y; + + if (x < 0) + y = B*x - C*x*x; + else + y = B*x + C*x*x; + if (y < 0) + y = P * (y * (0 - y) - y) + y; + else + y = P * (y * y - y) + y; + return y; +} + +static float dc_sinc_filter(float x, int radius) +{ + float pit, pitd, f1, f2, result; + float f_radius = MATH_I2Float(radius); + + if (x == 0.0f) { + result = 1.0f; + } else if ((x < -f_radius) || (x > f_radius)) { + result = 0.0f; + } else { + pit = MATH_Multiply(PHYPI, x); + pitd = MATH_Divide(pit, f_radius); + f1 = MATH_Divide(dc_sint(pit), pit); + f2 = MATH_Divide(dc_sint(pitd), pitd); + result = MATH_Multiply(f1, f2); + } + + return result; +} + +static int dc_calculate_sync_table( + uint8_t kernel_size, + uint32_t src_size, + uint32_t dst_size, + struct filter_blit_array *kernel_info) +{ + uint32_t scale_factor; + float f_scale; + int kernel_half; + float f_subpixel_step; + float f_subpixel_offset; + uint32_t subpixel_pos; + int kernel_pos; + int padding; + uint16_t 
*kernel_array; + int range = 0; + + do { + /* Compute the scale factor. */ + scale_factor = dc_scaling_get_factor(src_size, dst_size); + + /* Same kernel size and ratio as before? */ + if ((kernel_info->kernelSize == kernel_size) && + (kernel_info->scaleFactor == scale_factor)) { + break; + } + + /* check the array */ + if (kernel_info->kernelStates == NULL) + break; + + /* Store new parameters. */ + kernel_info->kernelSize = kernel_size; + kernel_info->scaleFactor = scale_factor; + + /* Compute the scale factor. */ + f_scale = MATH_DivideFromUInteger(dst_size, src_size); + + /* Adjust the factor for magnification. */ + if (f_scale > 1.0f) + f_scale = 1.0f; + + /* Calculate the kernel half. */ + kernel_half = (int) (kernel_info->kernelSize >> 1); + + /* Calculate the subpixel step. */ + f_subpixel_step = MATH_Divide(1.0f, MATH_I2Float(SUBPIXELCOUNT)); + + /* Init the subpixel offset. */ + f_subpixel_offset = 0.5f; + + /* Determine kernel padding size. */ + padding = (MAXKERNELSIZE - kernel_info->kernelSize) / 2; + + /* Set initial kernel array pointer. */ + kernel_array = (uint16_t *) (kernel_info->kernelStates + 1); + + /* Loop through each subpixel. */ + for (subpixel_pos = 0; subpixel_pos < SUBPIXELLOADCOUNT; subpixel_pos++) { + /* Define a temporary set of weights. */ + float fSubpixelSet[MAXKERNELSIZE]; + + /* Init the sum of all weights for the current subpixel. */ + float fWeightSum = 0.0f; + uint16_t weightSum = 0; + short int adjustCount, adjustFrom; + short int adjustment; + + /* Compute weights. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Determine the current index. */ + int index = kernel_pos - padding; + + /* Pad with zeros. */ + if ((index < 0) || (index >= kernel_info->kernelSize)) { + fSubpixelSet[kernel_pos] = 0.0f; + } else { + if (kernel_info->kernelSize == 1) { + fSubpixelSet[kernel_pos] = 1.0f; + } else { + /* Compute the x position for filter function. */ + float fX = MATH_Add( + MATH_I2Float(index - kernel_half), + f_subpixel_offset); + fX = MATH_Multiply(fX, f_scale); + + /* Compute the weight. */ + fSubpixelSet[kernel_pos] = dc_sinc_filter(fX, + kernel_half); + } + + /* Update the sum of weights. */ + fWeightSum = MATH_Add(fWeightSum, + fSubpixelSet[kernel_pos]); + } + } + + /* Adjust weights so that the sum will be 1.0. */ + for (kernel_pos = 0; kernel_pos < MAXKERNELSIZE; kernel_pos++) { + /* Normalize the current weight. */ + float fWeight = MATH_Divide(fSubpixelSet[kernel_pos], + fWeightSum); + + /* Convert the weight to fixed point and store in the table. */ + if (fWeight == 0.0f) + kernel_array[kernel_pos] = 0x0000; + else if (fWeight >= 1.0f) + kernel_array[kernel_pos] = 0x4000; + else if (fWeight <= -1.0f) + kernel_array[kernel_pos] = 0xC000; + else + kernel_array[kernel_pos] = + (int16_t) MATH_Multiply(fWeight, 16384.0f); + weightSum += kernel_array[kernel_pos]; + } + + /* Adjust the fixed point coefficients. */ + adjustCount = 0x4000 - weightSum; + if (adjustCount < 0) { + adjustCount = -adjustCount; + adjustment = -1; + } else { + adjustment = 1; + } + + adjustFrom = (MAXKERNELSIZE - adjustCount) / 2; + for (kernel_pos = 0; kernel_pos < adjustCount; kernel_pos++) { + range = (MAXKERNELSIZE*subpixel_pos + adjustFrom + kernel_pos) * + sizeof(uint16_t); + if ((range >= 0) && (range < KERNELTABLESIZE)) + kernel_array[adjustFrom + kernel_pos] += adjustment; + else + DRM_ERROR("%s failed\n", __func__); + } + + kernel_array += MAXKERNELSIZE; + + /* Advance to the next subpixel. */ + f_subpixel_offset = MATH_Add(f_subpixel_offset, -f_subpixel_step); + } + } while (0); + + return 0; +} +
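+/* + * The weights written above are signed 2.14 fixed point: 0x4000 is +1.0 + * and 0xC000 is -1.0, so e.g. a weight of 0.5 is stored as + * (int16_t)(0.5f * 16384.0f) = 0x2000. + */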
+static void phytium_dc_scaling_config(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t scale_factor_x, scale_factor_y, i; + uint32_t kernelStates[128]; + struct filter_blit_array kernel_info_width; + void *tmp = NULL; + + if (mode->hdisplay != mode->crtc_hdisplay || mode->vdisplay != mode->crtc_vdisplay) { + phytium_crtc->src_width = mode->hdisplay; + phytium_crtc->src_height = mode->vdisplay; + phytium_crtc->dst_width = mode->crtc_hdisplay; + phytium_crtc->dst_height = mode->crtc_vdisplay; + + phytium_crtc->dst_x = (mode->crtc_hdisplay - phytium_crtc->dst_width) / 2; + phytium_crtc->dst_y = (mode->crtc_vdisplay - phytium_crtc->dst_height) / 2; + + scale_factor_x = dc_scaling_get_factor(phytium_crtc->src_width, + phytium_crtc->dst_width); + scale_factor_y = dc_scaling_get_factor(phytium_crtc->src_height, + phytium_crtc->dst_height); + if (scale_factor_y > (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET)) + scale_factor_y = (SCALE_FACTOR_Y_MAX << SCALE_FACTOR_SRC_OFFSET); + + phytium_writel_reg(priv, scale_factor_x & SCALE_FACTOR_X_MASK, + group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X); + phytium_writel_reg(priv, scale_factor_y & SCALE_FACTOR_Y_MASK, + group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y); + phytium_writel_reg(priv, FRAMEBUFFER_TAP, + group_offset, PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG); + + tmp = kmalloc(KERNELSTATES, GFP_KERNEL); + if (!tmp) { + DRM_ERROR("malloc %zu failed\n", KERNELSTATES); + return; + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); + dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, + phytium_crtc->src_width, + phytium_crtc->dst_width, + &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, HORI_FILTER_INDEX, + group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX); + for (i = 0; i < 128; i++) { + phytium_writel_reg(priv, kernelStates[i], + group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER); + } + + memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); + kernel_info_width.kernelStates = tmp; + memset(kernel_info_width.kernelStates, 0, KERNELSTATES); + kernel_neon_begin(); + dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, + phytium_crtc->dst_height, &kernel_info_width); + memset(kernelStates, 0, sizeof(kernelStates)); + memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); + kernel_neon_end(); + phytium_writel_reg(priv, VERT_FILTER_INDEX, + group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX); + for (i = 0; i < 128; i++) + phytium_writel_reg(priv, kernelStates[i], + group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER); + phytium_writel_reg(priv, INITIALOFFSET, + group_offset, PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET); + kfree(tmp); + phytium_crtc->scale_enable = true; + } else { + phytium_crtc->scale_enable = false; + } +} +
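+/* + * DRM gamma LUT entries are 16 bits per channel, while the DC's gamma + * RAM holds 10 bits per channel packed into a single word, hence the + * ">> 6" when the LUT is written out below. + */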
+static void phytium_crtc_gamma_set(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + struct drm_crtc_state *state = crtc->state; + struct drm_color_lut *lut; + int i; + + if (state->gamma_lut) { + if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), + "gamma size does not match\n")) + return; + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } + } +} + +static void phytium_crtc_gamma_init(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + uint16_t *red, *green, *blue; + int i; + + if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size does not match\n")) + return; + + red = crtc->gamma_store; + green = red + crtc->gamma_size; + blue = green + crtc->gamma_size; + + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } +} + +static void phytium_crtc_destroy(struct drm_crtc *crtc) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(phytium_crtc); +} + +struct drm_crtc_state * +phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct phytium_crtc_state *phytium_crtc_state = NULL; + + phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), + GFP_KERNEL); + if (!phytium_crtc_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, + &phytium_crtc_state->base); + + return &phytium_crtc_state->base; +} + +void +phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct phytium_crtc_state *phytium_crtc_state = + to_phytium_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + kfree(phytium_crtc_state); +} + +static const struct drm_crtc_funcs phytium_crtc_funcs = { + .gamma_set = drm_atomic_helper_legacy_gamma_set, + .set_config = drm_atomic_helper_set_config, + .destroy = phytium_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, + .atomic_destroy_state = phytium_crtc_atomic_destroy_state, +}; + +static void +phytium_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = 
&crtc->state->adjusted_mode; + struct drm_atomic_state *state = old_state->state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int config = 0, i = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + + switch (conn->display_info.bpc) { + case 10: + phytium_crtc->bpc = DP_RGB101010; + break; + case 6: + phytium_crtc->bpc = DP_RGB666; + break; + default: + phytium_crtc->bpc = DP_RGB888; + break; + } + } + + /* config pix clock */ + phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); + + phytium_dc_scaling_config(crtc, old_state); + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + config |= FRAMEBUFFER_GAMMA_ENABLE; + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; 
+ drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) +{ + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > 
priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc *crtc; + struct phytium_crtc *phytium_crtc = NULL; + + drm_for_each_crtc(crtc, drm_dev) { + phytium_crtc = to_phytium_crtc(crtc); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(crtc); + phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_crtc->dc_hw_config_pix_clock = px210_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = px210_dc_hw_disable; + phytium_crtc->dc_hw_reset = NULL; + priv->dc_reg_base[phys_pipe] = PX210_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = PX210_DCREQ_BASE(phys_pipe); + priv->address_transform_base = PX210_ADDRESS_TRANSFORM_BASE; + } else if (IS_PE220X(priv)) { + phytium_crtc->dc_hw_config_pix_clock = pe220x_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = pe220x_dc_hw_disable; + phytium_crtc->dc_hw_reset = pe220x_dc_hw_reset; + priv->dc_reg_base[phys_pipe] = PE220X_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = 0x0; + priv->address_transform_base = PE220X_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(&phytium_crtc->base); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 
0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 0000000000000000000000000000000000000000..86f894ba5d7068277f11630ae1922f2be1a6d6a4 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_CRTC_H__ +#define __PHYTIUM_CRTC_H__ + +struct phytium_crtc { + struct drm_crtc base; + int phys_pipe; + unsigned int bpc; + + /* scale */ + uint32_t src_width; + uint32_t src_height; + uint32_t dst_width; + uint32_t dst_height; + uint32_t dst_x; + uint32_t dst_y; + bool scale_enable; + bool reserve[3]; + + void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); + void (*dc_hw_disable)(struct drm_crtc *crtc); + void (*dc_hw_reset)(struct drm_crtc *crtc); +}; + +struct phytium_crtc_state { + struct drm_crtc_state base; +}; + +#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) +#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) + +void phytium_crtc_resume(struct drm_device *drm_dev); +int phytium_crtc_init(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_CRTC_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..dc0d42f001361e7a4d6bf180bef2b33612dcfdeb --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
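+ *
+ * Debugfs entries for the DP connectors (dp_state, dpcd, dp_register,
+ * dp_trigger_train_fail; eDP adds edp_power and edp_backlight) plus a
+ * global mem_state file.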
+ */
+
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_reg.h"
+
+const char *const mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT] = {
+	"Memory_Vram_Total",
+	"Memory_Vram_Alloc",
+	"Memory_System_Carveout_Total",
+	"Memory_System_Carveout_Alloc",
+	"Memory_System_Alloc",
+};
+
+static ssize_t
+phytium_dp_register_write(struct file *filp,
+			  const char __user *ubuf,
+			  size_t len,
+			  loff_t *ppos)
+{
+	char tmp[16];
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	memset(tmp, 0, sizeof(tmp));
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+	tmp[len] = '\0';
+
+	return len;
+}
+
+static int phytium_dp_register_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_M_VID,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_N_VID,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_DATA_COUNT,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE,
+		   phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE));
+	seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE,
+		   phytium_readl_reg(priv, group_offset,
PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations 
phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ + size_t size; + /* Only valid for eDP. */ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? 
b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_state_write, +}; + +static const struct phytium_debugfs_files { + const char *name; + const struct file_operations *fops; +} phytium_debugfs_connector_files[] = { + {"dp_state", &phytium_dp_state_fops}, + {"dpcd", &phytium_dpcd_fops}, + {"dp_register", &phytium_dp_register_fops}, + {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, +}; + +static const struct phytium_debugfs_files phytium_edp_debugfs_connector_files[] = { + {"edp_power", &phytium_edp_power_fops}, + {"edp_backlight", &phytium_edp_backlight_fops}, +}; + +int phytium_debugfs_connector_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct dentry *ent; + int i; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!root) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + if (phytium_dp->is_edp) + for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, + 0644, 
+ root, + connector, + phytium_edp_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + return 0; +} + +static int phytium_mem_state_show(struct seq_file *m, void *data) +{ + struct phytium_display_private *priv = m->private; + uint8_t i; + + for (i = 0; i < ARRAY_SIZE(mem_state); i++) + seq_printf(m, "%-34s %10lld\n", mem_state[i], priv->mem_state[i]); + + return 0; +} + +static int phytium_mem_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_mem_state_show, inode->i_private); +} + +static const struct file_operations phytium_mem_state_fops = { + .owner = THIS_MODULE, + .open = phytium_mem_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct phytium_debugfs_files phytium_debugfs_display_files[] = { + {"mem_state", &phytium_mem_state_fops}, +}; + +int phytium_debugfs_display_register(struct phytium_display_private *priv) +{ + struct drm_minor *minor = priv->dev->primary; + struct dentry *root = minor->debugfs_root; + struct dentry *ent; + + if (!root) + return -ENODEV; + + ent = debugfs_create_file(phytium_debugfs_display_files[0].name, + 0644, + root, + priv, + phytium_debugfs_display_files[0].fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..aa8e2922ec257ec465d02657c0c81d40405f2177 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DEBUGFS_H__ +#define __PHYTIUM_DEBUGFS_H__ + +int phytium_debugfs_connector_add(struct drm_connector *connector); +int phytium_debugfs_display_register(struct phytium_display_private *priv); + +#endif /* __PHYTIUM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..22e94fa874891fcce7a52dd6ceaa6c691d550f7b --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
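+ *
+ * Core driver: module parameters, IRQ/vblank handling, mode-setting
+ * initialization, and display suspend/resume hooks.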
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_plane.h"
+#include "phytium_crtc.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+#include "phytium_fbdev.h"
+#include "phytium_reg.h"
+#include "phytium_pci.h"
+#include "phytium_platform.h"
+#include "phytium_debugfs.h"
+
+int dc_fake_mode_enable;
+module_param(dc_fake_mode_enable, int, 0644);
+MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)");
+
+int dc_fast_training_check = 1;
+module_param(dc_fast_training_check, int, 0644);
+MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)");
+
+int num_source_rates = 4;
+module_param(num_source_rates, int, 0644);
+MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)");
+
+int source_max_lane_count = 4;
+module_param(source_max_lane_count, int, 0644);
+MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)");
+
+int link_dynamic_adjust;
+module_param(link_dynamic_adjust, int, 0644);
+MODULE_PARM_DESC(link_dynamic_adjust, "dynamically select the training parameters according to the display mode (0-disabled; 1-enabled; default-0)");
+
+int phytium_wait_cmd_done(struct phytium_display_private *priv,
+			  uint32_t register_offset,
+			  uint32_t request_bit,
+			  uint32_t reply_bit)
+{
+	int timeout = 500, config = 0, ret = 0;
+
+	do {
+		mdelay(1);
+		timeout--;
+		config = phytium_readl_reg(priv, 0, register_offset);
+	} while ((!(config & reply_bit)) && timeout);
+
+	phytium_writel_reg(priv, config & (~request_bit), 0, register_offset);
+
+	if (timeout == 0) {
+		DRM_ERROR("wait cmd reply timeout\n");
+		ret = -EBUSY;
+	} else {
+		timeout = 500;
+		do {
+			mdelay(1);
+			timeout--;
+			config = phytium_readl_reg(priv, 0, register_offset);
+		} while ((config & reply_bit) && timeout);
+		if (timeout == 0) {
+			DRM_ERROR("clear cmd timeout\n");
+			ret = -EBUSY;
+		}
+	}
+	mdelay(5);
+
+	return ret;
+}
+
+static void phytium_irq_preinstall(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	int i, status;
+
+	for_each_pipe_masked(priv, i) {
+		status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+		phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE);
+	}
+}
+
+static void phytium_irq_uninstall(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	int i, status;
+
+	for_each_pipe_masked(priv, i) {
+		status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+		phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE);
+	}
+}
+
+static irqreturn_t phytium_display_irq_handler(int irq, void *data)
+{
+	struct drm_device *dev = data;
+	struct phytium_display_private *priv = dev->dev_private;
+	uint32_t enabled = 0;
+	int i = 0, virt_pipe = 0;
+	irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE;
+
+	for_each_pipe_masked(priv, i) {
+		enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+		if (enabled & INT_STATUS) {
+			virt_pipe = phytium_get_virt_pipe(priv, i);
+			if (virt_pipe < 0)
+				return IRQ_NONE;
+			drm_handle_vblank(dev, virt_pipe);
+			ret = IRQ_HANDLED;
+			if (priv->dc_hw_clear_msi_irq)
+				priv->dc_hw_clear_msi_irq(priv, i);
+		}
+	}
+
+	ret1 = phytium_dp_hpd_irq_handler(priv);
+	if (ret ==
IRQ_HANDLED || ret1 == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static int phytium_enable_vblank(struct drm_device *dev, unsigned int virt_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe < 0) + return phys_pipe; + + phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + + return 0; +} + +static void phytium_disable_vblank(struct drm_device *dev, unsigned int virt_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + int phys_pipe; + + phys_pipe = phytium_get_phys_pipe(priv, virt_pipe); + if (phys_pipe >= 0) + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_INT_ENABLE); +} + +static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void phytium_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + .atomic_commit_tail = phytium_atomic_commit_tail, +}; + +static int phytium_modeset_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; + dev->mode_config.cursor_width = 32; + dev->mode_config.cursor_height = 32; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + dev->mode_config.allow_fb_modifiers = true; + + dev->mode_config.funcs = &phytium_mode_funcs; + dev->mode_config.helper_private = &phytium_mode_config_helpers; + + for_each_pipe_masked(priv, i) { + ret = phytium_crtc_init(dev, i); + if (ret) { + DRM_ERROR("phytium_crtc_init(pipe %d) return failed\n", i); + goto failed_crtc_init; + } + } + + for_each_pipe_masked(priv, i) { + ret = phytium_dp_init(dev, i); + if (ret) { + DRM_ERROR("phytium_dp_init(pipe %d) return failed\n", i); + goto failed_dp_init; + } + } + + drm_mode_config_reset(dev); + + return 0; +failed_dp_init: +failed_crtc_init: + drm_mode_config_cleanup(dev); + return ret; +} + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) +{ + int i = 0; + int virt_pipe = 0; + + for_each_pipe_masked(priv, i) { + if (i != phys_pipe) + virt_pipe++; + else + return virt_pipe; + } + + DRM_ERROR("%s %d failed\n", __func__, phys_pipe); + return -EINVAL; +} + +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) +{ + int i = 0; + int tmp = 0; + + for_each_pipe_masked(priv, i) { + if (tmp != virt_pipe) + tmp++; + else + return i; + } + + DRM_ERROR("%s %d failed\n", __func__, virt_pipe); + return -EINVAL; +} + +static int phytium_display_load(struct drm_device *dev, unsigned long flags) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { + 
DRM_ERROR("vblank init failed\n"); + goto failed_vblank_init; + } + + ret = phytium_modeset_init(dev); + if (ret) { + DRM_ERROR("phytium_modeset_init failed\n"); + goto failed_modeset_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + ret = drm_irq_install(dev, priv->irq); + if (ret) { + DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init dev\n"); + + phytium_debugfs_display_register(priv); + + return ret; + +failed_irq_install: + drm_mode_config_cleanup(dev); +failed_modeset_init: +failed_vblank_init: + return ret; +} + +static void phytium_display_unload(struct drm_device *dev) +{ + phytium_drm_fbdev_fini(dev); + drm_irq_uninstall(dev); + drm_mode_config_cleanup(dev); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_ioctl_desc phytium_ioctls[] = { + /* for test, none so far */ +}; + +static const struct file_operations phytium_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = phytium_gem_mmap, +}; + +struct drm_driver phytium_display_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_IRQ_SHARED | + DRIVER_PRIME | + DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM, + .load = phytium_display_load, + .unload = phytium_display_unload, + .lastclose = drm_fb_helper_lastclose, + .irq_handler = phytium_display_irq_handler, + .irq_preinstall = phytium_irq_preinstall, + .irq_uninstall = phytium_irq_uninstall, + .enable_vblank = phytium_enable_vblank, + .disable_vblank = phytium_disable_vblank, + .gem_free_object = phytium_gem_free_object, + .gem_vm_ops = &phytium_vm_ops, + .gem_prime_get_sg_table = phytium_gem_prime_get_sg_table, + .gem_prime_vmap = phytium_gem_prime_vmap, + .gem_prime_vunmap = phytium_gem_prime_vunmap, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, + .gem_prime_mmap = phytium_gem_prime_mmap, + .dumb_create = phytium_gem_dumb_create, + .dumb_destroy = phytium_gem_dumb_destroy, + .ioctls = phytium_ioctls, + .num_ioctls = ARRAY_SIZE(phytium_ioctls), + .fops = &phytium_drm_driver_fops, + .name = DRV_NAME, + .desc = DRV_DESC, + .date = DRV_DATE, + .major = DRV_MAJOR, + .minor = DRV_MINOR, +}; + +static void phytium_display_shutdown(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} + +static int phytium_display_pm_suspend(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } + dev->mode_config.suspend_state = state; + ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; + } + + return 0; + +gem_suspend_failed: + ret1 = drm_atomic_helper_resume(dev, 
dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); + dev->mode_config.suspend_state = NULL; +suspend_failed: + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return ret; +} + +static int phytium_display_pm_resume(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + + ret = phytium_dp_resume(dev); + if (ret) + return -EIO; + + phytium_crtc_resume(dev); + phytium_gem_resume(dev); + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + memset(priv->mem_state, 0, sizeof(priv->mem_state)); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + priv->display_pm_resume = phytium_display_pm_resume; +} + +static int __init phytium_display_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&phytium_platform_driver); + if (ret) + return ret; + + ret = pci_register_driver(&phytium_pci_driver); + + return ret; +} + +static void __exit phytium_display_exit(void) +{ + pci_unregister_driver(&phytium_pci_driver); + + platform_driver_unregister(&phytium_platform_driver); +} + +module_init(phytium_display_init); +module_exit(phytium_display_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..df24138ed57819b28db9ccfd8f29748ed27e7067 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
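+ *
+ * Private display state and register accessors shared by the PCI and
+ * platform front ends.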
+ */ + +#ifndef __PHYTIUM_DISPLAY_DRV_H__ +#define __PHYTIUM_DISPLAY_DRV_H__ + +#include +#include +#include + +#define DEBUG_LOG 0 + +#define PHYTIUM_FORMAT_MAX_PLANE 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +#define DRV_NAME "dc" +#define DRV_DESC "phytium dc" +#define DRV_DATE "20201220" +#define DRV_MAJOR 1 +#define DRV_MINOR 1 + +/* come from GPU */ +#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 + +/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) +/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) + +#define PIPE_MASK_SHIFT 0x0 +#define PIPE_MASK_MASK 0x7 +#define EDP_MASK_SHIFT 0x3 +#define EDP_MASK_MASK 0x7 + +enum phytium_platform { + PHYTIUM_PLATFORM_UNINITIALIZED = 0, + PHYTIUM_PLATFORM_PX210, + PHYTIUM_PLATFORM_PE220X, +}; + +enum phytium_mem_state_type { + PHYTIUM_MEM_VRAM_TOTAL = 0, + PHYTIUM_MEM_VRAM_ALLOC, + PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL, + PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC, + PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC, + PHYTIUM_MEM_STATE_TYPE_COUNT, +}; + +#define MEMORY_TYPE_VRAM 0x1 +#define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2 +#define MEMORY_TYPE_SYSTEM_UNIFIED 0x4 + +#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) + +#define IS_PX210(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PX210) +#define IS_PE220X(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PE220X) + +struct phytium_device_info { + unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; + unsigned int crtc_clock_max; + unsigned int hdisplay_max; + unsigned int vdisplay_max; + unsigned int backlight_max; + unsigned long address_mask; +}; + +struct phytium_display_private { + /* hw */ + void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + char support_memory_type; + char reserve[3]; + uint32_t dc_reg_base[3]; + uint32_t dcreq_reg_base[3]; + uint32_t dp_reg_base[3]; + uint32_t address_transform_base; + uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; + int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; + struct phytium_gem_object *fbdev_phytium_gem; + + int save_reg[3]; + struct list_head gem_list_head; + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; + + void (*vram_hw_init)(struct phytium_display_private *priv); + void (*display_shutdown)(struct drm_device *dev); + int (*display_pm_suspend)(struct drm_device *dev); + int (*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + + struct gen_pool *memory_pool; + resource_size_t pool_phys_addr; + resource_size_t pool_size; + void *pool_virt_addr; + uint64_t mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT]; + + /* DMA info */ + int dma_inited; + struct dma_chan *dma_chan; +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + 
pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; +extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..fd31f9a908915894251c0a84bf1e1cd8d5d66d6f --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2590 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_debugfs.h" +#include "px210_dp.h" +#include "pe220x_dp.h" +#include "phytium_panel.h" +#include "phytium_reg.h" + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); + +static int phytium_rate[] = {162000, 270000, 540000, 810000}; + +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); + phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); +} + +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + uint32_t data; + + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); + data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); 
+#if DEBUG_LOG
+	pr_info("phy address read: 0x%x data:0x%x\n", address, data);
+#endif
+
+	return data;
+}
+
+static int
+phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned int i = 0, j = 0;
+	unsigned int cmd = 0;
+	unsigned int aux_status = 0, interrupt_status = 0;
+	unsigned char *data = msg->buffer;
+	int count_timeout = 0;
+	long ret = 0;
+
+	for (i = 0; i < 3; i++) {
+		/* clear PX210_DP_INTERRUPT_RAW_STATUS */
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS);
+		for (j = 0; j < msg->size; j++)
+			phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO);
+
+		cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT);
+		if (msg->size == 0)
+			cmd |= ADDRESS_ONLY;
+		else
+			cmd |= (msg->size-1) & BYTE_COUNT_MASK;
+		phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND);
+
+		count_timeout = 0;
+		do {
+			mdelay(5);
+			interrupt_status = phytium_readl_reg(priv, group_offset,
+							     PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+			aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS);
+			if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR)
+			    || (interrupt_status & REPLY_TIMEOUT)) {
+				DRM_DEBUG_KMS("aux wait exit\n");
+				break;
+			}
+			count_timeout++;
+		} while (count_timeout < 6);
+
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		if (interrupt_status & REPLY_TIMEOUT) {
+			DRM_DEBUG_KMS("aux write reply timeout\n");
+			continue;
+		} else if (aux_status & REPLY_ERROR) {
+			DRM_DEBUG_KMS("aux write reply error\n");
+			continue;
+		} else if (aux_status & REPLY_RECEIVED) {
+			DRM_DEBUG_KMS("aux write reply received successfully\n");
+			break;
+		}
+	}
+
+	if (interrupt_status & REPLY_TIMEOUT) {
+		DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if (aux_status & REPLY_ERROR) {
+		DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) {
+		DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	}
+
+	msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE);
+	ret = msg->size;
+out:
+	return ret;
+}
+
+static int
+phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned int i = 0;
+	unsigned int cmd = 0;
+	unsigned int aux_status = 0, interrupt_status = 0;
+	unsigned char *data = msg->buffer;
+	int count_timeout = 0;
+	long ret = 0;
+
+	for (i = 0; i < 3; i++) {
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS);
+		cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT);
+		if (msg->size == 0)
+			cmd |= ADDRESS_ONLY;
+		else
+			cmd |= ((msg->size-1) & BYTE_COUNT_MASK);
+		phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND);
+
+		count_timeout = 0;
+		do {
+			mdelay(5);
+			interrupt_status = phytium_readl_reg(priv, group_offset,
+							     PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+			aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS);
+			if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR)
+			    || (interrupt_status & REPLY_TIMEOUT)) {
+				DRM_DEBUG_KMS("aux wait exit\n");
+				break;
+			}
+			count_timeout++;
+		} while (count_timeout < 6);
+
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		if (interrupt_status & REPLY_TIMEOUT) {
+			DRM_DEBUG_KMS("aux read reply timeout\n");
+			continue;
+		} else if (aux_status & REPLY_ERROR) {
+			DRM_DEBUG_KMS("aux read reply error\n");
+			continue;
+		} else if (aux_status & REPLY_RECEIVED) {
+			DRM_DEBUG_KMS("aux read reply received successfully\n");
+			break;
+		}
+	}
+
+	if (interrupt_status & REPLY_TIMEOUT) {
+		DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if (aux_status & REPLY_ERROR) {
+		DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) {
+		DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	}
+
+	msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE);
+	ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT);
+
+	if (ret > msg->size) {
+		ret = msg->size;
+	} else if (ret != msg->size) {
+		DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%zx)\n", ret, msg->size);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	for (i = 0; i < ret; i++)
+		data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA);
+
+out:
+	return ret;
+}
+
+static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_display_mode *t, *mode;
+	struct drm_connector *connector = &phytium_dp->connector;
+	struct drm_display_mode *native_mode = &phytium_dp->native_mode;
+
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			if (mode->hdisplay != native_mode->hdisplay ||
+			    mode->vdisplay != native_mode->vdisplay) {
+				memcpy(native_mode, mode, sizeof(*mode));
+				drm_mode_set_crtcinfo(native_mode, 0);
+			}
+			break;
+		}
+	}
+
+	if (&mode->head == &connector->probed_modes)
+		native_mode->clock = 0;
+}
+
+static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp)
+{
+	int i = 0, ret = 0;
+	struct drm_device *dev = phytium_dp->dev;
+	struct drm_display_mode *mode = NULL, *current_mode = NULL;
+	struct drm_display_mode *native_mode = &phytium_dp->native_mode;
+	bool mode_existed = false;
+	struct mode_size {
+		char name[DRM_DISPLAY_MODE_LEN];
+		int w;
+		int h;
+	} common_mode[] = {
+		{  "640x480",  640,  480},
+		{  "800x600",  800,  600},
+		{ "1024x768", 1024,  768},
+		{ "1280x720", 1280,  720},
+		{ "1280x800", 1280,  800},
+		{"1280x1024", 1280, 1024},
+		{ "1440x900", 1440,  900},
+		{"1680x1050", 1680, 1050},
+		{"1600x1200", 1600, 1200},
+		{"1920x1080", 1920, 1080},
+		{"1920x1200", 1920, 1200}
+	};
+
+	if (native_mode->clock == 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(common_mode); i++) {
+		mode_existed = false;
+
+		if (common_mode[i].w > native_mode->hdisplay ||
+		    common_mode[i].h > native_mode->vdisplay ||
+		    (common_mode[i].w == native_mode->hdisplay &&
+		     common_mode[i].h == native_mode->vdisplay))
+			continue;
+
+		list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) {
+			if (common_mode[i].w == current_mode->hdisplay &&
+			    common_mode[i].h == current_mode->vdisplay) {
+				mode_existed = true;
+				break;
+			}
+		}
+
+		if (mode_existed)
+			continue;
+
+		mode = drm_mode_duplicate(dev, native_mode);
+		if (mode == NULL)
+			continue;
+
+		mode->hdisplay = common_mode[i].w;
+		mode->vdisplay = common_mode[i].h;
+		mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+		strncpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN);
+		drm_mode_probed_add(&phytium_dp->connector, mode);
+		ret++;
+	}
+
+	return ret;
+}
+
+static int phytium_connector_get_modes(struct drm_connector *connector)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, &phytium_dp->aux.ddc);
+	if (edid && drm_edid_is_valid(edid)) {
+		drm_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		phytium_dp->has_audio = drm_detect_monitor_audio(edid);
+		phytium_get_native_mode(phytium_dp);
+		if (dc_fake_mode_enable)
+			ret += phytium_connector_add_common_modes(phytium_dp);
+	} else {
+		drm_connector_update_edid_property(connector, NULL);
+		phytium_dp->has_audio = false;
+	}
+
+	kfree(edid);
+
+	return ret;
+}
+
+static int
+phytium_connector_mode_valid(struct drm_connector *connector,
+			     struct drm_display_mode *mode)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+	unsigned int requested, actual;
+
+	switch (display_info->bpc) {
+	case 10:
+	case 6:
+	case 8:
+		break;
+	default:
+		DRM_INFO("unsupported bpc(%d)\n", display_info->bpc);
+		display_info->bpc = 8;
+		break;
+	}
+
+	if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) {
+		DRM_INFO("unsupported color_format(%d)\n", display_info->color_formats);
+		display_info->color_formats = DRM_COLOR_FORMAT_RGB444;
+	}
+
+	requested = mode->clock * display_info->bpc * 3 / 1000;
+	actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100;
+	actual = actual * 8 / 10;
+	if (requested >= actual) {
+		DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual,
+			      mode->clock);
+		return MODE_CLOCK_HIGH;
+	}
+
+	if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
+		return MODE_BAD_HVALUE;
+
+	if ((mode->hdisplay == 1024) && (mode->clock > 78000))
+		return MODE_BAD_HVALUE;
+
+	if ((mode->hdisplay < 640) || (mode->vdisplay < 480))
+		return MODE_BAD_HVALUE;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *phytium_dp_best_encoder(struct drm_connector *connector)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+
+	return &phytium_dp->encoder;
+}
+
+static const
+struct drm_connector_helper_funcs phytium_connector_helper_funcs = {
+	.get_modes = phytium_connector_get_modes,
+	.mode_valid = phytium_connector_mode_valid,
+	.best_encoder = phytium_dp_best_encoder,
+};
+
+static void phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp)
+{
+	static const int dp_rates[] = {162000, 270000, 540000, 810000};
+	int i, max_rate;
+
+	max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]);
+	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+		if (dp_rates[i] > max_rate)
+			break;
+		phytium_dp->sink_rates[i] = dp_rates[i];
+	}
+	phytium_dp->num_sink_rates = i;
+}
+
+static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates,
+			    int sink_len, int *common_rates)
+{
+	int i = 0, j = 0, k = 0;
+
+	while (i < source_len && j < sink_len) {
+		if (source_rates[i] == sink_rates[j]) {
+			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+				return k;
+			common_rates[k] = source_rates[i];
+			++k;
+			++i;
+			++j;
+		} else if (source_rates[i] < sink_rates[j]) {
+			++i;
+		} else {
+			++j;
+		}
+	}
+	return k;
+}
+
+static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp)
+{
+	WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates);
+
+	phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates,
+							phytium_dp->num_source_rates,
+							phytium_dp->sink_rates,
+							phytium_dp->num_sink_rates,
+							phytium_dp->common_rates);
+
+	if (WARN_ON(phytium_dp->num_common_rates == 0)) {
+		phytium_dp->common_rates[0] = 162000;
+		phytium_dp->num_common_rates = 1;
+	}
+}
+
+static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp)
+{
+	int ret;
+	unsigned char sink_count = 0;
+
+	/* read the DPCD capability; the read does not flag data errors, so sanity-check the revision */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd,
+			       sizeof(phytium_dp->dpcd));
+	if (ret < 0) {
+		DRM_ERROR("port %d get DPCD capability failed\n", phytium_dp->port);
+		return false;
+	}
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] == 0) {
+		DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]);
+		return false;
+	}
+
+	/* parse the sink's supported link parameters */
+	phytium_dp_set_sink_rates(phytium_dp);
+	phytium_dp_set_common_rates(phytium_dp);
+	phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd);
+	phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count,
+						phytium_dp->sink_max_lane_count);
+
+	/* get dpcd sink count */
+	if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) {
+		DRM_ERROR("get DPCD sink_count failed\n");
+		return false;
+	}
+
+	phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+	if (!phytium_dp->sink_count) {
+		DRM_ERROR("DPCD sink_count should not be zero\n");
+		return false;
+	}
+
+	if (!drm_dp_is_branch(phytium_dp->dpcd))
+		return true;
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10)
+		return true;
+
+	/* get downstream port for branch device */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0,
+			       phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS);
+	if (ret < 0) {
+		DRM_ERROR("get DPCD DFP failed\n");
+		return false;
+	}
+
+	return true;
+}
+
+static enum drm_connector_status
+phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp)
+{
+	if (!phytium_dp_get_dpcd(phytium_dp))
+		return connector_status_disconnected;
+
+	if (!drm_dp_is_branch(phytium_dp->dpcd))
+		return connector_status_connected;
+
+	if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
+		return phytium_dp->sink_count ?
connector_status_connected + : connector_status_disconnected; + } + return connector_status_connected; +} + +static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) +{ + unsigned char v = 0; + unsigned char p = 0; + int lane; + unsigned char voltage_max; + unsigned char preemph_max; + + /* find max value */ + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + phytium_dp->train_set[lane] = v | p; +} + +bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, + uint8_t train_set) +{ + phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); +} + +static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, lane_count, + group_offset, PHYTIUM_DP_LANE_COUNT_SET); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + group_offset, PHYTIUM_DP_LINK_BW_SET); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + +try_again: + ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + uint32_t group_offset = priv->dp_reg_base[port]; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); + 
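/* the remaining 48 bits of the 80-bit custom pattern are written below */
+		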
val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); +} + +static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, + PHYTIUM_DP_FORCE_SCRAMBLER_RESET); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); +} + +void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + + config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, 
PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+ phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+}
+
+static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+}
+
+static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable)
+{
+ struct phytium_display_private *priv = phytium_dp->dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ if (enable)
+ phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE,
+ group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+ else
+ phytium_writel_reg(priv, SEC_AUDIO_ENABLE,
+ group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+}
+
+static int
+phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info)
+{
+ struct phytium_display_private *priv = phytium_dp->dev->dev_private;
+ int port = phytium_dp->port;
+ int ret = 0, data_window = 0;
+ const struct dp_audio_n_m *n_m = NULL;
+ uint32_t fs, ws, fs_accurac;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n",
+ __func__, phytium_dp->port, audio_info.sample_rate,
+ audio_info.channels, audio_info.sample_width);
+
+ phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT);
+ phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate,
+ group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV);
+ phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK,
+ group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT);
+ phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP);
+ data_window = 90*(phytium_dp->link_rate)/100
+ *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay)
+ /phytium_dp->mode.clock/4;
+ phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW);
+ phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE);
+
+ phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE);
+ phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT,
+ group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT);
+
+ switch (audio_info.sample_rate) {
+ case 32000:
+ fs = ORIG_FREQ_32000;
+ fs_accurac = SAMPLING_FREQ_32000;
+ break;
+ case 44100:
+ fs = ORIG_FREQ_44100;
+ fs_accurac = SAMPLING_FREQ_44100;
+ break;
+ case 48000:
+ fs = ORIG_FREQ_48000;
+ fs_accurac = SAMPLING_FREQ_48000;
+ break;
+ case 96000:
+ fs = ORIG_FREQ_96000;
+ fs_accurac = SAMPLING_FREQ_96000;
+ break;
+ case 176400:
+ fs = ORIG_FREQ_176400;
+ fs_accurac = SAMPLING_FREQ_176400;
+ break;
+ case 192000:
+ fs = ORIG_FREQ_192000;
+ fs_accurac = SAMPLING_FREQ_192000;
+ break;
+ default:
+ DRM_ERROR("dp does not support sample_rate %d\n", audio_info.sample_rate);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (audio_info.sample_width) {
+ case 16:
+ ws = WORD_LENGTH_16;
+ break;
+ case 18:
+ ws = WORD_LENGTH_18;
+ break;
+ case 20:
+ ws = WORD_LENGTH_20;
+ break;
+ case 24:
+ ws = WORD_LENGTH_24;
+ break;
+ default:
+ DRM_ERROR("dp does not support sample_width %d\n", audio_info.sample_width);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<<ORIG_FREQ_SHIFT)
+ | ((ws&WORD_LENGTH_MASK)<<WORD_LENGTH_SHIFT),
+ group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ);
+ phytium_writel_reg(priv, (fs_accurac&SAMPLING_FREQ_MASK)<<SAMPLING_FREQ_SHIFT,
+ group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY);
+
+ n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, audio_info.sample_rate);
+ if (n_m == 
NULL) {
+ DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n",
+ phytium_dp->link_rate, audio_info.sample_rate);
+ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD);
+ phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD);
+
+ } else {
+ phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD);
+ phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD);
+ }
+ phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE,
+ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+ phytium_dp->audio_info = audio_info;
+
+ return 0;
+
+out:
+ phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+ group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+
+ return ret;
+}
+
+void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE,
+ group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+}
+
+bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port, config;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+ return config ? true : false;
+}
+
+void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE,
+ group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+ phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET);
+}
+
+void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+ unsigned long link_bw, data_rate = 0;
+ struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+ unsigned char tu_size = 64;
+ unsigned long data_per_tu = 0;
+ int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value;
+
+ /* calculate M/N and tu_size */
+ phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID);
+ phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID);
+ link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count;
+ data_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8;
+
+ /* scale by 10 so the fractional symbol count can be extracted for the register */
+ data_per_tu = 10*tu_size * data_rate/link_bw;
+ symbols_per_tu = (data_per_tu/10)&0xff;
+ frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf;
+ phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size,
+ group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE);
+
+ symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8;
+ udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count;
+ phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT);
+
+ /* config main stream attributes */
+ phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal,
+ group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL);
+ phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay,
+ group_offset, 
PHYTIUM_DP_MAIN_LINK_HRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, + group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, + group_offset, PHYTIUM_DP_MAIN_LINK_VRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); + + value = 0; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value & (~HSYNC_POLARITY_LOW); + else + value = value | HSYNC_POLARITY_LOW; + + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value & (~VSYNC_POLARITY_LOW); + else + value = value | VSYNC_POLARITY_LOW; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); + + switch (display_info->bpc) { + case 10: + value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); + break; + case 6: + value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); + break; + default: + value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); + break; + } + value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); +} + +void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); +} + +void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); +} + +void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct 
drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+
+ phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK,
+ priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE);
+}
+
+bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+ int config = 0;
+
+ config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE);
+ return config ? true : false;
+}
+
+static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t val = 0, raw_state = 0;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+
+ /* also read PHYTIUM_DP_INTERRUPT_STATUS to clear the raw status, so no hpd is missed */
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+ raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE);
+ if (val & HPD_EVENT)
+ phytium_dp->dp_hpd_state.hpd_event_state = true;
+
+ if (val & HPD_IRQ)
+ phytium_dp->dp_hpd_state.hpd_irq_state = true;
+
+ if (raw_state & HPD_CONNECT)
+ phytium_dp->dp_hpd_state.hpd_raw_state = true;
+ else
+ phytium_dp->dp_hpd_state.hpd_raw_state = false;
+}
+
+void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable)
+{
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ phytium_dp->dp_hpd_state.hpd_irq_enable = enable;
+ if (enable)
+ phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+ else
+ phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK,
+ group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+}
+
+int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp)
+{
+ int ret = 0;
+ uint8_t count = 0;
+
+ phytium_dp->source_rates = phytium_rate;
+ phytium_dp->num_source_rates = num_source_rates;
+ count = phytium_dp->funcs->dp_hw_get_source_lane_count(phytium_dp);
+ phytium_dp->source_max_lane_count = count;
+
+ ret = phytium_dp->funcs->dp_hw_reset(phytium_dp);
+ if (ret)
+ goto out;
+ ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp);
+ if (ret)
+ goto out;
+
+ phytium_dp->fast_train_support = false;
+ phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp);
+
+out:
+ return ret;
+}
+
+static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp,
+ uint8_t *test_lane_count,
+ uint32_t *test_link_rate)
+{
+ uint8_t test_link_bw;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT,
+ test_lane_count);
+ if (ret <= 0) {
+ DRM_DEBUG_KMS("test pattern lane count read failed(%d)\n", ret);
+ goto failed;
+ }
+
+ ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE,
+ &test_link_bw);
+ if (ret <= 0) {
+ DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret);
+ goto failed;
+ }
+ *test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+
+ return 0;
+failed:
+ return ret;
+}
+
+static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp,
+ uint8_t lane_count, uint32_t link_rate)
+{
+ uint8_t 
link_config[2];
+ int ret = 0;
+
+ link_config[0] = drm_dp_link_rate_to_bw_code(link_rate);
+ link_config[1] = lane_count;
+ if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2);
+ if (ret < 0) {
+ DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ if (phytium_dp->hw_spread_enable)
+ link_config[0] = DP_SPREAD_AMP_0_5;
+ else
+ link_config[0] = 0;
+ link_config[1] = DP_SET_ANSI_8B10B;
+ ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+ if (ret < 0) {
+ DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return ret;
+}
+
+static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp,
+ uint8_t test_pattern)
+{
+ unsigned char value;
+ int ret;
+
+ if (phytium_dp_coding_8b10b_need_enable(test_pattern))
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (ret < 0) {
+ DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ if (phytium_dp_scrambled_need_enable(test_pattern))
+ value = DP_TRAINING_PATTERN_DISABLE;
+ else
+ value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE);
+
+ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+ if (ret < 0) {
+ DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern);
+ if (ret < 0) {
+ DRM_ERROR("write DP_LINK_QUAL_LANE0_SET fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return ret;
+}
+
+static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp,
+ uint8_t train_pattern)
+{
+ uint8_t value;
+ int ret;
+
+ /* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */
+ if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE)
+ value = train_pattern;
+ else
+ value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE);
+
+ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+ if (ret < 0) {
+ DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return ret;
+}
+
+static int
+phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set)
+{
+ int ret = 0;
+
+ ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET,
+ phytium_dp->train_set, 4);
+ if (ret < 0) {
+ DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count)
+{
+ int ret = 0;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret < 0) {
+ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+ goto failed;
+ }
+ phytium_get_adjust_train(phytium_dp, link_status, lane_count);
+
+ return 0;
+failed:
+ return ret;
+}
+
+void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode)
+{
+ int ret, i;
+
+ if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ } else {
+ for (i = 0; i < 3; i++) {
+ ret = 
drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(20);
+ }
+ }
+
+ if (ret != 1)
+ DRM_DEBUG_KMS("failed to %s sink power state\n",
+ mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+}
+
+static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp)
+{
+ int ret;
+ unsigned char voltage, max_vswing_tries;
+ int voltage_tries;
+
+ /* clear the test pattern */
+ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count,
+ PHYTIUM_PHY_TP_NONE, NULL, 0);
+
+ /* config source and sink's link rate and lane count */
+ phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate);
+ ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count,
+ phytium_dp->link_rate);
+ if (ret < 0) {
+ DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret);
+ return false;
+ }
+
+ /* config source's voltage swing and pre-emphasis(103-106) */
+ memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set));
+ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+ phytium_dp->train_set[0]);
+
+ /* config train pattern */
+ phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+ ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret);
+ return false;
+ }
+
+ /* config sink's voltage swing and pre-emphasis(103-106) */
+ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+ return false;
+ }
+
+ voltage_tries = 1;
+ max_vswing_tries = 0;
+ for (;;) {
+ unsigned char link_status[DP_LINK_STATUS_SIZE];
+
+ drm_dp_link_train_clock_recovery_delay(phytium_dp->dpcd);
+
+ /* get link status 0x202-0x207 */
+ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret < 0) {
+ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+ return false;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+ DRM_DEBUG_KMS("clock recovery ok\n");
+ return true;
+ }
+
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ return false;
+ }
+
+ if (max_vswing_tries == 1) {
+ DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ return false;
+ }
+
+ voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* config source and sink's voltage swing and pre-emphasis(103-106) */
+ phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count);
+ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+ phytium_dp->train_set[0]);
+ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+ return false;
+ }
+
+ if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage)
+ ++voltage_tries;
+ else
+ voltage_tries = 1;
+
+ if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED)
+ ++max_vswing_tries;
+
+ DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n",
+ phytium_dp->train_set[0], voltage_tries, max_vswing_tries);
+ }
+}
+
+static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp)
+{
+ bool sink_tps3, sink_tps4;
+
+ sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd);
+ if (sink_tps4)
+ return DP_TRAINING_PATTERN_4;
+ else if (phytium_dp->link_rate == 810000)
+ 
DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if (phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = 
%d\n", ret);
+ goto out;
+ }
+
+ if (phytium_dp->trigger_train_fail) {
+ phytium_dp->trigger_train_fail--;
+ goto failure_handling;
+ }
+ phytium_dp->train_retry_count = 0;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n",
+ phytium_dp->connector.base.id,
+ phytium_dp->connector.name, phytium_dp->link_rate,
+ phytium_dp->link_lane_count);
+
+ return 0;
+
+failure_handling:
+ DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d\n",
+ phytium_dp->connector.base.id,
+ phytium_dp->connector.name,
+ phytium_dp->link_rate, phytium_dp->link_lane_count);
+
+ ret = phytium_dp_stop_link_train(phytium_dp);
+ if (ret < 0) {
+ DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret);
+ goto out;
+ }
+
+ phytium_dp_get_link_train_fallback_values(phytium_dp);
+ if (phytium_dp->train_retry_count < 5)
+ schedule_work(&phytium_dp->train_retry_work);
+ else
+ DRM_ERROR("DP(%d) link training unsuccessful, stopping training\n",
+ phytium_dp->port);
+
+out:
+ return -1;
+}
+
+static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp)
+{
+ unsigned char link_status[DP_LINK_STATUS_SIZE];
+ int ret = 0;
+
+ /* get link status 0x202-0x207 */
+ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret < 0) {
+ DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+ return true;
+ }
+
+ if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) {
+ DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n",
+ phytium_dp->link_rate, phytium_dp->link_lane_count);
+ return true;
+ }
+
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+ DRM_DEBUG_KMS("Clock recovery check failed\n");
+ return true;
+ }
+
+ if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) {
+ DRM_DEBUG_KMS("Channel EQ check failed\n");
+ return true;
+ }
+
+ if (!phytium_dp_hw_output_is_enable(phytium_dp)) {
+ DRM_DEBUG_KMS("check DP output enable failed\n");
+ return true;
+ }
+ return false;
+}
+
+static bool
+phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector)
+{
+ return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector) == 1;
+}
+
+static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp)
+{
+ union phytium_phy_tp phytium_phy_tp;
+ int ret;
+ unsigned char test_80_bit_pattern[
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ unsigned char test_pattern;
+ unsigned int offset;
+
+ offset = DP_TEST_PHY_PATTERN;
+
+ ret = drm_dp_dpcd_read(&phytium_dp->aux, offset,
+ &phytium_phy_tp.raw,
+ sizeof(phytium_phy_tp));
+ if (ret <= 0) {
+ DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n");
+ goto failed;
+ }
+
+ test_pattern = phytium_phy_tp.bits.PATTERN;
+
+ if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) {
+ ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ test_80_bit_pattern,
+ sizeof(test_80_bit_pattern));
+ if (ret <= 0) {
+ DRM_DEBUG_KMS("Could not read DP_TEST_80BIT_CUSTOM_PATTERN\n");
+ goto failed;
+ }
+ }
+
+ /* config source and sink's link rate and lane count */
+ ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count,
+ &phytium_dp->compliance.test_link_rate);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret);
+ goto failed;
+ }
+
+ phytium_dp_hw_set_link(phytium_dp, 
phytium_dp->compliance.test_lane_count,
+ phytium_dp->compliance.test_link_rate);
+ ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count,
+ phytium_dp->compliance.test_link_rate);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret);
+ goto failed_dpcd_set_link;
+ }
+
+ /* config source and sink's lane setting: voltage swing and pre-emphasis */
+ ret = phytium_dp_dpcd_get_adjust_request(phytium_dp,
+ phytium_dp->compliance.test_lane_count);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret);
+ goto failed_dpcd_get_adjust_request;
+ }
+ phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate,
+ phytium_dp->train_set[0]);
+ ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+ goto failed_dpcd_set_lane_setting;
+ }
+
+ /* config test pattern */
+ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+ test_pattern, test_80_bit_pattern,
+ sizeof(test_80_bit_pattern));
+ ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern);
+ if (ret < 0) {
+ DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret);
+ goto failed_dpcd_set_tp;
+ }
+
+ return DP_TEST_ACK;
+
+failed_dpcd_set_tp:
+ phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+ PHYTIUM_PHY_TP_NONE, test_80_bit_pattern,
+ sizeof(test_80_bit_pattern));
+failed_dpcd_set_link:
+failed_dpcd_set_lane_setting:
+failed_dpcd_get_adjust_request:
+failed:
+ return DP_TEST_NAK;
+}
+
+static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp)
+{
+ uint8_t response = DP_TEST_NAK;
+ uint8_t request = 0;
+ int status;
+
+ status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Could not read test request from sink\n");
+ goto update_status;
+ }
+
+ switch (request) {
+ case DP_TEST_LINK_TRAINING:
+ case DP_TEST_LINK_VIDEO_PATTERN:
+ case DP_TEST_LINK_EDID_READ:
+ DRM_DEBUG_KMS("Unsupported test request '%02x'\n", request);
+ response = DP_TEST_NAK;
+ break;
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+ response = phytium_dp_autotest_phy_pattern(phytium_dp);
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+ break;
+ }
+
+update_status:
+ status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response);
+ if (status <= 0)
+ DRM_DEBUG_KMS("Could not write test response to sink\n");
+
+}
+
+static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state)
+{
+ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+ enum drm_connector_status status = connector->status;
+ bool video_enable = false;
+ uint32_t index = 0;
+
+ if (phytium_dp->is_edp)
+ status = connector_status_connected;
+ else if (hpd_raw_state) {
+ if (!phytium_dp_needs_link_retrain(phytium_dp)) {
+ status = connector_status_connected;
+ goto out;
+ }
+ } else {
+ status = connector_status_disconnected;
+ goto out;
+ }
+
+ if (!phytium_dp->is_edp) {
+ status = phytium_dp_detect_dpcd(phytium_dp);
+ if (status == connector_status_disconnected)
+ goto out;
+
+ index = phytium_dp->num_common_rates-1;
+ phytium_dp->max_link_rate = phytium_dp->common_rates[index];
+ phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count;
+ phytium_dp->link_rate = phytium_dp->max_link_rate;
+ 
phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock_irq(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + spin_unlock_irq(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + 
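/* update each port's hpd interrupt mask to match the requested state */
+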
phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? 
"supported" : "unsupported"); +} + +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); + if (ret) + DRM_ERROR("failed to register port(%d) backlight 
device(ret=%d)\n",
+ phytium_dp->port, ret);
+ }
+
+ ret = phytium_debugfs_connector_add(connector);
+ if (ret)
+ DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret);
+
+ return 0;
+}
+
+static void
+phytium_dp_connector_unregister(struct drm_connector *connector)
+{
+ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+
+ if (phytium_dp->is_edp) {
+ phytium_edp_backlight_device_unregister(phytium_dp);
+ phytium_edp_panel_poweroff(phytium_dp);
+ }
+ drm_dp_aux_unregister(&phytium_dp->aux);
+}
+
+static const struct drm_connector_funcs phytium_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = phytium_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = phytium_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = phytium_dp_connector_register,
+ .early_unregister = phytium_dp_connector_unregister,
+};
+
+static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct phytium_dp_device *dp = encoder_to_dp_device(encoder);
+
+ drm_mode_copy(&dp->mode, adjusted);
+}
+
+static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp)
+{
+ phytium_panel_poweron(&phytium_dp->panel);
+}
+
+static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp)
+{
+ phytium_panel_poweroff(&phytium_dp->panel);
+}
+
+static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp)
+{
+ phytium_panel_enable_backlight(&phytium_dp->panel);
+}
+
+static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp)
+{
+ phytium_panel_disable_backlight(&phytium_dp->panel);
+}
+
+static void phytium_encoder_disable(struct drm_encoder *encoder)
+{
+ struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);
+
+ if (phytium_dp->is_edp)
+ phytium_edp_backlight_off(phytium_dp);
+
+ phytium_dp_hw_disable_video(phytium_dp);
+
+ mdelay(50);
+
+ if (phytium_dp->is_edp)
+ phytium_edp_panel_poweroff(phytium_dp);
+}
+
+void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp)
+{
+ struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+ unsigned long link_bw, data_rate = 0, bs_limit, bs_request;
+ int rate = 0;
+
+ bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000);
+ data_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8;
+
+ /* lower the link parameters until the blanking budget and the bandwidth both fit */
+ for (;;) {
+ bs_limit = 8192 / (phytium_dp->link_rate/1000);
+ link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count;
+ rate = 10 * data_rate / link_bw;
+ DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n",
+ phytium_dp->link_rate, phytium_dp->link_lane_count);
+ DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n",
+ phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate);
+ if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) ||
+ ((!link_dynamic_adjust) && (rate < 10)))
+ break;
+ phytium_dp_get_link_train_fallback_values(phytium_dp);
+ }
+
+ DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n",
+ phytium_dp->link_rate, phytium_dp->link_lane_count);
+}
+
+static void phytium_encoder_enable(struct drm_encoder *encoder)
+{
+ struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);
+ int ret = 0;
+
+ phytium_dp_hw_disable_video(phytium_dp);
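+ /* video stays off while the link is (re)trained; it is re-enabled below once training succeeds */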
+ + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); + if (phytium_dp->fast_train_support) + phytium_dp_fast_link_train(phytium_dp); + else + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + phytium_dp_fast_link_train_detect(phytium_dp); + } else { + phytium_dp_adjust_link_train_parameter(phytium_dp); + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + } + + phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { + phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) + phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) { + phytium_edp_backlight_on(phytium_dp); + } +} + +static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { + .mode_set = phytium_dp_encoder_mode_set, + .disable = phytium_encoder_disable, + .enable = phytium_encoder_enable, +}; + +static const struct drm_encoder_funcs phytium_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct dp_audio_n_m phytium_dp_audio_n_m[] = { + { 32000, 162000, 1024, 10125 }, + { 44100, 162000, 784, 5625 }, + { 48000, 162000, 512, 3375 }, + { 64000, 162000, 2048, 10125 }, + { 88200, 162000, 1568, 5625 }, + { 96000, 162000, 1024, 3375 }, + { 128000, 162000, 4096, 10125 }, + { 176400, 162000, 3136, 5625 }, + { 192000, 162000, 2048, 3375 }, + { 32000, 270000, 1024, 16875 }, + { 44100, 270000, 784, 9375 }, + { 48000, 270000, 512, 5625 }, + { 64000, 270000, 2048, 16875 }, + { 88200, 270000, 1568, 9375 }, + { 96000, 270000, 1024, 5625 }, + { 128000, 270000, 4096, 16875 }, + { 176400, 270000, 3136, 9375 }, + { 192000, 270000, 2048, 5625 }, + { 32000, 540000, 1024, 33750 }, + { 44100, 540000, 784, 18750 }, + { 48000, 540000, 512, 11250 }, + { 64000, 540000, 2048, 33750 }, + { 88200, 540000, 1568, 18750 }, + { 96000, 540000, 1024, 11250 }, + { 128000, 540000, 4096, 33750 }, + { 176400, 540000, 3136, 18750 }, + { 192000, 540000, 2048, 11250 }, + { 32000, 810000, 1024, 50625 }, + { 44100, 810000, 784, 28125 }, + { 48000, 810000, 512, 16875 }, + { 64000, 810000, 2048, 50625 }, + { 88200, 810000, 1568, 28125 }, + { 96000, 810000, 1024, 16875 }, + { 128000, 810000, 4096, 50625 }, + { 176400, 810000, 3136, 28125 }, + { 192000, 810000, 2048, 16875 }, +}; + +static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) +{ + struct phytium_dp_device *phytium_dp = data; + + memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); + + return 0; +} + +static int phytium_dp_audio_digital_mute(struct device *dev, void *data, bool enable) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = 
phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + .hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .digital_mute = phytium_dp_audio_digital_mute, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp, int port) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + port, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); + long ret = 0; + + DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) +{ + drm_dp_aux_init(&phytium_dp->aux); + phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); + phytium_dp->aux.transfer = phytium_dp_aux_transfer; +} + +int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { + if (i != port) + mask++; + else + break; + } + + return BIT(mask); +} + +static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (priv->info.edp_mask & BIT(port)) + return true; + else + return false; +} + +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) +{ + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + + 
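/* power on the panel first: the edp sink's DPCD can only be probed over AUX once powered */
+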
phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) + return false; + + connector->status = status; + phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + return true; +} + +int phytium_dp_resume(struct drm_device *drm_dev) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, drm_dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->is_edp) { + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + return -EIO; + } + } + + return 0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + + if (IS_PX210(priv)) { + px210_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PX210_DP_BASE(port); + priv->phy_access_base[port] = PX210_PHY_ACCESS_BASE(port); + } else if (IS_PE220X(priv)) { + pe220x_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PE220X_DP_BASE(port); + priv->phy_access_base[port] = PE220X_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp, port); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_connector_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + 
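+	/*
+	 * Register the connector only after the encoder, DP hardware and
+	 * audio codec are fully set up, so userspace never observes a
+	 * half-initialized output.
+	 */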
drm_connector_register(&phytium_dp->connector); + + return 0; +failed_connector_init: +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..3747ba6237aadbc93ff7ed5826b669eaed8401b5 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + uint8_t (*dp_hw_get_source_lane_count)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* 
DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. + */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 0000000000000000000000000000000000000000..bae872465065a4b0d3c65b457990387aa5adf601 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? 
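+		/* unknown formats fall back to a single plane */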
info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + obj = &phytium_fb->phytium_gem_obj[i]->base; + if (obj) + drm_gem_object_unreference_unlocked(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(phytium_fb); +} + +static struct drm_framebuffer_funcs viv_fb_funcs = { + .create_handle = phytium_fb_create_handle, + .destroy = phytium_fb_destroy, +}; + +struct phytium_framebuffer * +phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) +{ + struct phytium_framebuffer *phytium_fb; + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); + if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs); + + if (ret) { + DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); + kfree(phytium_fb); + return ERR_PTR(ret); + } + + for (i = 0; i < num_planes; i++) + phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; + + return phytium_fb; +} + +struct drm_framebuffer * +phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int ret = 0, i, num_planes; + struct drm_gem_object *obj; + unsigned int hsub, vsub, size; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; + struct phytium_display_private *priv = dev->dev_private; + const struct drm_format_info *info; + + info = drm_format_info(mode_cmd->pixel_format); + hsub = info ? info->hsub : 1; + vsub = info ? info->vsub : 1; + num_planes = info ? info->num_planes : 1; + num_planes = min(num_planes, PHYTIUM_FORMAT_MAX_PLANE); + + for (i = 0; i < num_planes; i++) { + unsigned int height = mode_cmd->height / (i ? vsub : 1); + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; + } + + if (obj->size < size) { + drm_gem_object_unreference_unlocked(obj); + ret = -EINVAL; + goto error; + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); + + ret = priv->dc_hw_fb_format_check(mode_cmd, i); + if (ret < 0) + goto error; + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); + if (IS_ERR(phytium_fb)) { + DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); + ret = PTR_ERR(phytium_fb); + goto error; + } + + return &phytium_fb->base; +error: + for (i--; i >= 0; i--) + drm_gem_object_unreference_unlocked(&phytium_gem_obj[i]->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h new file mode 100644 index 0000000000000000000000000000000000000000..c5c9a860789fc75fe7878f437c0e093c942ba3a4 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */
+
+#ifndef __PHYTIUM_FB_H__
+#define __PHYTIUM_FB_H__
+
+struct phytium_framebuffer {
+	struct drm_framebuffer base;
+	struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE];
+};
+
+#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base)
+
+struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev,
+					const struct drm_mode_fb_cmd2 *mode_cmd,
+					struct phytium_gem_object **phytium_gem_obj,
+					unsigned int num_planes);
+
+struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+					const struct drm_mode_fb_cmd2 *mode_cmd);
+#endif /* __PHYTIUM_FB_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c
new file mode 100644
index 0000000000000000000000000000000000000000..22933029733f734b021d9e5e8039247a4c9a8e03
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fbdev.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+
+
+#define PHYTIUM_MAX_CONNECTOR	1
+#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper)
+
+static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma);
+}
+
+static struct fb_ops phytium_fbdev_ops = {
+	.owner = THIS_MODULE,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	.fb_mmap = phytium_fbdev_mmap,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
+};
+
+static int
+phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
+{
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+	struct drm_device *dev = helper->dev;
+	unsigned int bytes_per_pixel;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	struct phytium_framebuffer *phytium_fb = NULL;
+	struct fb_info *fbi = NULL;
+	struct drm_framebuffer *fb = NULL;
+	size_t size = 0;
+	int ret = 0;
+	unsigned long offset;
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+	size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret < 0) {
+		DRM_ERROR("failed to get mutex lock\n");
+		return ret;
+	}
+
+	priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size);
+	if (IS_ERR(priv->fbdev_phytium_gem)) {
+		DRM_ERROR("failed to create gem object\n");
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(priv->fbdev_phytium_gem);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
+		ret = PTR_ERR(fbi);
+		goto out;
+	}
+
+	phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1);
+	if (IS_ERR(phytium_fb)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n");
+		ret = PTR_ERR(phytium_fb);
+		goto out;
+	}
+
+	helper->fb = &(phytium_fb->base);
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+
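+	/*
+	 * The console buffer is an ordinary phytium GEM object, so the
+	 * fb_mmap hook above can reuse phytium_gem_mmap_obj() and inherit
+	 * the same mapping attributes as any other BO.
+	 */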
fbi->fbops = &phytium_fbdev_ops; + + fb = helper->fb; + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); + + offset = fbi->var.xoffset * bytes_per_pixel; + offset += fbi->var.yoffset * fb->pitches[0]; + dev->mode_config.fb_base = 0; + fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset; + fbi->screen_size = priv->fbdev_phytium_gem->base.size; + fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size; + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height, + fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size); + fbi->skip_vt_switch = true; + + return 0; +out: + phytium_gem_free_object(&priv->fbdev_phytium_gem->base); + return ret; +} + +static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = { + .fb_probe = phytium_drm_fbdev_create, +}; + +int phytium_drm_fbdev_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + int ret; + + if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) + return -EINVAL; + + helper = &priv->fbdev_helper; + drm_fb_helper_prepare(dev, helper, &phytium_drm_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, helper, PHYTIUM_MAX_CONNECTOR); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret); + return ret; + } + + ret = drm_fb_helper_single_add_all_connectors(helper); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to add connectors - %d/\n", ret); + goto err_drm_fb_helper_fini; + } + ret = drm_fb_helper_initial_config(helper, 32); + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(helper); + return ret; +} + +void phytium_drm_fbdev_fini(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &priv->fbdev_helper; + drm_fb_helper_unregister_fbi(helper); + + if (helper->fb) + drm_framebuffer_put(helper->fb); + + drm_fb_helper_fini(helper); +} diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h new file mode 100644 index 0000000000000000000000000000000000000000..81070502c8e07a210f50bf4f7b0a967afd388633 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_FBDEV_H +#define _PHYTIUM_FBDEV_H + +int phytium_drm_fbdev_init(struct drm_device *dev); +void phytium_drm_fbdev_fini(struct drm_device *dev); + +#endif /* _PHYTIUM_FBDEV_H */ diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c new file mode 100644 index 0000000000000000000000000000000000000000..a386e28d8820460308c8741faa8607ba0183e490 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +#define VRAM_POOL_ALLOC_ORDER 12 + +int phytium_memory_pool_alloc(struct phytium_display_private *priv, void **pvaddr, + phys_addr_t *phys_addr, uint64_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(priv->memory_pool, size); + if (!vaddr) + return -ENOMEM; + + *phys_addr = gen_pool_virt_to_phys(priv->memory_pool, vaddr); + + *pvaddr = (void *)vaddr; + return 0; +} + +void phytium_memory_pool_free(struct phytium_display_private *priv, void *vaddr, uint64_t size) +{ + gen_pool_free(priv->memory_pool, (unsigned long)vaddr, size); +} + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->memory_pool = gen_pool_create(VRAM_POOL_ALLOC_ORDER, -1); + if (priv->memory_pool == NULL) { + DRM_ERROR("fail to create memory pool\n"); + ret = -1; + goto failed_create_pool; + } + + ret = gen_pool_add_virt(priv->memory_pool, (unsigned long)priv->pool_virt_addr, + priv->pool_phys_addr, priv->pool_size, -1); + if (ret) { + DRM_ERROR("fail to add vram pool\n"); + ret = -1; + goto failed_add_pool_virt; + } + + return 0; + +failed_add_pool_virt: + gen_pool_destroy(priv->memory_pool); + +failed_create_pool: + return ret; +} + +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv) +{ + gen_pool_destroy(priv->memory_pool); +} + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + struct page *page = NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT)) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg\n"); + goto sgt_free; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + goto sgt_free; + } + } + + return sgt; +sgt_free: + kfree(sgt); + return ERR_PTR(ret); +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } + expected = sg_dma_address(s) + sg_dma_len(s); + } + + 
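+	/*
+	 * Only physically contiguous dma-bufs are accepted: scanout
+	 * presumably works from the single base address recorded below,
+	 * with no scatter-gather on the display controller side.
+	 */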
+	phytium_gem_obj->iova = sg_dma_address(sgt->sgl);
+	phytium_gem_obj->sgt = sgt;
+
+	return &phytium_gem_obj->base;
+failed_check_continue:
+	drm_gem_object_release(&phytium_gem_obj->base);
+failed_object_init:
+	kfree(phytium_gem_obj);
+failed_malloc:
+	return ERR_PTR(ret);
+}
+
+void *phytium_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj);
+
+	return phytium_obj->vaddr;
+}
+
+void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+}
+
+int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	int ret = 0;
+
+	ret = drm_gem_mmap_obj(obj, obj->size, vma);
+	if (ret < 0)
+		return ret;
+
+	return phytium_gem_mmap_obj(obj, vma);
+}
+
+static void phytium_dma_callback(void *callback_param)
+{
+	struct completion *comp = callback_param;
+
+	complete(comp);
+}
+
+int phytium_dma_transfer(struct drm_device *drm_dev, int dev_to_mem, void *addr,
+			 dma_addr_t iova, uint64_t size)
+{
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	struct dma_chan *dma_chan = priv->dma_chan;
+	struct sg_table st;
+	struct scatterlist *sgl;
+	int ret = 0, timeout;
+	uint32_t nents, i;
+	struct dma_slave_config cfg = {0};
+	struct dma_async_tx_descriptor *desc;
+	struct completion comp;
+	enum dma_data_direction dir;
+	size_t min = 0;
+
+	nents = DIV_ROUND_UP(size, PAGE_SIZE);
+	ret = sg_alloc_table(&st, nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to allocate sg_table\n");
+		ret = -ENOMEM;
+		goto failed_sg_alloc_table;
+	}
+
+	for_each_sg(st.sgl, sgl, st.nents, i) {
+		min = min_t(size_t, size, PAGE_SIZE - offset_in_page(addr));
+		sg_set_page(sgl, vmalloc_to_page(addr), min, offset_in_page(addr));
+		addr += min;
+		size -= min;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+	if (dev_to_mem) {
+		cfg.direction = DMA_DEV_TO_MEM;
+		cfg.src_addr = iova;
+		cfg.dst_addr = 0;
+		dir = DMA_FROM_DEVICE;
+	} else {
+		cfg.direction = DMA_MEM_TO_DEV;
+		cfg.src_addr = 0;
+		cfg.dst_addr = iova;
+		dir = DMA_TO_DEVICE;
+	}
+
+	dmaengine_slave_config(dma_chan, &cfg);
+
+	nents = dma_map_sg(dma_chan->device->dev, st.sgl, st.nents, dir);
+	if (!nents) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to dma_map_sg for dmaengine\n");
+		ret = -EINVAL;
+		goto failed_dma_map_sg;
+	}
+	st.nents = nents;
+	dma_sync_sg_for_device(dma_chan->device->dev, st.sgl, st.nents, dir);
+
+	sgl = st.sgl;
+	desc = dmaengine_prep_slave_sg(dma_chan,
+				       st.sgl,
+				       st.nents,
+				       cfg.direction,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to dmaengine_prep_slave_sg\n");
+		ret = -EINVAL;
+		goto failed_prep_slave_sg;
+	}
+	init_completion(&comp);
+	desc->callback = phytium_dma_callback;
+	desc->callback_param = &comp;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma_chan);
+
+	timeout = wait_for_completion_timeout(&comp, 2 * HZ);
+	if (timeout == 0) {
+		DRM_DEV_ERROR(drm_dev->dev, "wait for dma callback timeout\n");
+		ret = -EIO;
+	}
+	dma_sync_sg_for_cpu(dma_chan->device->dev, st.sgl, st.nents, dir);
+
+failed_prep_slave_sg:
+	dma_unmap_sg(dma_chan->device->dev, st.sgl, st.nents, dir);
+failed_dma_map_sg:
+	sg_free_table(&st);
+failed_sg_alloc_table:
+	return ret;
+}
+
+int phytium_gem_suspend(struct drm_device *drm_dev)
+{
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	struct phytium_gem_object *phytium_gem_obj = NULL;
+	int ret = 0;
+
+	list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) {
+		if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM)
+			continue;
+
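+		/*
+		 * VRAM contents are assumed lost across suspend, so every
+		 * VRAM BO is staged into system memory: through the DMA
+		 * engine when one is available, by CPU memcpy otherwise.
+		 */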
phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); + if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + if (priv->dma_inited) + ret = phytium_dma_transfer(drm_dev, 1, phytium_gem_obj->vaddr_save, + phytium_gem_obj->iova, phytium_gem_obj->size); + + if ((!priv->dma_inited) || ret) + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, + phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + struct phytium_display_private *priv = dev->dev_private; + uint64_t size = phytium_gem_obj->size; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + dma_unmap_page(dev->dev, phytium_gem_obj->iova, size, DMA_TO_DEVICE); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + dma_free_attrs(dev->dev, size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] -= size; + } + list_del(&phytium_gem_obj->list); + } else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + unsigned long pfn = PHYS_PFN(phytium_gem_obj->phys_addr); + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
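+	 * Depending on the backing store, the range is then mapped with
+	 * remap_pfn_range() (VRAM and carveout) or dma_mmap_attrs()
+	 * (unified memory).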
+ */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + } + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle) +{ + return drm_gem_dumb_destroy(file, dev, handle); +} + + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + struct page *page = NULL; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate vram buffer with size %lx\n", size); + goto failed_dma_alloc; + } + phytium_gem_obj->iova = phytium_gem_obj->phys_addr; + phytium_gem_obj->memory_type = MEMORY_TYPE_VRAM; + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); + goto failed_dma_alloc; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { + DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_UNIFIED) { + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate unified buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] += size; + } else { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->size = 
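+	/* recorded on gem_list_head below so suspend/resume can stage VRAM BOs */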
size; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; + +failed_dma_alloc: + drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); + + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_unreference_unlocked(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 0000000000000000000000000000000000000000..cd34f4a4539f97ef698b3a293863be6df23740fb --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_GEM_H__ +#define __PHYTIUM_GEM_H__ + +#include + +struct phytium_gem_object { + struct drm_gem_object base; + phys_addr_t phys_addr; + dma_addr_t iova; + void *vaddr; + unsigned long size; + struct sg_table *sgt; + char memory_type; + char reserve[3]; + struct list_head list; + void *vaddr_save; +}; + +#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base) + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv); +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv); +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma); +void phytium_gem_free_object(struct drm_gem_object *obj); +struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt); +void phytium_gem_free_object(struct drm_gem_object *obj); +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle); +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size); +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +void *phytium_gem_prime_vmap(struct drm_gem_object *obj); +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int phytium_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_suspend(struct drm_device *drm_dev); +void phytium_gem_resume(struct drm_device *drm_dev); +#endif /* __PHYTIUM_GEM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c new file mode 100644 index 
0000000000000000000000000000000000000000..16783b24a4d374bdfaac23275ce1a36269544058
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_panel.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_panel.h"
+
+static int
+phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level)
+{
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+	unsigned char vals[2] = { 0x0 };
+
+	vals[0] = level;
+	if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+		vals[0] = (level & 0xFF00) >> 8;
+		vals[1] = (level & 0xFF);
+	}
+
+	if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+			      vals, sizeof(vals)) < 0) {
+		DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel)
+{
+	unsigned char read_val[2] = { 0x0 };
+	unsigned int level = 0;
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+
+	if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+			     &read_val, sizeof(read_val)) < 0) {
+		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+			      DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+		return 0;
+	}
+
+	level = read_val[0];
+	if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+		level = (read_val[0] << 8 | read_val[1]);
+
+	return level;
+}
+
+static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable)
+{
+	u8 reg_val = 0;
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+
+	if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
+		return;
+
+	if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+			      &reg_val) < 0) {
+		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+			      DP_EDP_DISPLAY_CONTROL_REGISTER);
+		return;
+	}
+
+	if (enable)
+		reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+	else
+		reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+	if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+			       reg_val) != 1) {
+		DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+			      enable ?
"enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void 
phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && + !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; + } else { + DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight; + } + phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; + phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; + mutex_init(&phytium_dp->panel.panel_lock); + phytium_dp->panel.dev = phytium_dp->dev; + + /* Upper limits from eDP 1.3 spec */ + phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ + phytium_dp->panel.backlight_on_delay = 50; /* t7 */ + phytium_dp->panel.backlight_off_delay = 50; + phytium_dp->panel.panel_power_down_delay = 0; /* t10 */ + phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ +} + +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->panel.setup_backlight = NULL; + phytium_dp->panel.enable_backlight = NULL; + phytium_dp->panel.disable_backlight = NULL; + phytium_dp->panel.set_backlight = NULL; + phytium_dp->panel.get_backlight = NULL; + phytium_dp->panel.poweron = NULL; + phytium_dp->panel.poweroff = NULL; +} + +void phytium_panel_enable_backlight(struct phytium_panel *panel) +{ + + if (panel->enable_backlight) { + mutex_lock(&panel->panel_lock); + msleep(panel->backlight_on_delay); + panel->enable_backlight(panel); + panel->backlight_enabled = true; + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_disable_backlight(struct phytium_panel *panel) +{ + if (panel->disable_backlight) { + mutex_lock(&panel->panel_lock); + panel->disable_backlight(panel); + panel->backlight_enabled = false; + msleep(panel->backlight_off_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweron(struct phytium_panel *panel) +{ + if (panel->poweron) { + mutex_lock(&panel->panel_lock); + panel->poweron(panel); + panel->power_enabled = true; + msleep(panel->panel_power_up_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweroff(struct phytium_panel *panel) +{ + if (panel->poweroff) { + mutex_lock(&panel->panel_lock); + msleep(panel->panel_power_down_delay); + panel->poweroff(panel); + panel->power_enabled = false; + mutex_unlock(&panel->panel_lock); + } +} + +static uint32_t phytium_scale(uint32_t source_val, + uint32_t source_min, uint32_t source_max, + uint32_t target_min, uint32_t target_max) +{ + uint64_t target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = mul_u32_u32(source_val - source_min, target_max - 
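+	/* i.e. target = (source - smin) * (tmax - tmin) / (smax - smin) + tmin */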
target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + 
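+		/* removes /sys/class/backlight/phytium_bl<port> */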
backlight_device_unregister(phytium_dp->panel.bl_device); + phytium_dp->panel.bl_device = NULL; + } +} diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h new file mode 100644 index 0000000000000000000000000000000000000000..91760e26dcf18636b5ddea494b178daf7e383d35 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PANEL_H__ +#define __PHYTIUM_PANEL_H__ +#include "phytium_dp.h" + +#define PHYTIUM_MAX_BL_LEVEL 0xFF + +struct phytium_panel { + struct drm_device *dev; + bool backlight_enabled; + bool power_enabled; + bool reserve1[2]; + unsigned int min; + unsigned int level; + unsigned int max; + struct backlight_device *bl_device; + void (*setup_backlight)(struct phytium_panel *panel); + uint32_t (*get_backlight)(struct phytium_panel *panel); + int (*set_backlight)(struct phytium_panel *panel, uint32_t level); + void (*disable_backlight)(struct phytium_panel *panel); + void (*enable_backlight)(struct phytium_panel *panel); + void (*poweron)(struct phytium_panel *panel); + void (*poweroff)(struct phytium_panel *panel); + struct mutex panel_lock; + uint32_t panel_power_up_delay; + uint32_t backlight_on_delay; + uint32_t backlight_off_delay; + uint32_t panel_power_down_delay; + uint32_t panel_power_cycle_delay; +}; + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp); +void phytium_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp); +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp); +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp); +void phytium_panel_enable_backlight(struct phytium_panel *panel); +void phytium_panel_disable_backlight(struct phytium_panel *panel); +void phytium_panel_poweron(struct phytium_panel *panel); +void phytium_panel_poweroff(struct phytium_panel *panel); + +#endif /* __PHYTIUM_PANEL_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..bc2f8a8cbf9801f6fdc4a4af039dd35231f99799 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_pci.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "px210_dc.h" +#include "px210_dp.h" +#include "pe220x_dc.h" +#include "pe220x_dp.h" + +int dc_msi_enable; +module_param(dc_msi_enable, int, 0644); +MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); + +void phytium_pci_vram_hw_init(struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size); +} + +int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->pool_phys_addr = pci_resource_start(pdev, 2); + priv->pool_size = pci_resource_len(pdev, 2); + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr, + priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n", + priv->pool_phys_addr, priv->pool_size); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_VRAM; + priv->vram_hw_init = phytium_pci_vram_hw_init; + } else { + DRM_DEBUG_KMS("not support vram\n"); + priv->pool_virt_addr = NULL; + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + devm_iounmap(&pdev->dev, priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_fini(&pdev->dev, priv); + devm_iounmap(&pdev->dev, priv->pool_virt_addr); + } +} + +static bool phytium_pci_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct phytium_dma_slave *s = param; + + if (s->dma_dev != chan->device->dev) + return false; + + if (s->chan_id == chan->chan_id) + return true; + else + return false; +} + +int phytium_pci_dma_init(struct phytium_display_private *priv) +{ + struct pci_dev *dma_dev, *gpu_dev; + struct drm_device *drm_dev = priv->dev; + dma_cap_mask_t mask; + struct phytium_dma_slave s; + int ret = 0; + u16 cmd; + + /* check px210 gpu enable */ + gpu_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc20, NULL); + if (!gpu_dev) { + DRM_INFO("failed to get gpu_dev\n"); + ret = -ENODEV; + goto failed; + } + + pci_read_config_word(gpu_dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + DRM_INFO("gpu_dev master is disabled\n"); + ret = -ENODEV; + goto failed; + } + + dma_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc3c, NULL); + if (!dma_dev) { + DRM_INFO("failed to get dma_dev\n"); + ret = -ENODEV; + goto failed; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + s.dma_dev = &dma_dev->dev; + s.chan_id = 2; + priv->dma_chan = dma_request_channel(mask, phytium_pci_dma_chan_filter, &s); + if (!priv->dma_chan) { + DRM_DEV_ERROR(drm_dev->dev, "failed to request dma chan\n"); + ret = -EBUSY; + goto failed; + } + priv->dma_inited = 1; + +failed: + return ret; +} + +void phytium_pci_dma_fini(struct phytium_display_private *priv) +{ + if (priv->dma_inited) + dma_release_channel(priv->dma_chan); + priv->dma_inited = 0; + 
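+	/* the channel is only held across PX210 suspend/resume, see the pm hooks below */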
+	priv->dma_chan = NULL;
+}
+
+static struct phytium_display_private*
+phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = NULL;
+	struct phytium_pci_private *pci_priv = NULL;
+	struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data;
+	int i = 0;
+	resource_size_t io_addr, io_size;
+
+	pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL);
+	if (!pci_priv) {
+		DRM_ERROR("no memory to allocate for drm_display_private\n");
+		goto failed_malloc_priv;
+	}
+
+	memset(pci_priv, 0, sizeof(*pci_priv));
+	priv = &pci_priv->base;
+	phytium_display_private_init(priv, dev);
+
+	memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+	DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes);
+	priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK);
+	priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK);
+	priv->info.num_pipes = 0;
+	for_each_pipe_masked(priv, i)
+		priv->info.num_pipes++;
+	if (priv->info.num_pipes == 0) {
+		DRM_ERROR("num_pipes is zero, so exit init\n");
+		goto failed_init_numpipe;
+	}
+
+	io_addr = pci_resource_start(pdev, 0);
+	io_size = pci_resource_len(pdev, 0);
+	priv->regs = ioremap(io_addr, io_size);
+	if (priv->regs == NULL) {
+		DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size);
+		goto failed_ioremap;
+	}
+
+	priv->irq = pdev->irq;
+	if (IS_PX210(priv)) {
+		pci_priv->dc_hw_vram_init = px210_dc_hw_vram_init;
+		priv->dc_hw_clear_msi_irq = px210_dc_hw_clear_msi_irq;
+		priv->dc_hw_fb_format_check = px210_dc_hw_fb_format_check;
+	} else if (IS_PE220X(priv)) {
+		pci_priv->dc_hw_vram_init = pe220x_dc_hw_vram_init;
+		priv->dc_hw_clear_msi_irq = NULL;
+		priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check;
+	}
+
+	return priv;
+
+failed_ioremap:
+failed_init_numpipe:
+	devm_kfree(&pdev->dev, pci_priv);
+failed_malloc_priv:
+	return NULL;
+}
+
+static void
+phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv)
+{
+	struct phytium_pci_private *pci_priv = to_pci_priv(priv);
+
+	if (priv->regs)
+		iounmap(priv->regs);
+
+	devm_kfree(&pdev->dev, pci_priv);
+}
+
+static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct phytium_display_private *priv = NULL;
+	struct drm_device *dev = NULL;
+	int ret = 0;
+
+	dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		DRM_ERROR("failed to allocate drm_device\n");
+		return PTR_ERR(dev);
+	}
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+	pci_set_master(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		DRM_ERROR("pci enable device fail\n");
+		goto failed_enable_device;
+	}
+
+	if (dc_msi_enable) {
+		ret = pci_enable_msi(pdev);
+		if (ret)
+			DRM_ERROR("pci enable msi fail\n");
+	}
+
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+	priv = phytium_pci_private_init(pdev, ent);
+	if (priv)
+		dev->dev_private = priv;
+	else
+		goto failed_pci_private_init;
+
+	ret = phytium_pci_vram_init(pdev, priv);
+	if (ret) {
+		DRM_ERROR("failed to init pci vram\n");
+		goto failed_pci_vram_init;
+	}
+
+	ret = drm_dev_register(dev, 0);
+	if (ret) {
+		DRM_ERROR("failed to register drm dev\n");
+		goto failed_register_drm;
+	}
+
+	phytium_dp_hpd_irq_setup(dev, true);
+
+	return 0;
+
+failed_register_drm:
+	phytium_pci_vram_fini(pdev, priv);
+failed_pci_vram_init:
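+	/* unwind in reverse order of what probe set up */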
phytium_pci_private_fini(pdev, priv);
+failed_pci_private_init:
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	pci_disable_device(pdev);
+failed_enable_device:
+	pci_set_drvdata(pdev, NULL);
+	drm_dev_put(dev);
+
+	return ret;
+}
+
+static void phytium_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	phytium_dp_hpd_irq_setup(dev, false);
+	cancel_work_sync(&priv->hotplug_work);
+	drm_dev_unregister(dev);
+	phytium_pci_vram_fini(pdev, priv);
+	phytium_pci_private_fini(pdev, priv);
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	drm_dev_put(dev);
+}
+
+static void phytium_pci_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	priv->display_shutdown(dev);
+}
+
+static int phytium_pci_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	int ret = 0;
+
+	if (IS_PX210(priv))
+		phytium_pci_dma_init(priv);
+
+	ret = priv->display_pm_suspend(drm_dev);
+	if (ret < 0)
+		goto out;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	udelay(200);
+
+out:
+	return ret;
+}
+
+static int phytium_pci_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	int ret = 0;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = priv->display_pm_resume(drm_dev);
+	if (IS_PX210(priv))
+		phytium_pci_dma_fini(priv);
+
+	return ret;
+}
+
+static const struct dev_pm_ops phytium_pci_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume)
+};
+
+static const struct phytium_device_info px210_info = {
+	.platform_mask = BIT(PHYTIUM_PLATFORM_PX210),
+	.total_pipes = 3,
+	.crtc_clock_max = PX210_DC_PIX_CLOCK_MAX,
+	.hdisplay_max = PX210_DC_HDISPLAY_MAX,
+	.vdisplay_max = PX210_DC_VDISPLAY_MAX,
+	.address_mask = PX210_DC_ADDRESS_MASK,
+	.backlight_max = PX210_DP_BACKLIGHT_MAX,
+};
+
+static const struct phytium_device_info pe220x_info = {
+	.platform_mask = BIT(PHYTIUM_PLATFORM_PE220X),
+	.total_pipes = 2,
+	.crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX,
+	.hdisplay_max = PE220X_DC_HDISPLAY_MAX,
+	.vdisplay_max = PE220X_DC_VDISPLAY_MAX,
+	.address_mask = PE220X_DC_ADDRESS_MASK,
+	.backlight_max = PE220X_DP_BACKLIGHT_MAX,
+};
+
+static const struct pci_device_id phytium_display_pci_ids[] = {
+	{ PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&px210_info },
+	{ PCI_VDEVICE(PHYTIUM, 0xdc3e), (kernel_ulong_t)&pe220x_info },
+	{ /* End: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids);
+
+struct pci_driver phytium_pci_driver = {
+	.name = "phytium_display_pci",
+	.id_table = phytium_display_pci_ids,
+	.probe = phytium_pci_probe,
+	.remove = phytium_pci_remove,
+	.shutdown = phytium_pci_shutdown,
+	.driver.pm = &phytium_pci_pm_ops,
+};
diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad116dfcb5e97921f06c2c5cf157f547106b4993
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_pci.h
@@ -0,0 +1,26 @@
+/* 
SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PCI_H__
+#define __PHYTIUM_PCI_H__
+
+#include "phytium_display_drv.h"
+
+struct phytium_pci_private {
+	struct phytium_display_private base;
+	void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr,
+				resource_size_t vram_size);
+};
+
+struct phytium_dma_slave {
+	struct device *dma_dev;
+	u32 chan_id;
+};
+
+#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base)
+
+extern struct pci_driver phytium_pci_driver;
+#endif /* __PHYTIUM_PCI_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c
new file mode 100644
index 0000000000000000000000000000000000000000..5871275a867aa24cf41d465e0d98de4c16b710de
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_plane.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "phytium_display_drv.h"
+#include "phytium_plane.h"
+#include "phytium_fb.h"
+#include "phytium_gem.h"
+#include "phytium_crtc.h"
+#include "px210_dc.h"
+#include "pe220x_dc.h"
+#include "phytium_reg.h"
+
+#define PHYTIUM_CURS_W_SIZE 32
+#define PHYTIUM_CURS_H_SIZE 32
+
+void phytium_plane_destroy(struct drm_plane *plane)
+{
+	struct phytium_plane *phytium_plane = to_phytium_plane(plane);
+
+	drm_plane_cleanup(plane);
+	kfree(phytium_plane);
+}
+
+/**
+ * phytium_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers.  This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ */
+static int
+phytium_plane_atomic_get_property(struct drm_plane *plane,
+				  const struct drm_plane_state *state,
+				  struct drm_property *property,
+				  uint64_t *val)
+{
+	DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name);
+	return -EINVAL;
+}
+
+/**
+ * phytium_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+int
+phytium_plane_atomic_set_property(struct drm_plane *plane,
+				  struct drm_plane_state *state,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name);
+	return -EINVAL;
+}
+
+struct drm_plane_state *
+phytium_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state = NULL;
+	struct phytium_plane_state *phytium_state = NULL;
+
+	phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL);
+
+	if (!phytium_state)
+		return NULL;
+
+	state = &phytium_state->base;
+	if (state->fb)
+		drm_framebuffer_get(state->fb);
+
+	state->fence = NULL;
+	state->commit = NULL;
+
+	return state;
+}
+
+void
+phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state)
+{
+	struct phytium_plane_state *phytium_state = to_phytium_plane_state(state);
+
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(phytium_state);
+}
+
+const struct drm_plane_funcs phytium_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = phytium_plane_destroy,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_get_property = phytium_plane_atomic_get_property,
+	.atomic_set_property = phytium_plane_atomic_set_property,
+	.atomic_duplicate_state = phytium_plane_atomic_duplicate_state,
+	.atomic_destroy_state = phytium_plane_atomic_destroy_state,
+};
+
+static int phytium_plane_prepare_fb(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	struct dma_buf *dma_buf;
+	struct dma_fence *fence;
+
+	if (!state->fb)
+		return 0;
+	dma_buf = to_phytium_framebuffer(state->fb)->phytium_gem_obj[0]->base.dma_buf;
+	if (dma_buf) {
+		fence = reservation_object_get_excl_rcu(dma_buf->resv);
+		drm_atomic_set_fence_for_plane(state, fence);
+	}
+
+	return 0;
+}
+
+static int
+phytium_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+{
+	struct drm_device *dev = plane->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state;
+	int src_x, src_y, src_w, src_h;
+	unsigned long base_offset;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+
+	if ((!fb) || (!crtc))
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+		if (phytium_crtc->scale_enable)
+			return -EINVAL;
+		if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_H_SIZE)) {
+			DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h);
+			return -EINVAL;
+		}
+	} else if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		src_x = state->src_x >> 16;
+		src_y = state->src_y >> 16;
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+
+		base_offset = src_x * fb->format->cpp[0] + src_y * fb->pitches[0];
+		if (base_offset & (priv->info.address_mask)) {
+			DRM_ERROR("fb base address is not aligned by 0x%lx bytes\n",
+				  priv->info.address_mask);
+			return -EINVAL;
+		}
+
+		if (src_w != state->crtc_w || src_h != state->crtc_h) {
+			DRM_ERROR("scaling not supported: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n",
+				  state->crtc_w, state->crtc_h, src_w, src_h);
+			return -EINVAL;
+		}
+
+		if ((state->crtc_x < 0) || (state->crtc_y < 0)) {
+			
DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n", + state->crtc_x, state->crtc_y); + return -EINVAL; + } + + if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) { + DRM_ERROR("plane out of crtc region\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phytium_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + break; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + case DRM_FORMAT_NV16: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; + break; + case DRM_FORMAT_NV12: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + case DRM_FORMAT_NV21: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + 
case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void phytium_dc_primary_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; + int config; + + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + + if (phytium_plane->dc_hw_update_dcreq) + phytium_plane->dc_hw_update_dcreq(plane); + phytium_plane->dc_hw_update_primary_hi_addr(plane); + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= 
~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_get(fb); + if (old_fb) + drm_framebuffer_put(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_put(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + 
phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + .prepare_fb = phytium_plane_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = px210_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = px210_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = pe220x_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = 
px210_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = pe220x_dc_hw_update_cursor_hi_addr; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 0000000000000000000000000000000000000000..2d46164861ce4135d3ba93dcfee35e9bc1be27b5 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLANE_H__ +#define __PHYTIUM_PLANE_H__ + +struct phytium_plane { + struct drm_plane base; + int phys_pipe; + unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned long size[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int format; + unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int swizzle; + unsigned int uv_swizzle; + unsigned int rot_angle; + + /* only for cursor */ + bool enable; + bool reserve[3]; + unsigned int cursor_x; + unsigned int cursor_y; + unsigned int cursor_hot_x; + unsigned int cursor_hot_y; + + void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); + void (*dc_hw_update_dcreq)(struct drm_plane *plane); + void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); + void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); +}; + +struct phytium_plane_state { + struct drm_plane_state base; +}; + +#define to_phytium_plane(x) container_of(x, struct phytium_plane, base) +#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base) + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe); +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_PLANE_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..99d7cb5dc3e0658820d9c85a708295d73b3d48a5 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display engine DRM driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include "phytium_display_drv.h"
+#include "phytium_platform.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "pe220x_dc.h"
+#include "pe220x_dp.h"
+
+int phytium_platform_carveout_mem_init(struct platform_device *pdev,
+				       struct phytium_display_private *priv)
+{
+	struct resource *res;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		priv->pool_size = resource_size(res);
+		priv->pool_phys_addr = res->start;
+	}
+
+	if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) {
+		priv->pool_virt_addr = ioremap_cache(priv->pool_phys_addr, priv->pool_size);
+		if (priv->pool_virt_addr == NULL) {
+			DRM_ERROR("failed to remap carveout mem(0x%llx)\n", priv->pool_phys_addr);
+			ret = -EINVAL;
+			goto failed_ioremap;
+		}
+		ret = phytium_memory_pool_init(&pdev->dev, priv);
+		if (ret)
+			goto failed_init_memory_pool;
+
+		priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = priv->pool_size;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT;
+		priv->vram_hw_init = NULL;
+	} else {
+		DRM_DEBUG_KMS("carveout memory is not supported\n");
+		priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = 0;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED;
+		priv->vram_hw_init = NULL;
+	}
+
+	return 0;
+
+failed_init_memory_pool:
+	iounmap(priv->pool_virt_addr);
+failed_ioremap:
+	return ret;
+}
+
+void phytium_platform_carveout_mem_fini(struct platform_device *pdev,
+					struct phytium_display_private *priv)
+{
+	if (priv->support_memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) {
+		phytium_memory_pool_fini(&pdev->dev, priv);
+		iounmap(priv->pool_virt_addr);
+	}
+}
+
+static struct phytium_display_private *
+phytium_platform_private_init(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct device_node *node;
+	struct fwnode_handle *np;
+	struct phytium_display_private *priv = NULL;
+	struct phytium_platform_private *platform_priv = NULL;
+	struct phytium_device_info *phytium_info = NULL;
+	int i = 0, ret = 0;
+	struct resource *res;
+
+	platform_priv = devm_kzalloc(&pdev->dev, sizeof(*platform_priv), GFP_KERNEL);
+	if (!platform_priv) {
+		DRM_ERROR("no memory to allocate for phytium_platform_private\n");
+		goto exit;
+	}
+
+	priv = &platform_priv->base;
+	phytium_display_private_init(priv, dev);
+
+	if (pdev->dev.of_node) {
+		phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev);
+		if (!phytium_info) {
+			DRM_ERROR("failed to get dts id data(phytium_info)\n");
+			goto failed;
+		}
+
+		memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+		node = pdev->dev.of_node;
+		ret = of_property_read_u8(node, "pipe_mask", &priv->info.pipe_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing pipe_mask property from dts\n");
+			goto failed;
+		}
+
+		ret = of_property_read_u8(node, "edp_mask", &priv->info.edp_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing edp_mask property from dts\n");
+			goto failed;
+		}
+	} else if (has_acpi_companion(&pdev->dev)) {
+		phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev);
+		if (!phytium_info) {
+			DRM_ERROR("failed to get acpi id data(phytium_info)\n");
+			goto failed;
+		}
+
+		memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+		np = dev_fwnode(&(pdev->dev));
+		ret = fwnode_property_read_u8(np, "pipe_mask", &priv->info.pipe_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing pipe_mask property from 
acpi\n"); + goto failed; + } + ret = fwnode_property_read_u8(np, "edp_mask", &priv->info.edp_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing edp_mask property from acpi\n"); + goto failed; + } + } + + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, res); + if (priv->regs == NULL) { + DRM_ERROR("ioremap fail, addr:0x%llx, size:0x%llx\n", res->start, res->end); + goto failed; + } + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) { + dev_err(&pdev->dev, "failed to get irq\n"); + goto failed; + } + + if (IS_PE220X(priv)) { + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check; + } + + return priv; + +failed: + devm_kfree(&pdev->dev, platform_priv); +exit: + return NULL; +} + +static void phytium_platform_private_fini(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + struct phytium_platform_private *platform_priv = to_platform_priv(priv); + + devm_kfree(&pdev->dev, platform_priv); +} + +static int phytium_platform_probe(struct platform_device *pdev) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + + dev_set_drvdata(&pdev->dev, dev); + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_platform_private_init(pdev); + if (priv) + dev->dev_private = priv; + else + goto failed_platform_private_init; + + ret = phytium_platform_carveout_mem_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init system carveout memory\n"); + goto failed_carveout_mem_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_platform_carveout_mem_fini(pdev, priv); +failed_carveout_mem_init: + phytium_platform_private_fini(pdev); +failed_platform_private_init: + dev_set_drvdata(&pdev->dev, NULL); + drm_dev_put(dev); + return -1; +} + +static int phytium_platform_remove(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_platform_private_fini(pdev); + dev_set_drvdata(&pdev->dev, NULL); + drm_dev_put(dev); + + return 0; +} + +static void phytium_platform_shutdown(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int phytium_platform_pm_suspend(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct phytium_display_private *priv = drm_dev->dev_private; + + return priv->display_pm_suspend(drm_dev); +} + +static int phytium_platform_pm_resume(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct phytium_display_private *priv = drm_dev->dev_private; + + return priv->display_pm_resume(drm_dev); +} + +static const struct 
dev_pm_ops phytium_platform_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(phytium_platform_pm_suspend, phytium_platform_pm_resume)
+};
+
+static const struct phytium_device_info pe220x_info = {
+	.platform_mask = BIT(PHYTIUM_PLATFORM_PE220X),
+	.total_pipes = 2,
+	.crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX,
+	.hdisplay_max = PE220X_DC_HDISPLAY_MAX,
+	.vdisplay_max = PE220X_DC_VDISPLAY_MAX,
+	.address_mask = PE220X_DC_ADDRESS_MASK,
+	.backlight_max = PE220X_DP_BACKLIGHT_MAX,
+};
+
+static const struct of_device_id display_of_match[] = {
+	{
+		.compatible = "phytium,dc",
+		.data = &pe220x_info,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, display_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id display_acpi_ids[] = {
+	{
+		.id = "PHYT0015",
+		.driver_data = (kernel_ulong_t)&pe220x_info,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(acpi, display_acpi_ids);
+#else
+#define display_acpi_ids NULL
+#endif
+
+struct platform_driver phytium_platform_driver = {
+	.driver = {
+		.name = "phytium_display_platform",
+		.of_match_table = of_match_ptr(display_of_match),
+		.acpi_match_table = ACPI_PTR(display_acpi_ids),
+		.pm = &phytium_platform_pm_ops,
+	},
+	.probe = phytium_platform_probe,
+	.remove = phytium_platform_remove,
+	.shutdown = phytium_platform_shutdown,
+};
diff --git a/drivers/gpu/drm/phytium/phytium_platform.h b/drivers/gpu/drm/phytium/phytium_platform.h
new file mode 100644
index 0000000000000000000000000000000000000000..e752f79130db5e4c4a1d525c79992b12aecff580
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_platform.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PLATFORM_H__
+#define __PHYTIUM_PLATFORM_H__
+
+struct phytium_platform_private {
+	struct phytium_display_private base;
+};
+
+#define to_platform_priv(priv) container_of(priv, struct phytium_platform_private, base)
+
+extern struct platform_driver phytium_platform_driver;
+
+#endif /* __PHYTIUM_PLATFORM_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h
new file mode 100644
index 0000000000000000000000000000000000000000..98351e75db493789e654c1fe8809dffadeeff1cd
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_reg.h
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_REG_H__
+#define __PHYTIUM_REG_H__
+
+/******************************register base******************************************/
+#define PX210_PIPE_BASE(pipe)				(0x8000 * (pipe))
+#define PX210_DC_BASE(pipe)				(PX210_PIPE_BASE(pipe) + 0x0000)
+#define PX210_DCREQ_BASE(pipe)				(PX210_PIPE_BASE(pipe) + 0x2000)
+#define PX210_DP_BASE(pipe)				(PX210_PIPE_BASE(pipe) + 0x3000)
+#define PX210_ADDRESS_TRANSFORM_BASE			0x4000
+#define PX210_PHY_ACCESS_BASE(pipe)			(PX210_PIPE_BASE(pipe) + 0x5000)
+
+#define PE220X_DC_BASE(pipe)				(0x1000 * (pipe))
+#define PE220X_DP_BASE(pipe)				(0x4000 + 0x1000 * (pipe))
+#define PE220X_ADDRESS_TRANSFORM_BASE			0x8000
+#define PE220X_PHY_ACCESS_BASE(pipe)			(0x6000 + 0x1000 * (pipe))
+/******************************register base end******************************************/
+
+/******************************dc register start******************************************/
+#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS		0x1400
+	#define ADDRESS_MASK				0xffffff80
+#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE			0x1408
+#define PHYTIUM_DC_PANEL_CONFIG				0x1418
+	#define PANEL_DATAENABLE_ENABLE			(1<<0)
+	#define PANEL_DATA_ENABLE			(1<<4)
+	#define PANEL_CLOCK_ENABLE			(1<<8)
+#define PHYTIUM_DC_HDISPLAY				0x1430
+	#define HDISPLAY_END_SHIFT			0
+	#define HDISPLAY_END_MASK			0x7fff
+	#define HDISPLAY_TOTAL_SHIFT			16
+	#define HDISPLAY_TOTAL_MASK			0x7fff
+#define PHYTIUM_DC_HSYNC				0x1438
+	#define HSYNC_START_SHIFT			0
+	#define HSYNC_START_MASK			0x7fff
+	#define HSYNC_END_SHIFT				15
+	#define HSYNC_END_MASK				0x7fff
+	#define HSYNC_PULSE_ENABLED			(1<<30)
+	#define HSYNC_NEGATIVE				(1<<31)
+#define PHYTIUM_DC_VDISPLAY				0x1440
+	#define VDISPLAY_END_SHIFT			0
+	#define VDISPLAY_END_MASK			0x7fff
+	#define VDISPLAY_TOTAL_SHIFT			16
+	#define VDISPLAY_TOTAL_MASK			0x7fff
+#define PHYTIUM_DC_VSYNC				0x1448
+	#define VSYNC_START_SHIFT			0
+	#define VSYNC_START_MASK			0x7fff
+	#define VSYNC_END_SHIFT				15
+	#define VSYNC_END_MASK				0x7fff
+	#define VSYNC_PULSE_ENABLED			(1<<30)
+	#define VSYNC_NEGATIVE				(1<<31)
+#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION		0x1450
+#define PHYTIUM_DC_GAMMA_INDEX				0x1458
+	#define GAMMA_INDEX_MAX				256
+#define PHYTIUM_DC_GAMMA_DATA				0x1460
+	#define GAMMA_BLUE_SHIFT			0
+	#define GAMMA_BLUE_MASK				0x3ff
+	#define GAMMA_GREEN_SHIFT			10
+	#define GAMMA_GREEN_MASK			0x3ff
+	#define GAMMA_RED_SHIFT				20
+	#define GAMMA_RED_MASK				0x3ff
+#define PHYTIUM_DC_CURSOR_CONFIG			0x1468
+	#define CURSOR_FORMAT_DISABLED			0x0
+	#define CURSOR_FORMAT_MASKMODE			0x3
+	#define CURSOR_FORMAT_ARGB8888			0x2
+	#define CURSOR_FORMAT_MASK			0x3
+	#define CURSOR_HOT_Y_SHIFT			8
+	#define CURSOR_HOT_Y_MASK			0x1f
+	#define CURSOR_HOT_X_SHIFT			16
+	#define CURSOR_HOT_X_MASK			0x1f
+#define PHYTIUM_DC_CURSOR_ADDRESS			0x146c
+#define PHYTIUM_DC_CURSOR_LOCATION			0x1470
+	#define CURSOR_X_SHIFT				0
+	#define CURSOR_X_MASK				0x7fff
+	#define CURSOR_Y_SHIFT				16
+	#define CURSOR_Y_MASK				0x7fff
+#define PHYTIUM_DC_CURSOR_BACKGROUND			0x1474
+#define PHYTIUM_DC_CURSOR_FOREGROUND			0x1478
+#define PHYTIUM_DC_INT_STATUS				0x147c
+	#define INT_STATUS				0x1
+#define PHYTIUM_DC_INT_ENABLE				0x1480
+	#define INT_ENABLE				0x1
+	#define INT_DISABLE				0x0
+
+#define PHYTIUM_DC_FRAMEBUFFER_CONFIG			0x1518
+	#define FRAMEBUFFER_OUTPUT			BIT(0)
+	#define FRAMEBUFFER_GAMMA_ENABLE		BIT(2)
+	#define FRAMEBUFFER_VALID_PENDING		BIT(3)
+	#define FRAMEBUFFER_RESET			BIT(4)
+	#define FRAMEBUFFER_PROGRESS			BIT(6)
+	#define FRAMEBUFFER_ROT_ANGLE_SHIFT		(11)
+	#define FRAMEBUFFER_ROT_ANGLE_MASK		(0x7)
+	#define FRAMEBUFFER_ROT_ANGLE_ROT0		(0)
+	#define FRAMEBUFFER_ROT_ANGLE_FLIP_X		(1)
+	#define FRAMEBUFFER_ROT_ANGLE_FLIP_Y		(2)
+	#define 
FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 + #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define PX210_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 + #define CLEAR_VALUE_BLACK 0x00000000 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) +/******************************dc register end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define 
PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 + #define AUX_CLK_DIVIDER_100 100 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define 
VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 + #define SUPPORT_EDP_1_4 BIT(1) +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dc.c b/drivers/gpu/drm/phytium/px210_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..4e737236d66833bfcd3faa83a4e7dfff46531c22 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */
+
+#include <linux/delay.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_fourcc.h>
+#include "phytium_display_drv.h"
+#include "px210_reg.h"
+#include "phytium_crtc.h"
+#include "phytium_plane.h"
+#include "phytium_fb.h"
+#include "phytium_gem.h"
+
+static const unsigned int px210_primary_formats[] = {
+	DRM_FORMAT_ARGB2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_RGBA1010102,
+	DRM_FORMAT_BGRA1010102,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGBX8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_ABGR4444,
+	DRM_FORMAT_RGBA4444,
+	DRM_FORMAT_BGRA4444,
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_XBGR4444,
+	DRM_FORMAT_RGBX4444,
+	DRM_FORMAT_BGRX4444,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_ABGR1555,
+	DRM_FORMAT_RGBA5551,
+	DRM_FORMAT_BGRA5551,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_XBGR1555,
+	DRM_FORMAT_RGBX5551,
+	DRM_FORMAT_BGRX5551,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR565,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_UYVY,
+};
+
+static const uint64_t px210_primary_formats_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC,
+	DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static const unsigned int px210_cursor_formats[] = {
+	DRM_FORMAT_ARGB8888,
+};
+
+void px210_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr,
+			   resource_size_t vram_size)
+{
+	uint32_t config;
+	uint32_t group_offset = priv->address_transform_base;
+
+	config = phytium_readl_reg(priv, group_offset,
+				   PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR);
+	if (config)
+		phytium_writel_reg(priv, config, group_offset,
+				   PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR);
+
+	config = phytium_readl_reg(priv, group_offset,
+				   PX210_GPU_ADDRESS_TRANSFORM_SIZE);
+	if (config)
+		phytium_writel_reg(priv, config, group_offset,
+				   PX210_GPU_ADDRESS_TRANSFORM_SIZE);
+
+	config = phytium_readl_reg(priv, group_offset,
+				   PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR);
+	if (config)
+		phytium_writel_reg(priv, config, group_offset,
+				   PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR);
+
+	phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET,
+			   group_offset, PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR);
+	phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE,
+			   group_offset, PX210_DC_ADDRESS_TRANSFORM_SIZE);
+	config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR);
+	phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR);
+}
+
+void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe)
+{
+	phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_MSI_CLEAR);
+}
+
+void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int phys_pipe = phytium_crtc->phys_pipe;
+	uint32_t group_offset = priv->dcreq_reg_base[phys_pipe];
+	int ret = 0;
+
+	/* config pix clock */
+	phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK),
+			   group_offset, PX210_DCREQ_CMD_REGISTER);
+	ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER,
+				    FLAG_REQUEST, FLAG_REPLY);
+	if (ret < 0)
+		DRM_ERROR("%s: failed to set pixel clock\n", __func__);
+}
+
+void px210_dc_hw_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct 
phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int reset_timeout = 100;
+	int config = 0;
+	int phys_pipe = phytium_crtc->phys_pipe;
+
+	/* reset dc and wait for it to go idle */
+	config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe],
+			   PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	do {
+		config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE);
+		if (config & IS_IDLE)
+			break;
+		mdelay(1);
+		reset_timeout--;
+	} while (reset_timeout);
+
+	/* reset pix clock */
+	px210_dc_hw_config_pix_clock(crtc, 0);
+
+	/* reset dc again after the pixel clock has been stopped */
+	reset_timeout = 100;
+	config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe],
+			   PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	do {
+		config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE);
+		if (config & IS_IDLE)
+			break;
+		mdelay(1);
+		reset_timeout--;
+	} while (reset_timeout);
+
+	/* reset dcreq */
+	phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PLAN);
+	phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_CONTROL);
+	phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET);
+	msleep(20);
+	phytium_writel_reg(priv, (~DCREQ_RESET) & DCREQ_RESET_MASK,
+			   priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET);
+}
+
+int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count)
+{
+	int ret = 0;
+
+	switch (mode_cmd->modifier[count]) {
+	case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC:
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_ARGB4444:
+		case DRM_FORMAT_ABGR4444:
+		case DRM_FORMAT_RGBA4444:
+		case DRM_FORMAT_BGRA4444:
+		case DRM_FORMAT_XRGB4444:
+		case DRM_FORMAT_XBGR4444:
+		case DRM_FORMAT_RGBX4444:
+		case DRM_FORMAT_BGRX4444:
+		case DRM_FORMAT_ARGB1555:
+		case DRM_FORMAT_ABGR1555:
+		case DRM_FORMAT_RGBA5551:
+		case DRM_FORMAT_BGRA5551:
+		case DRM_FORMAT_XRGB1555:
+		case DRM_FORMAT_XBGR1555:
+		case DRM_FORMAT_RGBX5551:
+		case DRM_FORMAT_BGRX5551:
+		case DRM_FORMAT_RGB565:
+		case DRM_FORMAT_BGR565:
+		case DRM_FORMAT_YUYV:
+		case DRM_FORMAT_UYVY:
+			break;
+		default:
+			DRM_ERROR("TILE_MODE0_FBCDC does not support DRM_FORMAT %d\n",
+				  mode_cmd->pixel_format);
+			ret = -EINVAL;
+			goto error;
+		}
+		break;
+	case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC:
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_ARGB2101010:
+		case DRM_FORMAT_ABGR2101010:
+		case DRM_FORMAT_RGBA1010102:
+		case DRM_FORMAT_BGRA1010102:
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_ABGR8888:
+		case DRM_FORMAT_RGBA8888:
+		case DRM_FORMAT_BGRA8888:
+		case DRM_FORMAT_XRGB8888:
+		case DRM_FORMAT_XBGR8888:
+		case DRM_FORMAT_RGBX8888:
+		case DRM_FORMAT_BGRX8888:
+			break;
+		default:
+			DRM_ERROR("TILE_MODE3_FBCDC does not support DRM_FORMAT %d\n",
+				  mode_cmd->pixel_format);
+			ret = -EINVAL;
+			goto error;
+		}
+		break;
+	case DRM_FORMAT_MOD_LINEAR:
+		break;
+	default:
+		DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+error:
+	return ret;
+}
+
+void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers,
+					  const uint32_t **formats,
+					  uint32_t *format_count)
+{
+	*format_modifiers = px210_primary_formats_modifiers;
+	*formats = px210_primary_formats;
+	
*format_count = ARRAY_SIZE(px210_primary_formats); +} + +void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = NULL; + *formats = px210_cursor_formats; + *format_count = ARRAY_SIZE(px210_cursor_formats); +} + +void px210_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, PX210_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, PX210_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, PX210_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_PLANE0_CONFIG); + } +} + +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PIX_DMA_PREFIX); +} diff --git a/drivers/gpu/drm/phytium/px210_dc.h b/drivers/gpu/drm/phytium/px210_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..24d1adfae2a32f94f736ba87c692c7322191e004 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PX210_DC_H__ +#define __PX210_DC_H__ + +#define PX210_DC_PIX_CLOCK_MAX (594000) +#define PX210_DC_HDISPLAY_MAX 3840 +#define PX210_DC_VDISPLAY_MAX 2160 +#define PX210_DC_ADDRESS_MASK 0x3f + +extern void px210_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void px210_dc_hw_disable(struct drm_crtc *crtc); +extern int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +void px210_dc_hw_update_dcreq(struct drm_plane *plane); +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +#endif /* __PX210_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dp.c b/drivers/gpu/drm/phytium/px210_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..d7bd04eac2a84714ece2694d94c04c37e3e39667 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_dp.h" +#include "px210_dp.h" + +static uint8_t px210_dp_source_lane_count[3] = {4, 4, 1}; + +/* [reg][link_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ + {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV + {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL + {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH + {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR + {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START + {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR +}; + +static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x0005, 0x0000}, + {0x0018, 0x0006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x0005, 0x0000}, + {0x0013, 0x0006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0000, 0x0014,
0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int px210_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DRV, TX_DRV); + 
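+		/*
+		 * The MGNFS/CPOST writes below take their values from the
+		 * mgnfs_val/cpost_val tables at the top of this file, indexed
+		 * as [link_rate_index][voltage_swing][pre_emphasis]; e.g. at
+		 * 2.7Gbps (index 1), swing level 1, emphasis level 0, the
+		 * value written is mgnfs_val[1][1][0] == 0x0013.
+		 */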
phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int px210_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, PX210_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void px210_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void px210_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void px210_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t px210_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct 
phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int px210_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > PX210_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool px210_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? true : false); +} + +int px210_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + uint32_t group_offset_dp = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], PX210_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -ETIMEDOUT; + } + + phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset_dp, PHYTIUM_DP_AUX_CLK_DIVIDER); + + return ret; +} + +uint8_t px210_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return px210_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func px210_dp_funcs = { + .dp_hw_get_source_lane_count = px210_dp_hw_get_source_lane_count, + .dp_hw_reset = px210_dp_hw_reset, + .dp_hw_spread_is_enable = px210_dp_hw_spread_is_enable, + .dp_hw_set_backlight = px210_dp_hw_set_backlight, + .dp_hw_get_backlight = px210_dp_hw_get_backlight, + .dp_hw_disable_backlight = px210_dp_hw_disable_backlight, + .dp_hw_enable_backlight = px210_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = px210_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = px210_dp_hw_poweron_panel, + .dp_hw_init_phy = px210_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = px210_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = px210_dp_hw_set_phy_lane_and_rate, +}; + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs =
&px210_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/px210_dp.h b/drivers/gpu/drm/phytium/px210_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..4ad65397f1aa23a40e5591981563dcfd7baae527 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DP_H__ +#define __PX210_DP_H__ + +#define PX210_DP_BACKLIGHT_MAX 100 + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PX210_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_reg.h b/drivers/gpu/drm/phytium/px210_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..5556b3ee5c1db894844687520371979380c20e70 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_REG_H__ +#define __PX210_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define PX210_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define PX210_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define PX210_DCREQ_PLANE0_ADDR_START 0x00 +#define PX210_DCREQ_PLANE0_ADDR_END 0x04 +#define PX210_DCREQ_PLANE1_ADDR_START 0x08 +#define PX210_DCREQ_PLANE1_ADDR_END 0x0c +#define PX210_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define PX210_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define PX210_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define BACKLIGHT_SHIFT 21 + #define BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define PX210_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define PX210_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define PX210_DCREQ_FRAME_START 0x54 +#define PX210_DCREQ_FILTER_CONFIG 0x58 +#define 
PX210_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define PX210_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define PX210_DCREQ_RESET 0x68 + #define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define PX210_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define PX210_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define PX210_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define PX210_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define PX210_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define PX210_PHY1_EN_REFCLK 0x100070 + +#define PX210_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define PX210_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define PX210_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define PX210_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define PX210_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define PX210_PHY0_PMA0_POWER 0x40014 +#define PX210_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define PX210_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define PX210_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define PX210_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define PX210_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define PX210_PHY0_PLL_CFG 0x30038 +#define PX210_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define PX210_PHY0_PMA_CONTROL 0x3800c +#define PX210_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define PX210_PHY0_PMA_CONTROL2 0x38004 +#define PX210_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE (0x1 << 7) + +#define PX210_PHY0_PLL0_CLK_SEL 0X684 +#define PX210_PHY0_PLL1_CLK_SEL 0x704 +#define 
PX210_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define PX210_PHY0_HSCLK0_SEL 0x18398 +#define PX210_PHY0_HSCLK1_SEL 0x1a398 +#define PX210_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define PX210_PHY0_HSCLK0_DIV 0x1839c +#define PX210_PHY0_HSCLK1_DIV 0x1a39c +#define PX210_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define PX210_PHY0_PLLDRC0_CTRL 0x18394 +#define PX210_PHY0_PLLDRC1_CTRL 0x1a394 +#define PX210_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define PX210_PHY0_PLL0_DSM_M0 0x250 +#define PX210_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define PX210_PHY0_PLL0_VCOCAL_START 0x218 +#define PX210_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define PX210_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define PX210_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL0_CP_PADJ 0x690 +#define PX210_PHY0_PLL0_CP_IADJ 0x694 +#define PX210_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define PX210_PHY0_PLL0_INTDIV 0x240 +#define PX210_PHY0_PLL0_FRACDIVL 0x244 +#define PX210_PHY0_PLL0_FRACDIVH 0x248 +#define PX210_PHY0_PLL0_HIGH_THR 0x24c +#define PX210_PHY0_PLL0_PDIAG_CTRL 0x680 +#define PX210_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define PX210_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define PX210_PHY0_PLL1_CP_PADJ 0x710 +#define PX210_PHY0_PLL1_CP_IADJ 0x714 +#define PX210_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define PX210_PHY0_PLL1_INTDIV 0x340 +#define PX210_PHY0_PLL1_FRACDIVL 0x344 +#define PX210_PHY0_PLL1_FRACDIVH 0x348 +#define PX210_PHY0_PLL1_HIGH_THR 0x34c +#define PX210_PHY0_PLL1_PDIAG_CTRL 0x700 +#define PX210_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define PX210_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define PX210_PHY1_PLL0_CP_PADJ 0x80690 +#define PX210_PHY1_PLL0_CP_IADJ 0x80694 +#define PX210_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define PX210_PHY1_PLL0_INTDIV 0x80240 +#define PX210_PHY1_PLL0_FRACDIVL 0x80244 +#define PX210_PHY1_PLL0_FRACDIVH 0x80248 +#define PX210_PHY1_PLL0_HIGH_THR 0x8024c +#define PX210_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define PX210_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define PX210_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define PX210_PHY0_PLL0_TX_PSC_A0 0x18400 +#define PX210_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL0_TX_PSC_A2 0x18408 +#define PX210_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL0_TX_PSC_A3 0x1840c +#define PX210_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL0_RX_PSC_A0 0x28000 +#define PX210_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A2 0x28008 +#define PX210_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A3 0x2800C 
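+/*
+ * Layout note, derived from the offsets in this "phy origin register" block
+ * rather than from any datasheet: each PX210_PHY1_* define mirrors its
+ * PX210_PHY0_* counterpart at a fixed displacement -- +0x80000 for the PLL
+ * configuration registers (e.g. INTDIV 0x240 -> 0x80240) and +0x78000 for the
+ * per-lane TX/RX registers (e.g. TX_PSC_A0 0x18400 -> 0x90400, RX_PSC_A0
+ * 0x28000 -> 0xa0000).
+ */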
+#define PX210_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define PX210_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define PLL1_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define PX210_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define PX210_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define PX210_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define PX210_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define PX210_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define PX210_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define PX210_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PX210_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define PX210_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define PX210_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define PX210_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define PX210_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define PX210_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define PX210_PHY0_PLL0_TX_DRV 0x18318 +#define PX210_PHY0_PLL1_TX_DRV 0x1a318 +#define PX210_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define PX210_PHY0_PLL0_TX_MGNFS 0x18140 +#define PX210_PHY0_PLL1_TX_MGNFS 0x1a140 +#define PX210_PHY1_PLL0_TX_MGNFS 0x90140 + +#define PX210_PHY0_PLL0_TX_CPOST 0x18130 +#define PX210_PHY0_PLL1_TX_CPOST 0x1a130 +#define PX210_PHY0_PLL1_TX_CPOST1 0x1a13c +#define PX210_PHY1_PLL0_TX_CPOST 0x90130 + +/******************************phy register end********************************************/ +#endif /* __PX210_REG_H__ */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5985efc5a1f34ad42fe5e783f67b1d79809321e8..de2552a40d364f957b5488f3577c661f0e786ef7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1818,6 +1818,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, struct radeon_device *rdev = dev->dev_private; /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ + preempt_disable_rt(); /* Get optional system timestamp before query. */ if (stime) @@ -1910,6 +1911,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ + preempt_enable_rt(); /* Decode into vertical and horizontal scanout position. 
*/ *vpos = position & 0x1fff; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index d0fd147ef75f2276fcd5c17865d65d94728e682c..fb5a3461bb8c4b2a6464ed2c6a1a98e37cdba559 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -167,10 +167,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { u32 *fifo_mem = dev_priv->mmio_virt; - preempt_disable(); if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0) vmw_write(dev_priv, SVGA_REG_SYNC, reason); - preempt_enable(); } void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 12bc9fa2111178cd59e36cf81ebc9e4c70af89b6..278f03f50147e3593d77688f407480bf167337e7 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -112,10 +112,12 @@ int hv_post_message(union hv_connection_id connection_id, static void hv_stimer0_isr(void) { struct hv_per_cpu_context *hv_cpu; + struct pt_regs *regs = get_irq_regs(); + u64 ip = regs ? instruction_pointer(regs) : 0; hv_cpu = this_cpu_ptr(hv_context.cpu_context); hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt); - add_interrupt_randomness(stimer0_vector, 0); + add_interrupt_randomness(stimer0_vector, 0, ip); } static int hv_ce_set_next_event(unsigned long delta, diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index c4ad518890243a8ecd66ecade33004a4284eff00..14af29c0ea1ca724ade2ad4a42b2bf8d5636fbf4 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -32,6 +32,7 @@ #include #include #include +#include #include "hv_trace.h" diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 0c17743b4a65c08116f3543a3e6f463fdc181cd6..f2e41f85a34a1bf486caa209e281fd20d94f6672 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1103,6 +1103,8 @@ static void vmbus_isr(void) void *page_addr = hv_cpu->synic_event_page; struct hv_message *msg; union hv_synic_event_flags *event; + struct pt_regs *regs = get_irq_regs(); + u64 ip = regs ? instruction_pointer(regs) : 0; bool handled = false; if (unlikely(page_addr == NULL)) @@ -1146,7 +1148,7 @@ static void vmbus_isr(void) tasklet_schedule(&hv_cpu->msg_dpc); } - add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); } /* diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index d150d0cab1b6f9e1daedd63d6a77c762b5db35ab..4c90cb1d016023e612461c88080e64d3a983f702 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -360,6 +360,15 @@ config SENSORS_ASPEED This driver can also be built as a module. If so, the module will be called aspeed_pwm_tacho. +config SENSORS_PHYTIUM + tristate "Phytium Fan tach and capture counter driver" + help + This driver provides support for Phytium Fan Tacho and capture + counter controllers. + + This driver can also be built as a module. If so, the module + will be called tacho-phytium. 
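+	  Depending on the configured mode, the driver exposes either a
+	  fan_input hwmon attribute (fan speed in RPM) or a capture_input
+	  attribute (accumulated edge count).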
+ config SENSORS_ATXP1 tristate "Attansic ATXP1 VID controller" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 93f7f41ea4ad514f88075ccd631358ae6b17ac93..2160f637ebe907864425517bfc55b28b5a88d1d7 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o +obj-$(CONFIG_SENSORS_PHYTIUM) += tacho-phytium.o obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o diff --git a/drivers/hwmon/tacho-phytium.c b/drivers/hwmon/tacho-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..cbfbe0b8250f548463c1ffa159621fb58f8b74d2 --- /dev/null +++ b/drivers/hwmon/tacho-phytium.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hwmon driver for Phytium tachometer. + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TIMER_CTRL_REG 0x00 +#define TIMER_CTRL_MODE_SHIFT 0//0:1 +#define TIMER_CTRL_RESET_SHIFT BIT(2) +#define TIMER_CTRL_FORCE_SHIFT BIT(3) +#define TIMER_CTRL_CAPTURE_EN_SHIFT BIT(4) +#define TIMER_CTRL_CAPTURE_CNT_SHIFT 5//5:11 +#define TIMER_CTRL_ANTI_JITTER_SHIFT 18//18:19 +#define TIMER_CTRL_TACHO_MODE_SHIFT 20//20:21 +#define TIMER_CTRL_TIMER_CNT_MODE_SHIFT BIT(22) +#define TIMER_CTRL_BIT_SET_SHIFT 24 +#define TIMER_CTRL_CNT_EN_SHIFT BIT(25) +#define TIMER_CTRL_CNT_CLR_SHIFT BIT(26) +#define TIMER_CTRL_TIMER_MODE_SHIFT BIT(27) +#define TIMER_CTRL_PULSE_NUM_SHIFT 28//28:30 +#define TIMER_CTRL_TACHO_EN_SHIFT BIT(31) +#define TIMER_TACHO_RES_REG 0x04 +#define TIMER_TACHO_RES_VALID_SHIFT BIT(31) +#define TIMER_TACHO_RES_MASK GENMASK(30, 0) +#define TIMER_CMP_VALUE_UP_REG 0x08 +#define TIMER_CMP_VALUE_LOW_REG 0x1C +#define TIMER_CNT_VALUE_UP_REG 0x20 +#define TIMER_CNT_VALUE_LOW_REG 0x24 +#define TIMER_INT_MASK_REG 0x28 +#define TIMER_INT_STAT_REG 0x2C +#define TIMER_INT_CAPTURE_SHIFT BIT(5) +#define TIMER_INT_CYC_COMP_SHIFT BIT(4) +#define TIMER_INT_ONE_COMP_SHIFT BIT(3) +#define TIMER_INT_ROLLOVER_SHIFT BIT(2) +#define TIMER_INT_TACHO_UNDER_SHIFT BIT(1) +#define TIMER_INT_TACHO_OVER_SHIFT BIT(0) +#define TIMER_TACHO_OVER_REG 0x30 +#define TIMER_TACHO_UNDER_REG 0x34 +#define TIMER_START_VALUE_REG 0x38 + +#define TIMER_INT_CLR_MASK GENMASK(5, 0) + +enum tacho_modes { +tacho_mode = 1, +capture_mode, +}; + +enum edge_modes { +rising_edge, +falling_edge, +double_edge, +}; + +struct phytium_tacho { + struct device *dev; + struct device *hwmon; + void __iomem *base; + struct clk *clk; + u32 freq; + int irq; + u8 work_mode; + u8 edge_mode; + u32 debounce; +}; + +static u16 capture_count; + +static void phytium_tacho_init(struct phytium_tacho *tacho) +{ + u32 val; + + if (tacho->work_mode == tacho_mode) { + val = (TIMER_CTRL_TACHO_EN_SHIFT | + TIMER_CTRL_CNT_EN_SHIFT | + (tacho->edge_mode << TIMER_CTRL_TACHO_MODE_SHIFT) | + (tacho->debounce << TIMER_CTRL_ANTI_JITTER_SHIFT) | + (tacho->work_mode << TIMER_CTRL_MODE_SHIFT)); + writel_relaxed(val, tacho->base + TIMER_CTRL_REG); + writel_relaxed(0x2faf07f, tacho->base + TIMER_CMP_VALUE_LOW_REG); + } else { + val = (TIMER_CTRL_TACHO_EN_SHIFT | + TIMER_CTRL_CNT_EN_SHIFT | + (tacho->edge_mode << TIMER_CTRL_TACHO_MODE_SHIFT) | + 
(tacho->debounce << TIMER_CTRL_ANTI_JITTER_SHIFT) | + TIMER_CTRL_CAPTURE_EN_SHIFT | + (0x7f << TIMER_CTRL_CAPTURE_CNT_SHIFT) | + (tacho->work_mode << TIMER_CTRL_MODE_SHIFT)); + writel_relaxed(val, tacho->base + TIMER_CTRL_REG); + writel_relaxed(0x20, tacho->base + TIMER_INT_MASK_REG); + } +} + +static int phytium_get_fan_tach_rpm(struct phytium_tacho *priv) +{ + u64 raw_data, tach_div, clk_source; + u8 mode, both; + unsigned long timeout; + unsigned long loopcounter; + + timeout = jiffies + msecs_to_jiffies(500); + + for (loopcounter = 0;; loopcounter++) { + raw_data = readl_relaxed(priv->base + TIMER_TACHO_RES_REG); + + if (raw_data & TIMER_TACHO_RES_VALID_SHIFT) + break; + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + if (loopcounter > 3000) + msleep(20); + else { + udelay(100); + cond_resched(); + } + } + + raw_data = raw_data & TIMER_TACHO_RES_MASK; + clk_source = priv->freq; + mode = priv->edge_mode; + both = (mode == double_edge) ? 1 : 0; + tach_div = 1 << both; + + if (raw_data == 0) + return 0; + + /* 0x2faf080 == 50000000, i.e. the 0x2faf07f compare window programmed in phytium_tacho_init() plus one, in timer clock cycles */ + return (clk_source * 60 * raw_data) / 0x2faf080 / tach_div; +} + +static ssize_t show_rpm(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int rpm; + struct phytium_tacho *priv = dev_get_drvdata(dev); + + rpm = phytium_get_fan_tach_rpm(priv); + if (rpm < 0) + return rpm; + + return sprintf(buf, "%d\n", rpm); +} + +static SENSOR_DEVICE_ATTR(fan_input, 0444, + show_rpm, NULL, 0); + +static struct attribute *tacho_dev_attrs[] = { + &sensor_dev_attr_fan_input.dev_attr.attr, + NULL +}; + +static umode_t tacho_dev_is_visible(struct kobject *kobj, + struct attribute *a, int index) +{ + return a->mode; +} + +static const struct attribute_group tacho_group = { + .attrs = tacho_dev_attrs, + .is_visible = tacho_dev_is_visible, +}; + +static const struct attribute_group *tacho_groups[] = { + &tacho_group, + NULL +}; + +static irqreturn_t capture_irq_handler(int irq, void *dev_id) +{ + struct phytium_tacho *priv = dev_id; + u32 status = readl_relaxed(priv->base + TIMER_INT_STAT_REG); + + if (status & TIMER_INT_CAPTURE_SHIFT) { + capture_count++; + + if (capture_count == 0) + dev_err(priv->dev, "capture counter overflowed\n"); + + writel_relaxed(status, priv->base + TIMER_INT_STAT_REG); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static ssize_t show_capture(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int cnt; + struct phytium_tacho *priv = dev_get_drvdata(dev); + + cnt = capture_count * 0x7f + readl_relaxed(priv->base + TIMER_CNT_VALUE_LOW_REG); + + return sprintf(buf, "%d\n", cnt); +} + +static SENSOR_DEVICE_ATTR(capture_input, 0444, + show_capture, NULL, 0); + +static struct attribute *capture_dev_attrs[] = { + &sensor_dev_attr_capture_input.dev_attr.attr, + NULL +}; + +static umode_t capture_dev_is_visible(struct kobject *kobj, + struct attribute *a, int index) +{ + return a->mode; +} + +static const struct attribute_group capture_group = { + .attrs = capture_dev_attrs, + .is_visible = capture_dev_is_visible, +}; + +static const struct attribute_group *capture_groups[] = { + &capture_group, + NULL +}; + +static int phytium_tacho_get_work_mode(struct phytium_tacho *tacho) +{ + struct fwnode_handle *nc = dev_fwnode(tacho->dev); + + if (fwnode_property_read_bool(nc, "tacho")) + return tacho_mode; + if (fwnode_property_read_bool(nc, "capture")) + return capture_mode; + return tacho_mode; +} + +static int phytium_tacho_get_edge_mode(struct phytium_tacho *tacho) +{ + struct fwnode_handle *nc = dev_fwnode(tacho->dev); + + if
(fwnode_property_read_bool(nc, "up")) + return rising_edge; + if (fwnode_property_read_bool(nc, "down")) + return falling_edge; + if (fwnode_property_read_bool(nc, "double")) + return double_edge; + return rising_edge; +} + +static int phytium_tacho_get_debounce(struct phytium_tacho *tacho) +{ + u32 value; + struct fwnode_handle *nc = dev_fwnode(tacho->dev); + + if (!fwnode_property_read_u32(nc, "debounce-level", &value)) + return value; + else + return 0; +} + +static void phytium_tacho_get_of_data(struct phytium_tacho *tacho) +{ + tacho->work_mode = phytium_tacho_get_work_mode(tacho); + tacho->edge_mode = phytium_tacho_get_edge_mode(tacho); + tacho->debounce = phytium_tacho_get_debounce(tacho); +} + +static int phytium_tacho_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct phytium_tacho *tacho; + int ret; + + tacho = devm_kzalloc(dev, sizeof(*tacho), GFP_KERNEL); + if (!tacho) + return -ENOMEM; + + tacho->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOENT; + + tacho->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(tacho->base)) { + dev_err(&pdev->dev, "region map failed\n"); + return PTR_ERR(tacho->base); + } + if (dev->of_node) { + tacho->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(tacho->clk)) + return PTR_ERR(tacho->clk); + ret = clk_prepare_enable(tacho->clk); + if (ret) + return ret; + + tacho->freq = clk_get_rate(tacho->clk); + } else if (has_acpi_companion(dev)){ + if(fwnode_property_read_u32(dev_fwnode(dev),"clock-frequency", (u32 *)&(tacho->freq) ) <0) + tacho->freq = 50000000; + } + + tacho->irq = platform_get_irq(pdev, 0); + if (tacho->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return tacho->irq; + } + + ret = devm_request_irq(dev, tacho->irq, capture_irq_handler, + 0, "phytium_tacho", tacho); + if (ret) { + dev_err(&pdev->dev, "Cannot request IRQ\n"); + return ret; + } + + phytium_tacho_get_of_data(tacho); + + phytium_tacho_init(tacho); + + if (tacho->work_mode == tacho_mode) + tacho->hwmon = devm_hwmon_device_register_with_groups(dev, + "phytium_tacho", + tacho, tacho_groups); + else + tacho->hwmon = devm_hwmon_device_register_with_groups(dev, + "phytium_capture", + tacho, capture_groups); + + platform_set_drvdata(pdev, tacho); + + return PTR_ERR_OR_ZERO(tacho->hwmon); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_tacho_suspend(struct device *dev) +{ + return 0; +} + +static int phytium_tacho_resume(struct device *dev) +{ + struct phytium_tacho *tacho = dev_get_drvdata(dev); + + phytium_tacho_init(tacho); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_tacho_pm, phytium_tacho_suspend, phytium_tacho_resume); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_tacho_acpi_ids[] = { + { "PHYT0033", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_tacho_acpi_ids); +#endif + +static const struct of_device_id tacho_of_match[] = { + { .compatible = "phytium,tacho", }, + {}, +}; +MODULE_DEVICE_TABLE(of, tacho_of_match); + +static struct platform_driver phytium_tacho_driver = { + .probe = phytium_tacho_probe, + .driver = { + .name = "phytium_tacho", + .pm = &phytium_tacho_pm, + .of_match_table = of_match_ptr(tacho_of_match), + .acpi_match_table = ACPI_PTR(phytium_tacho_acpi_ids), + }, +}; + +module_platform_driver(phytium_tacho_driver); + +MODULE_AUTHOR("Zhang Yiqun "); +MODULE_DESCRIPTION("Phytium tachometer driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwspinlock/Kconfig 
b/drivers/hwspinlock/Kconfig index e895d29500eec03d10f5067c55a14602cc60de02..ffc6ceacb39c975f5a3e008e0bc2eaeddd170394 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig @@ -16,6 +16,15 @@ config HWSPINLOCK_OMAP If unsure, say N. +config HWSPINLOCK_PHYTIUM + tristate "Phytium Hardware Spinlock device" + depends on HWSPINLOCK + depends on ARCH_PHYTIUM + help + Say y here to support the Phytium Hardware Spinlock device. + + If unsure, say N. + config HWSPINLOCK_QCOM tristate "Qualcomm Hardware Spinlock device" depends on HWSPINLOCK diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile index b87c01a506a494c81a627062dff25ed1eae4ae4e..f6dcd79360d7223632f88f46e554639645320313 100644 --- a/drivers/hwspinlock/Makefile +++ b/drivers/hwspinlock/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o +obj-$(CONFIG_HWSPINLOCK_PHYTIUM) += phytium_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o diff --git a/drivers/hwspinlock/phytium_hwspinlock.c b/drivers/hwspinlock/phytium_hwspinlock.c new file mode 100644 index 0000000000000000000000000000000000000000..ec057388726a8f43a3b92920486012403f08186a --- /dev/null +++ b/drivers/hwspinlock/phytium_hwspinlock.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium hardware spinlock driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hwspinlock_internal.h" + +/* Spinlock register offsets */ +#define LOCK_BASE 0x10 + +#define SEMA_NOTTAKEN (0) /* free */ +#define SEMA_TAKEN (1) /* locked */ + +static int phytium_hwspinlock_trylock(struct hwspinlock *lock) +{ + void __iomem *lock_addr = lock->priv; + + /* attempt to acquire the lock by reading its value */ + return (readl(lock_addr) == SEMA_NOTTAKEN); +} + +static void phytium_hwspinlock_unlock(struct hwspinlock *lock) +{ + void __iomem *lock_addr = lock->priv; + + /* release the lock by writing 0 to it */ + writel(SEMA_NOTTAKEN, lock_addr); +} + +static void phytium_hwspinlock_relax(struct hwspinlock *lock) +{ + ndelay(50); +} + +static const struct hwspinlock_ops phytium_hwspinlock_ops = { + .trylock = phytium_hwspinlock_trylock, + .unlock = phytium_hwspinlock_unlock, + .relax = phytium_hwspinlock_relax, +}; + +static int phytium_hwspinlock_probe(struct platform_device *pdev) +{ + struct fwnode_handle *np = dev_fwnode(&(pdev->dev)); + struct hwspinlock_device *bank; + struct hwspinlock *hwlock; + struct resource *res; + void __iomem *io_base; + int num_locks, i, ret; + if (!np) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + io_base = ioremap(res->start, resource_size(res)); + if (!io_base) + return -ENOMEM; + + /* + * make sure the module is enabled and clocked before reading + * the module SYSSTATUS register + */ + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + goto iounmap_base; + } + + /* Determine number of locks */ + if (fwnode_property_read_u32(np, "nr-locks", &num_locks)) { + dev_err(&pdev->dev, "missing/invalid number of locks\n"); + ret = -EINVAL; + goto iounmap_base; + } + + /* + * runtime PM will make sure the clock of this module is + * enabled again iff at least 
one lock is requested + */ + ret = pm_runtime_put(&pdev->dev); + if (ret < 0) + goto iounmap_base; + + bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL); + if (!bank) { + ret = -ENOMEM; + goto iounmap_base; + } + + platform_set_drvdata(pdev, bank); + + for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++) { + /* Set register address of each lock */ + hwlock->priv = io_base + LOCK_BASE + sizeof(u32) * i; + } + + ret = hwspin_lock_register(bank, &pdev->dev, &phytium_hwspinlock_ops, + 0, num_locks); + if (ret) + goto reg_fail; + + return 0; + +reg_fail: + kfree(bank); +iounmap_base: + iounmap(io_base); + return ret; +} + +static int phytium_hwspinlock_remove(struct platform_device *pdev) +{ + struct hwspinlock_device *bank = platform_get_drvdata(pdev); + void __iomem *io_base = bank->lock[0].priv - LOCK_BASE; + int ret; + + ret = hwspin_lock_unregister(bank); + if (ret) { + dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret); + return ret; + } + + iounmap(io_base); + kfree(bank); + + return 0; +} +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_hwspinlock_acpi_ids[] = { + { "PHYT0027", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_hwspinlock_acpi_ids); +#endif + + +static const struct of_device_id phytium_hwspinlock_of_match[] = { + { .compatible = "phytium,hwspinlock", }, + { /* end */ }, +}; +MODULE_DEVICE_TABLE(of, phytium_hwspinlock_of_match); + +static struct platform_driver phytium_hwspinlock_driver = { + .probe = phytium_hwspinlock_probe, + .remove = phytium_hwspinlock_remove, + .driver = { + .name = "phytium_hwspinlock", + .of_match_table = of_match_ptr(phytium_hwspinlock_of_match), + .acpi_match_table = ACPI_PTR(phytium_hwspinlock_acpi_ids), + }, +}; + +static int __init phytium_hwspinlock_init(void) +{ + return platform_driver_register(&phytium_hwspinlock_driver); +} +postcore_initcall(phytium_hwspinlock_init); + +static void __exit phytium_hwspinlock_exit(void) +{ + platform_driver_unregister(&phytium_hwspinlock_driver); +} +module_exit(phytium_hwspinlock_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Hardware spinlock driver for Phytium"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 165c112bc5b9a97df58e8e65f644ce8081c65254..3c315d72449149caec60e92941d72bc0d47f758e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -531,6 +531,34 @@ config I2C_DESIGNWARE_BAYTRAIL the platform firmware controlling it. You should say Y if running on a BayTrail system using the AXP288. +config I2C_PHYTIUM_CORE + tristate + +config I2C_PHYTIUM_PCI + tristate "Phytium I2C PCI" + depends on PCI && ARCH_PHYTIUM + select I2C_PHYTIUM_CORE + select I2C_SMBUS + help + If you say yes to this option, support will be included for the + Phytium I2C adapter. Only master mode is supported. + + This driver can also be built as a module. If so, the module + will be called i2c-phytium-pci. + +config I2C_PHYTIUM_PLATFORM + tristate "Phytium I2C Platform" + depends on (ACPI && COMMON_CLK) || !ACPI + select I2C_SLAVE + select I2C_PHYTIUM_CORE + select I2C_SMBUS + help + If you say yes to this option, support will be included for the + Phytium I2C adapter. Only master mode is supported. + + This driver can also be built as a module. If so, the module + will be called i2c-phytium-platform. 
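+	  Both the PCI and platform front ends are built on top of the
+	  common core (I2C_PHYTIUM_CORE), which is selected automatically.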
+ config I2C_DIGICOLOR tristate "Conexant Digicolor I2C driver" depends on ARCH_DIGICOLOR diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 18b26af82b1c5425a9dcec9c61cca3cdff694d60..0aea0dd31c9188dad8ec55f385d09e399e07c542 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -53,6 +53,10 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-bayt obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o +obj-$(CONFIG_I2C_PHYTIUM_CORE) += i2c-phytium-core.o +i2c-phytium-core-objs := i2c-phytium-common.o i2c-phytium-master.o i2c-phytium-slave.o +obj-$(CONFIG_I2C_PHYTIUM_PCI) += i2c-phytium-pci.o +obj-$(CONFIG_I2C_PHYTIUM_PLATFORM) += i2c-phytium-platform.o obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 69ec4a791f23e78e1462c002eaca62c9e400da5a..fa3a1f7fd472684584ed0553904cb7cda331e148 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -108,9 +108,10 @@ int i2c_dw_set_reg_access(struct dw_i2c_dev *dev) /* Configure register access mode 16bit */ dev->flags |= ACCESS_16BIT; } else if (reg != DW_IC_COMP_TYPE_VALUE) { - dev_err(dev->dev, - "Unknown Synopsys component type: 0x%08x\n", reg); - return -ENODEV; + //dev_err(dev->dev, + //"Unknown Synopsys component type: 0x%08x\n", reg); + //return -ENODEV; + return 0; } return 0; diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 18cc324f3ca94541d830f825693ee42b6cf60df9..eeb38fe8a03c631ed798278f9f783390b9e0ae59 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -66,8 +66,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) scl_falling_time, 0); /* No offset */ } - dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n", - dev->ss_hcnt, dev->ss_lcnt); /* * Set SCL timing parameters for fast mode or fast mode plus. Only @@ -103,8 +101,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) scl_falling_time, 0); /* No offset */ } - dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n", - fp_str, dev->fs_hcnt, dev->fs_lcnt); /* Check is high speed possible and fall back to fast mode if not */ if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) == diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index b5750fd851251e74b0558576774da4a82d81c757..8c61595ed5277def57bb718fde9fbe711586d673 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -151,6 +151,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "APMC0D0F", 0 }, { "HISI02A1", 0 }, { "HISI02A2", 0 }, + { "PHYT0003", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); @@ -203,8 +204,8 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id) * the depth could be from 2 to 256 from HW spec. 
*/ param = i2c_dw_read_comp_param(dev); - tx_fifo_depth = ((param >> 16) & 0xff) + 1; - rx_fifo_depth = ((param >> 8) & 0xff) + 1; + tx_fifo_depth = 8;//((param >> 16) & 0xff) + 1; + rx_fifo_depth = 8;//((param >> 8) & 0xff) + 1; if (!dev->tx_fifo_depth) { dev->tx_fifo_depth = tx_fifo_depth; dev->rx_fifo_depth = rx_fifo_depth; @@ -289,7 +290,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) else if (acpi_speed || t->bus_freq_hz) t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); else - t->bus_freq_hz = 400000; + t->bus_freq_hz = 100000; if (has_acpi_companion(&pdev->dev)) dw_i2c_acpi_configure(pdev); @@ -322,7 +323,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; clk_khz = dev->get_clk_rate_khz(dev); - if (!dev->sda_hold_time && t->sda_hold_ns) dev->sda_hold_time = div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index c1ce2299a76e3f67910d9a98071779dacab2ace7..5c57ecf4b79eb1208ffb6e41b344cd960fe2477d 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -800,9 +800,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev) } ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(&pdev->dev), i2c); - + IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c); if (ret != 0) { dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); goto err_clk; diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index b5ad7696adf9816c0e9c7981c1a45201dd013350..0f08ef1f7f4187eff60c013b709ec5ca8bff8cb1 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -449,8 +449,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev) hix5hd2_i2c_init(priv); ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(&pdev->dev), priv); + IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv); if (ret != 0) { dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq); goto err_clk; diff --git a/drivers/i2c/busses/i2c-phytium-common.c b/drivers/i2c/busses/i2c-phytium-common.c new file mode 100644 index 0000000000000000000000000000000000000000..2af8689d861e62c3dcaaac39bd975a0bbda23672 --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-common.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Based on the TI DAVINCI I2C adapter driver. + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +static char *abort_sources[] = { + [ABRT_7B_ADDR_NOACK] = + "slave address not acknowledged (7bit mode)", + [ABRT_10ADDR1_NOACK] = + "first address byte not acknowledged (10bit mode)", + [ABRT_10ADDR2_NOACK] = + "second address byte not acknowledged (10bit mode)", + [ABRT_TXDATA_NOACK] = + "data not acknowledged", + [ABRT_GCALL_NOACK] = + "no acknowledgement for a general call", + [ABRT_GCALL_READ] = + "read after general call", + [ABRT_SBYTE_ACKDET] = + "start byte acknowledged", + [ABRT_SBYTE_NORSTRT] = + "trying to send start byte when restart is disabled", + [ABRT_10B_RD_NORSTRT] = + "trying to read when restart is disabled (10bit mode)", + [ABRT_MASTER_DIS] = + "trying to use disabled adapter", + [ARB_LOST] = + "lost arbitration", + [ABRT_SLAVE_FLUSH_TXFIFO] = + "read command so flush old data in the TX FIFO", + [ABRT_SLAVE_ARBLOST] = + "slave lost the bus while transmitting data to a remote master", + [ABRT_SLAVE_RD_INTX] = + "incorrect slave-transmitter mode configuration", +}; + +u32 phytium_readl(struct phytium_i2c_dev *dev, int offset) +{ + return readl_relaxed(dev->base + offset); +} + +void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset) +{ + writel_relaxed(b, dev->base + offset); +} + +u32 i2c_phytium_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) +{ + if (cond) + return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; + else + return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 - 3 + offset; +} + +u32 i2c_phytium_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) +{ + return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; +} + +int i2c_phytium_set_sda_hold(struct phytium_i2c_dev *dev) +{ + if (!dev->sda_hold_time) { + /* Keep previous hold time setting if no one set it */ + dev->sda_hold_time = phytium_readl(dev, IC_SDA_HOLD); + } + + if (!(dev->sda_hold_time & IC_SDA_HOLD_RX_MASK)) + dev->sda_hold_time |= 1 << IC_SDA_HOLD_RX_SHIFT; + + dev_dbg(dev->dev, "SDA Hold Time TX:RX = %d:%d\n", + dev->sda_hold_time & ~(u32)IC_SDA_HOLD_RX_MASK, + dev->sda_hold_time >> IC_SDA_HOLD_RX_SHIFT); + + return 0; +} + +void __i2c_phytium_disable(struct phytium_i2c_dev *dev) +{ + int timeout = 100; + + do { + __i2c_phytium_disable_nowait(dev); + if ((phytium_readl(dev, IC_ENABLE_STATUS) & 1) == 0) + return; + + /* + * Wait 10 times the signaling period of the highest I2C + * transfer supported by the driver (for 400KHz this is + * 25us). 
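+		 * (Ten SCL cycles at 400 kHz are 10 / 400000 s = 25 us; the
+		 * 250 us upper bound merely gives the scheduler some slack.)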
+		 */
+		usleep_range(25, 250);
+	} while (timeout--);
+
+	dev_warn(dev->dev, "timeout in disabling adapter\n");
+}
+
+unsigned long i2c_phytium_clk_rate(struct phytium_i2c_dev *dev)
+{
+	if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+		return 0;
+	return dev->get_clk_rate_khz(dev);
+}
+
+int i2c_phytium_prepare_clk(struct phytium_i2c_dev *dev, bool prepare)
+{
+	if (IS_ERR(dev->clk))
+		return PTR_ERR(dev->clk);
+
+	if (prepare)
+		return clk_prepare_enable(dev->clk);
+
+	clk_disable_unprepare(dev->clk);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_phytium_prepare_clk);
+
+int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev)
+{
+	int timeout = 20; /* 20 ms */
+
+	while (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) {
+		if (timeout <= 0) {
+			dev_warn(dev->dev, "timeout waiting for bus ready\n");
+			i2c_recover_bus(&dev->adapter);
+
+			if (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY)
+				return -ETIMEDOUT;
+			return 0;
+		}
+		timeout--;
+		usleep_range(1000, 1100);
+	}
+
+	return 0;
+}
+
+int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev)
+{
+	unsigned long abort_source = dev->abort_source;
+	int i;
+
+	if (abort_source & IC_TX_ABRT_NOACK) {
+		for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+			dev_dbg(dev->dev,
+				"%s: %s\n", __func__, abort_sources[i]);
+		return -EREMOTEIO;
+	}
+
+	for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+		dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+
+	if (abort_source & IC_TX_ARB_LOST)
+		return -EAGAIN;
+	else if (abort_source & IC_TX_ABRT_GCALL_READ)
+		return -EINVAL;
+	else
+		return -EIO;
+}
+
+u32 i2c_phytium_func(struct i2c_adapter *adapter)
+{
+	struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter);
+
+	return dev->functionality;
+}
+
+void i2c_phytium_disable(struct phytium_i2c_dev *dev)
+{
+	/* Disable controller */
+	__i2c_phytium_disable(dev);
+
+	/* Disable all interrupts */
+	phytium_writel(dev, 0, IC_INTR_MASK);
+	phytium_readl(dev, IC_CLR_INTR);
+}
+
+void i2c_phytium_disable_int(struct phytium_i2c_dev *dev)
+{
+	phytium_writel(dev, 0, IC_INTR_MASK);
+}
+
+MODULE_AUTHOR("Cheng Quan ");
+MODULE_DESCRIPTION("Phytium I2C bus adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-phytium-core.h b/drivers/i2c/busses/i2c-phytium-core.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cf42787389ee2ed2337ae7421086d44c2ed2175
--- /dev/null
+++ b/drivers/i2c/busses/i2c-phytium-core.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Phytium I2C adapter driver.
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
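+ *
+ * The register offsets and bit definitions below follow the Synopsys
+ * DesignWare I2C controller layout (IC_CON at 0x00, IC_TAR at 0x04,
+ * IC_DATA_CMD at 0x10, ...), from which this block appears to derive.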
+ */ + +#include +#include +#include + +#define IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ + I2C_FUNC_SMBUS_BYTE | \ + I2C_FUNC_SMBUS_BYTE_DATA | \ + I2C_FUNC_SMBUS_WORD_DATA | \ + I2C_FUNC_SMBUS_BLOCK_DATA | \ + I2C_FUNC_SMBUS_I2C_BLOCK) + +#define IC_CON_MASTER 0x1 +#define IC_CON_SPEED_STD 0x2 +#define IC_CON_SPEED_FAST 0x4 +#define IC_CON_SPEED_HIGH 0x6 +#define IC_CON_SPEED_MASK 0x6 +#define IC_CON_10BITADDR_SLAVE 0x8 +#define IC_CON_10BITADDR_MASTER 0x10 +#define IC_CON_RESTART_EN 0x20 +#define IC_CON_SLAVE_DISABLE 0x40 +#define IC_CON_STOP_DET_IFADDRESSED 0x80 +#define IC_CON_TX_EMPTY_CTRL 0x100 +#define IC_CON_RX_FIFO_FULL_HLD_CTRL 0x200 + +#define IC_CON 0x0 +#define IC_TAR 0x4 +#define IC_SAR 0x8 +#define IC_DATA_CMD 0x10 +#define IC_SS_SCL_HCNT 0x14 +#define IC_SS_SCL_LCNT 0x18 +#define IC_FS_SCL_HCNT 0x1c +#define IC_FS_SCL_LCNT 0x20 +#define IC_HS_SCL_HCNT 0x24 +#define IC_HS_SCL_LCNT 0x28 +#define IC_INTR_STAT 0x2c +#define IC_INTR_MASK 0x30 +#define IC_RAW_INTR_STAT 0x34 +#define IC_RX_TL 0x38 +#define IC_TX_TL 0x3c +#define IC_CLR_INTR 0x40 +#define IC_CLR_RX_UNDER 0x44 +#define IC_CLR_RX_OVER 0x48 +#define IC_CLR_TX_OVER 0x4c +#define IC_CLR_RD_REQ 0x50 +#define IC_CLR_TX_ABRT 0x54 +#define IC_CLR_RX_DONE 0x58 +#define IC_CLR_ACTIVITY 0x5c +#define IC_CLR_STOP_DET 0x60 +#define IC_CLR_START_DET 0x64 +#define IC_CLR_GEN_CALL 0x68 +#define IC_ENABLE 0x6c +#define IC_STATUS 0x70 +#define IC_TXFLR 0x74 +#define IC_RXFLR 0x78 +#define IC_SDA_HOLD 0x7c +#define IC_TX_ABRT_SOURCE 0x80 +#define IC_ENABLE_STATUS 0x9c +#define IC_SMBCLK_LOW_MEXT 0xa8 +#define IC_SMBCLK_LOW_TIMEOUT 0xac +#define IC_SMBDAT_STUCK_TIMEOUT 0xb4 +#define IC_CLR_SMBCLK_EXT_LOW_TIMEOUT 0xbc +#define IC_CLR_SMBCLK_TMO_LOW_TIMEOUT 0xc0 +#define IC_CLR_SMBDAT_LOW_TIMEOUT 0xc4 +#define IC_CLR_SMBALERT_IN_N 0xd0 + +#define IC_INTR_RX_UNDER 0x001 +#define IC_INTR_RX_OVER 0x002 +#define IC_INTR_RX_FULL 0x004 +#define IC_INTR_TX_OVER 0x008 +#define IC_INTR_TX_EMPTY 0x010 +#define IC_INTR_RD_REQ 0x020 +#define IC_INTR_TX_ABRT 0x040 +#define IC_INTR_RX_DONE 0x080 +#define IC_INTR_ACTIVITY 0x100 +#define IC_INTR_STOP_DET 0x200 +#define IC_INTR_START_DET 0x400 +#define IC_INTR_GEN_CALL 0x800 +#define IC_INTR_SMBCLK_EXT_LOW_TIMEOUT 0x1000 +#define IC_INTR_SMBCLK_TMO_LOW_TIMEOUT 0x2000 +#define IC_INTR_SMBSDA_LOW_TIMEOUT 0x4000 +#define IC_INTR_SMBALERT_IN_N 0x20000 + +#define IC_INTR_DEFAULT_MASK (IC_INTR_RX_FULL | \ + IC_INTR_TX_ABRT | \ + IC_INTR_STOP_DET) +#define IC_INTR_MASTER_MASK (IC_INTR_DEFAULT_MASK | \ + IC_INTR_TX_EMPTY) +#define IC_INTR_SLAVE_MASK (IC_INTR_DEFAULT_MASK | \ + IC_INTR_RX_DONE | \ + IC_INTR_RX_UNDER | \ + IC_INTR_RD_REQ) +#define IC_INTR_SMBUS_MASK (IC_INTR_MASTER_MASK | \ + IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | \ + IC_INTR_SMBCLK_TMO_LOW_TIMEOUT | \ + IC_INTR_SMBSDA_LOW_TIMEOUT) + +#define IC_STATUS_ACTIVITY 0x1 +#define IC_STATUS_TFE BIT(2) +#define IC_STATUS_MASTER_ACTIVITY BIT(5) +#define IC_STATUS_SLAVE_ACTIVITY BIT(6) + +#define IC_SDA_HOLD_RX_SHIFT 16 +#define IC_SDA_HOLD_RX_MASK GENMASK(23, IC_SDA_HOLD_RX_SHIFT) + +#define IC_ERR_TX_ABRT 0x1 + +#define IC_TAR_10BITADDR_MASTER BIT(12) + +#define IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) +#define IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) + +#define STATUS_IDLE 0x0 +#define STATUS_WRITE_IN_PROGRESS 0x1 +#define STATUS_READ_IN_PROGRESS 0x2 + +/* + * operation modes + */ +#define PHYTIUM_IC_MASTER 0 +#define PHYTIUM_IC_SLAVE 1 + +#define ABRT_7B_ADDR_NOACK 0 +#define ABRT_10ADDR1_NOACK 1 +#define ABRT_10ADDR2_NOACK 2 +#define 
ABRT_TXDATA_NOACK 3 +#define ABRT_GCALL_NOACK 4 +#define ABRT_GCALL_READ 5 +#define ABRT_SBYTE_ACKDET 7 +#define ABRT_SBYTE_NORSTRT 9 +#define ABRT_10B_RD_NORSTRT 10 +#define ABRT_MASTER_DIS 11 +#define ARB_LOST 12 +#define ABRT_SLAVE_FLUSH_TXFIFO 13 +#define ABRT_SLAVE_ARBLOST 14 +#define ABRT_SLAVE_RD_INTX 15 + +#define IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) +#define IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) +#define IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) +#define IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) +#define IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) +#define IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) +#define IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) +#define IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) +#define IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) +#define IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) +#define IC_TX_ARB_LOST (1UL << ARB_LOST) +#define IC_RX_ABRT_SLAVE_RD_INTX (1UL << ABRT_SLAVE_RD_INTX) +#define IC_RX_ABRT_SLAVE_ARBLOST (1UL << ABRT_SLAVE_ARBLOST) +#define IC_RX_ABRT_SLAVE_FLUSH_TXFIFO (1UL << ABRT_SLAVE_FLUSH_TXFIFO) + +#define IC_TX_ABRT_NOACK (IC_TX_ABRT_7B_ADDR_NOACK | \ + IC_TX_ABRT_10ADDR1_NOACK | \ + IC_TX_ABRT_10ADDR2_NOACK | \ + IC_TX_ABRT_TXDATA_NOACK | \ + IC_TX_ABRT_GCALL_NOACK) +#define CONTROLLER_TYPE_IIC 0 +#define CONTROLLER_TYPE_SMBUS 1 + +struct phytium_i2c_dev { + struct device *dev; + void __iomem *base; + int irq; + u32 flags; + struct completion cmd_complete; + struct clk *clk; + struct reset_control *rst; + int mode; + struct i2c_client *slave; + u32 (*get_clk_rate_khz)(struct phytium_i2c_dev *dev); + + struct i2c_adapter adapter; + struct i2c_client *ara; + struct i2c_smbus_alert_setup alert_data; + + struct phytium_pci_i2c *controller; + + unsigned int status; + int cmd_err; + u32 abort_source; + + struct i2c_msg *msgs; + int msgs_num; + int msg_write_idx; + int msg_read_idx; + int msg_err; + u32 tx_buf_len; + u8 *tx_buf; + u32 rx_buf_len; + u8 *rx_buf; + + u32 master_cfg; + u32 slave_cfg; + u32 functionality; + unsigned int tx_fifo_depth; + unsigned int rx_fifo_depth; + int rx_outstanding; + + struct i2c_timings timings; + u32 sda_hold_time; + u16 ss_hcnt; + u16 ss_lcnt; + u16 fs_hcnt; + u16 fs_lcnt; + u16 fp_hcnt; + u16 fp_lcnt; + u16 hs_hcnt; + u16 hs_lcnt; + + bool pm_disabled; + void (*disable)(struct phytium_i2c_dev *dev); + void (*disable_int)(struct phytium_i2c_dev *dev); + int (*init)(struct phytium_i2c_dev *dev); +}; + +#define ACCESS_INTR_MASK 0x00000004 + +#define DEFAULT_CLOCK_FREQUENCY 48000000 + +u32 phytium_readl(struct phytium_i2c_dev *dev, int offset); +void phytium_writel(struct phytium_i2c_dev *dev, u32 b, int offset); +unsigned long i2c_phytium_clk_rate(struct phytium_i2c_dev *dev); +int i2c_phytium_prepare_clk(struct phytium_i2c_dev *dev, bool prepare); +int i2c_phytium_wait_bus_not_busy(struct phytium_i2c_dev *dev); +int i2c_phytium_handle_tx_abort(struct phytium_i2c_dev *dev); +u32 i2c_phytium_func(struct i2c_adapter *adap); +void i2c_phytium_disable(struct phytium_i2c_dev *dev); +void i2c_phytium_disable_int(struct phytium_i2c_dev *dev); +int i2c_phytium_set_sda_hold(struct phytium_i2c_dev *dev); +u32 i2c_phytium_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset); +u32 i2c_phytium_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset); + +static inline void __i2c_phytium_enable(struct phytium_i2c_dev *dev) +{ + phytium_writel(dev, 1, IC_ENABLE); +} + +static inline void __i2c_phytium_disable_nowait(struct 
phytium_i2c_dev *dev)
+{
+	phytium_writel(dev, 0, IC_ENABLE);
+}
+
+void __i2c_phytium_disable(struct phytium_i2c_dev *dev);
+
+extern int i2c_phytium_probe(struct phytium_i2c_dev *dev);
+
+extern int i2c_phytium_probe_slave(struct phytium_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-phytium-master.c b/drivers/i2c/busses/i2c-phytium-master.c
new file mode 100644
index 0000000000000000000000000000000000000000..b525b8e153b86d140e5247d42a5ff4aa17ba1a37
--- /dev/null
+++ b/drivers/i2c/busses/i2c-phytium-master.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium I2C adapter driver.
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "i2c-phytium-core.h"
+
+static int i2c_phytium_init_master(struct phytium_i2c_dev *dev)
+{
+	/* Disable the adapter */
+	__i2c_phytium_disable(dev);
+
+	/* Write standard speed timing parameters */
+	phytium_writel(dev, dev->ss_hcnt, IC_SS_SCL_HCNT);
+	phytium_writel(dev, dev->ss_lcnt, IC_SS_SCL_LCNT);
+
+	/* Write fast mode/fast mode plus timing parameters */
+	phytium_writel(dev, dev->fs_hcnt, IC_FS_SCL_HCNT);
+	phytium_writel(dev, dev->fs_lcnt, IC_FS_SCL_LCNT);
+
+	/* Write high speed timing parameters if supported */
+	if (dev->hs_hcnt && dev->hs_lcnt) {
+		phytium_writel(dev, dev->hs_hcnt, IC_HS_SCL_HCNT);
+		phytium_writel(dev, dev->hs_lcnt, IC_HS_SCL_LCNT);
+	}
+
+	/* Write SDA hold time if supported */
+	if (dev->sda_hold_time)
+		phytium_writel(dev, dev->sda_hold_time, IC_SDA_HOLD);
+
+	/* Configure Tx/Rx FIFO threshold levels */
+	phytium_writel(dev, dev->tx_fifo_depth >> 1, IC_TX_TL);
+	phytium_writel(dev, 0, IC_RX_TL);
+
+	/* Configure the I2C master */
+	phytium_writel(dev, dev->master_cfg, IC_CON);
+
+	return 0;
+}
+
+static void i2c_phytium_xfer_init(struct phytium_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	u32 ic_con, ic_tar = 0;
+
+	/* Disable the adapter */
+	__i2c_phytium_disable(dev);
+
+	/* If the slave address is 10-bit address, enable 10BITADDR */
+	ic_con = phytium_readl(dev, IC_CON);
+	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+		ic_con |= IC_CON_10BITADDR_MASTER;
+		ic_tar = IC_TAR_10BITADDR_MASTER;
+	} else {
+		ic_con &= ~IC_CON_10BITADDR_MASTER;
+	}
+
+	phytium_writel(dev, ic_con, IC_CON);
+
+	/*
+	 * Set the slave (target) address and enable 10-bit addressing mode
+	 * if applicable.
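+	 * The address occupies the low bits of IC_TAR, while bit 12
+	 * (IC_TAR_10BITADDR_MASTER) selects 10-bit addressing, so ic_tar
+	 * can simply be OR-ed into the address below.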
+ */ + phytium_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, IC_TAR); + + /* Enforce disabled interrupts */ + i2c_phytium_disable_int(dev); + + /* Enable the adapter */ + __i2c_phytium_enable(dev); + + /* Dummy read */ + phytium_readl(dev, IC_ENABLE_STATUS); + + /* Clear and enable interrupts */ + phytium_readl(dev, IC_CLR_INTR); + phytium_writel(dev, IC_INTR_SMBUS_MASK, IC_INTR_MASK); +} + +static void i2c_phytium_xfer_msg(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + u32 intr_mask; + int tx_limit, rx_limit; + u32 addr = msgs[dev->msg_write_idx].addr; + u32 buf_len = dev->tx_buf_len; + u8 *buf = dev->tx_buf; + bool need_restart = false; + + intr_mask = IC_INTR_MASTER_MASK; + + for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { + u32 flags = msgs[dev->msg_write_idx].flags; + + if (msgs[dev->msg_write_idx].addr != addr) { + dev_err(dev->dev, + "%s: invalid target address\n", __func__); + dev->msg_err = -EINVAL; + break; + } + + if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { + /* new i2c_msg */ + buf = msgs[dev->msg_write_idx].buf; + buf_len = msgs[dev->msg_write_idx].len; + + if ((dev->master_cfg & IC_CON_RESTART_EN) && + (dev->msg_write_idx > 0)) + need_restart = true; + } + + tx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_TXFLR); + rx_limit = dev->tx_fifo_depth - phytium_readl(dev, IC_RXFLR); + + while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { + u32 cmd = 0; + + if (dev->msg_write_idx == dev->msgs_num - 1 && + buf_len == 1 && !(flags & I2C_M_RECV_LEN)) + cmd |= BIT(9); + + if (need_restart) { + cmd |= BIT(10); + need_restart = false; + } + + if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { + /* avoid rx buffer overrun */ + if (dev->rx_outstanding >= dev->rx_fifo_depth) + break; + + phytium_writel(dev, cmd | 0x100, IC_DATA_CMD); + rx_limit--; + dev->rx_outstanding++; + } else { + phytium_writel(dev, cmd | *buf++, IC_DATA_CMD); + } + tx_limit--; + buf_len--; + } + + dev->tx_buf = buf; + dev->tx_buf_len = buf_len; + + /* + * Because we don't know the buffer length in the + * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop + * the transaction here. + */ + if (buf_len > 0 || flags & I2C_M_RECV_LEN) { + /* more bytes to be written */ + dev->status |= STATUS_WRITE_IN_PROGRESS; + break; + } else { + dev->status &= ~STATUS_WRITE_IN_PROGRESS; + } + } + + if (dev->msg_write_idx == dev->msgs_num) + intr_mask &= ~IC_INTR_TX_EMPTY; + + if (dev->msg_err) + intr_mask = 0; + + phytium_writel(dev, intr_mask, IC_INTR_MASK); +} + +static u8 i2c_phytium_recv_len(struct phytium_i2c_dev *dev, u8 len) +{ + struct i2c_msg *msgs = dev->msgs; + u32 flags = msgs[dev->msg_read_idx].flags; + + /* + * Adjust the buffer length and mask the flag + * after receiving the first byte. + */ + len += (flags & I2C_CLIENT_PEC) ? 
2 : 1; + dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding); + msgs[dev->msg_read_idx].len = len; + msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN; + + return len; +} + +static void i2c_phytium_read(struct phytium_i2c_dev *dev) +{ + struct i2c_msg *msgs = dev->msgs; + int rx_valid; + + for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { + u32 len; + u8 *buf; + + if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) + continue; + + if (!(dev->status & STATUS_READ_IN_PROGRESS)) { + len = msgs[dev->msg_read_idx].len; + buf = msgs[dev->msg_read_idx].buf; + } else { + len = dev->rx_buf_len; + buf = dev->rx_buf; + } + + rx_valid = phytium_readl(dev, IC_RXFLR); + + for (; len > 0 && rx_valid > 0; len--, rx_valid--) { + u32 flags = msgs[dev->msg_read_idx].flags; + + *buf = phytium_readl(dev, IC_DATA_CMD); + /* Ensure length byte is a valid value */ + if (flags & I2C_M_RECV_LEN && + *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) { + len = i2c_phytium_recv_len(dev, *buf); + } + buf++; + dev->rx_outstanding--; + } + + if (len > 0) { + dev->status |= STATUS_READ_IN_PROGRESS; + dev->rx_buf_len = len; + dev->rx_buf = buf; + return; + } else + dev->status &= ~STATUS_READ_IN_PROGRESS; + } +} + +static int i2c_phytium_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) +{ + struct phytium_i2c_dev *dev = i2c_get_adapdata(adapter); + int ret; + + dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); + + pm_runtime_get_sync(dev->dev); + + reinit_completion(&dev->cmd_complete); + dev->msgs = msgs; + dev->msgs_num = num; + dev->cmd_err = 0; + dev->msg_write_idx = 0; + dev->msg_read_idx = 0; + dev->msg_err = 0; + dev->status = STATUS_IDLE; + dev->abort_source = 0; + dev->rx_outstanding = 0; + + ret = i2c_phytium_wait_bus_not_busy(dev); + if (ret < 0) + goto done; + + /* Start the transfers */ + i2c_phytium_xfer_init(dev); + + /* Wait for tx to complete */ + if (!wait_for_completion_timeout(&dev->cmd_complete, adapter->timeout)) { + dev_err(dev->dev, "controller timed out\n"); + i2c_recover_bus(&dev->adapter); + i2c_phytium_init_master(dev); + ret = -ETIMEDOUT; + goto done; + } + + __i2c_phytium_disable_nowait(dev); + + if (dev->msg_err) { + ret = dev->msg_err; + goto done; + } + + if (likely(!dev->cmd_err && !dev->status)) { + ret = num; + goto done; + } + + /* We have got an error */ + if (dev->cmd_err == IC_ERR_TX_ABRT) { + ret = i2c_phytium_handle_tx_abort(dev); + goto done; + } + + if (dev->status) + dev_err(dev->dev, "transfer terminated early.\n"); + + ret = -EIO; + +done: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return ret; +} + +static const struct i2c_algorithm i2c_phytium_algo = { + .master_xfer = i2c_phytium_xfer, + .functionality = i2c_phytium_func, +}; + +static const struct i2c_adapter_quirks i2c_phytium_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, +}; + +static u32 i2c_phytium_read_clear_intrbits(struct phytium_i2c_dev *dev) +{ + u32 stat; + + stat = phytium_readl(dev, IC_INTR_STAT); + + if (stat & IC_INTR_RX_UNDER) + phytium_readl(dev, IC_CLR_RX_UNDER); + if (stat & IC_INTR_RX_OVER) + phytium_readl(dev, IC_CLR_RX_OVER); + if (stat & IC_INTR_TX_OVER) + phytium_readl(dev, IC_CLR_TX_OVER); + if (stat & IC_INTR_RD_REQ) + phytium_readl(dev, IC_CLR_RD_REQ); + if (stat & IC_INTR_TX_ABRT) { + dev->abort_source = phytium_readl(dev, IC_TX_ABRT_SOURCE); + phytium_readl(dev, IC_CLR_TX_ABRT); + } + if (stat & IC_INTR_RX_DONE) + phytium_readl(dev, IC_CLR_RX_DONE); + if (stat & IC_INTR_ACTIVITY) + phytium_readl(dev, IC_CLR_ACTIVITY); + if (stat & 
IC_INTR_STOP_DET) + phytium_readl(dev, IC_CLR_STOP_DET); + if (stat & IC_INTR_START_DET) + phytium_readl(dev, IC_CLR_START_DET); + if (stat & IC_INTR_GEN_CALL) + phytium_readl(dev, IC_CLR_GEN_CALL); + if (stat & IC_INTR_SMBCLK_EXT_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBCLK_EXT_LOW_TIMEOUT); + if (stat & IC_INTR_SMBCLK_TMO_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBCLK_TMO_LOW_TIMEOUT); + if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) + phytium_readl(dev, IC_CLR_SMBDAT_LOW_TIMEOUT); + if (stat & IC_INTR_SMBALERT_IN_N) + phytium_readl(dev, IC_CLR_SMBALERT_IN_N); + + return stat; +} + +static int i2c_phytium_irq_handler_master(struct phytium_i2c_dev *dev) +{ + u32 stat; + + stat = i2c_phytium_read_clear_intrbits(dev); + + /* SMBus interrupt */ + if (stat & (IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | IC_INTR_SMBCLK_TMO_LOW_TIMEOUT)) { + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) & (~BIT(6)), + IC_ENABLE); + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(4), + IC_ENABLE); + goto abort; + } + + if (stat & IC_INTR_SMBSDA_LOW_TIMEOUT) { + phytium_writel(dev, phytium_readl(dev, IC_ENABLE) | BIT(6), + IC_ENABLE); + goto abort; + } + + if (stat & IC_INTR_SMBALERT_IN_N && dev->ara) + i2c_handle_smbus_alert(dev->ara); + + if (stat & IC_INTR_TX_ABRT) { + dev->cmd_err |= IC_ERR_TX_ABRT; + dev->status = STATUS_IDLE; + + /* Anytime TX_ABRT is set, the contents of the tx/rx + * buffers are flushed. Make sure to skip them. + */ + phytium_writel(dev, 0, IC_INTR_MASK); + goto abort; + } + + if (stat & IC_INTR_RX_FULL) + i2c_phytium_read(dev); + + if (stat & IC_INTR_TX_EMPTY) + i2c_phytium_xfer_msg(dev); + +abort: + if ((stat & (IC_INTR_TX_ABRT | IC_INTR_STOP_DET)) || + dev->msg_err) + complete(&dev->cmd_complete); + else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { + /* Workaround to trigger pending interrupt */ + stat = phytium_readl(dev, IC_INTR_MASK); + i2c_phytium_disable_int(dev); + phytium_writel(dev, stat, IC_INTR_MASK); + } + + return 0; +} + +static int i2c_phytium_set_timings_master(struct phytium_i2c_dev *dev) +{ + const char *mode_str, *fp_str = ""; + u32 sda_falling_time, scl_falling_time; + struct i2c_timings *t = &dev->timings; + u32 ic_clk; + int ret; + + /* Set standard and fast speed dividers for high/low periods */ + sda_falling_time = t->sda_fall_ns ?: 300; /* ns */ + scl_falling_time = t->scl_fall_ns ?: 300; /* ns */ + + /* Calculate SCL timing parameters for standard mode if not set */ + if (!dev->ss_hcnt || !dev->ss_lcnt) { + ic_clk = i2c_phytium_clk_rate(dev); + dev->ss_hcnt = + i2c_phytium_scl_hcnt(ic_clk, + 4000, /* tHD;STA = tHIGH = 4.0 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + dev->ss_lcnt = + i2c_phytium_scl_lcnt(ic_clk, + 4700, /* tLOW = 4.7 us */ + scl_falling_time, + 0); /* No offset */ + } + dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n", + dev->ss_hcnt, dev->ss_lcnt); + /* + * Set SCL timing parameters for fast mode or fast mode plus. Only + * difference is the timing parameter values since the registers are + * the same. + */ + if (t->bus_freq_hz == 1000000) { + /* + * Check are fast mode plus parameters available and use + * fast mode if not. + */ + if (dev->fp_hcnt && dev->fp_lcnt) { + dev->fs_hcnt = dev->fp_hcnt; + dev->fs_lcnt = dev->fp_lcnt; + fp_str = " Plus"; + } + } + /* + * Calculate SCL timing parameters for fast mode if not set. They are + * needed also in high speed mode. 
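+	 *
+	 * As a worked example (an assumption for illustration: the default
+	 * 48 MHz reference, i.e. ic_clk = 48000 kHz, and 300 ns fall times):
+	 *
+	 *	fs_hcnt = (48000 * (600 + 300) + 500000) / 1000000 - 3 = 40
+	 *	fs_lcnt = (48000 * (1300 + 300) + 500000) / 1000000 - 1 = 76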
+ */ + if (!dev->fs_hcnt || !dev->fs_lcnt) { + ic_clk = i2c_phytium_clk_rate(dev); + dev->fs_hcnt = + i2c_phytium_scl_hcnt(ic_clk, + 600, /* tHD;STA = tHIGH = 0.6 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + dev->fs_lcnt = + i2c_phytium_scl_lcnt(ic_clk, + 1300, /* tLOW = 1.3 us */ + scl_falling_time, + 0); /* No offset */ + } + dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n", + fp_str, dev->fs_hcnt, dev->fs_lcnt); + + if (dev->hs_hcnt && dev->hs_lcnt) + dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n", + dev->hs_hcnt, dev->hs_lcnt); + + ret = i2c_phytium_set_sda_hold(dev); + if (ret) + goto out; + + switch (dev->master_cfg & IC_CON_SPEED_MASK) { + case IC_CON_SPEED_STD: + mode_str = "Standard Mode"; + break; + case IC_CON_SPEED_HIGH: + mode_str = "High Speed Mode"; + break; + default: + mode_str = "Fast Mode"; + } + dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str); + +out: + return ret; +} + +static irqreturn_t i2c_phytium_isr(int this_irq, void *dev_id) +{ + struct phytium_i2c_dev *dev = dev_id; + u32 stat, enabled; + + enabled = phytium_readl(dev, IC_ENABLE); + stat = phytium_readl(dev, IC_RAW_INTR_STAT); + if (!enabled || !(stat & ~IC_INTR_ACTIVITY)) + return IRQ_NONE; + + i2c_phytium_irq_handler_master(dev); + + return IRQ_HANDLED; +} + +int i2c_phytium_probe(struct phytium_i2c_dev *dev) +{ + struct i2c_adapter *adapter = &dev->adapter; + unsigned long irq_flags; + int ret; + + init_completion(&dev->cmd_complete); + + dev->init = i2c_phytium_init_master; + dev->disable = i2c_phytium_disable; + dev->disable_int = i2c_phytium_disable_int; + + ret = i2c_phytium_set_timings_master(dev); + if (ret) + return ret; + + ret = dev->init(dev); + if (ret) + return ret; + + /* XXX: should be initialized in firmware, remove it in future */ +#define DEFAULT_TIMEOUT (DEFAULT_CLOCK_FREQUENCY / 1000 * 35) + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_MEXT); + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBCLK_LOW_TIMEOUT); + phytium_writel(dev, DEFAULT_TIMEOUT, IC_SMBDAT_STUCK_TIMEOUT); + + snprintf(adapter->name, sizeof(adapter->name), "Phytium I2C adapter"); + adapter->retries = 3; + adapter->algo = &i2c_phytium_algo; + adapter->quirks = &i2c_phytium_quirks; + adapter->dev.parent = dev->dev; + i2c_set_adapdata(adapter, dev); + + irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; + + i2c_phytium_disable_int(dev); + ret = devm_request_irq(dev->dev, dev->irq, i2c_phytium_isr, irq_flags, + dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failed to request irq %i: %d\n", dev->irq, ret); + return ret; + } + + /* + * Increment PM usage count during adapter registration in order to + * avoid possible spurious runtime suspend when adapter device is + * registered to the device core and immediate resume in case bus has + * registered I2C slaves that do I2C transfers in their probe. 
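+	 *
+	 * pm_runtime_get_noresume()/pm_runtime_put_noidle() below adjust
+	 * the usage count without changing the device's power state, so the
+	 * count stays balanced whether or not registration succeeds.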
+	 */
+	pm_runtime_get_noresume(dev->dev);
+	ret = i2c_add_numbered_adapter(adapter);
+	if (ret)
+		dev_err(dev->dev, "failed to add adapter: %d\n", ret);
+	pm_runtime_put_noidle(dev->dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i2c_phytium_probe);
+
+MODULE_DESCRIPTION("Phytium I2C bus master adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-phytium-pci.c b/drivers/i2c/busses/i2c-phytium-pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..0e0a72468bb55b121d5070d2991a8dfe23d4160d
--- /dev/null
+++ b/drivers/i2c/busses/i2c-phytium-pci.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PCI driver for Phytium I2C adapter.
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "i2c-phytium-core.h"
+
+#define DRV_NAME "i2c-phytium-pci"
+
+enum phytium_pci_ctl_id_t {
+	octopus_i2c,
+};
+
+struct scl_sda_cfg {
+	u32 ss_hcnt;
+	u32 fs_hcnt;
+	u32 ss_lcnt;
+	u32 fs_lcnt;
+	u32 sda_hold;
+};
+
+struct phytium_pci_i2c {
+	u32 bus_num;
+	u32 bus_cfg;
+	u32 tx_fifo_depth;
+	u32 rx_fifo_depth;
+	u32 clk_khz;
+	u32 functionality;
+	u32 flags;
+	struct scl_sda_cfg *scl_sda_cfg;
+	int (*setup)(struct pci_dev *pdev, struct phytium_pci_i2c *c);
+};
+
+/* Octopus HCNT/LCNT/SDA hold time */
+static struct scl_sda_cfg octopus_config = {
+	.ss_hcnt = 0x190,
+	.fs_hcnt = 0x3c,
+	.ss_lcnt = 0x1d6,
+	.fs_lcnt = 0x82,
+	.sda_hold = 0x0, // XXX
+};
+
+static int octopus_setup(struct pci_dev *pdev, struct phytium_pci_i2c *c)
+{
+	struct phytium_i2c_dev *i2c = pci_get_drvdata(pdev);
+
+	if (pdev->device == 0xdc32) {
+		/*
+		 * Since we have already registered the adapter, the dev->irq
+		 * must be valid.
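+		 * i2c_setup_smbus_alert() attaches the SMBus Alert Response
+		 * Address (ARA, 0x0c) client to this adapter; the ISR then
+		 * forwards SMBALERT_IN_N interrupts to it via
+		 * i2c_handle_smbus_alert().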
+ */ + i2c->alert_data.irq = i2c->irq; + + i2c->ara = i2c_setup_smbus_alert(&i2c->adapter, &i2c->alert_data); + if (!i2c->ara) + return -ENODEV; + } + + return 0; +} + +static struct phytium_pci_i2c pci_ctrl_info[] = { + [octopus_i2c] = { + .bus_num = -1, + .bus_cfg = IC_CON_MASTER | IC_CON_SLAVE_DISABLE | + IC_CON_RESTART_EN | IC_CON_SPEED_FAST, + .tx_fifo_depth = 7, + .rx_fifo_depth = 7, + .functionality = I2C_FUNC_10BIT_ADDR, + .clk_khz = 48000000, + .scl_sda_cfg = &octopus_config, + .setup = octopus_setup, + }, +}; + +#ifdef CONFIG_PM +static int i2c_phytium_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); + + i_dev->disable(i_dev); + + return 0; +} + +static int i2c_phytium_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_i2c_dev *i_dev = pci_get_drvdata(pdev); + + return i_dev->init(i_dev); +} +#endif + +static UNIVERSAL_DEV_PM_OPS(i2c_phytium_pm_ops, i2c_phytium_pci_suspend, + i2c_phytium_pci_resume, NULL); + +static u32 i2c_phytium_get_clk_rate_khz(struct phytium_i2c_dev *dev) +{ + return dev->controller->clk_khz; +} + +static int i2c_phytium_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_i2c_dev *dev; + struct i2c_adapter *adapter; + struct phytium_pci_i2c *controller; + struct scl_sda_cfg *cfg; + int ret; + + if (id->driver_data >= ARRAY_SIZE(pci_ctrl_info)) { + dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__, + id->driver_data); + ret = -EINVAL; + goto out; + } + + controller = &pci_ctrl_info[id->driver_data]; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n", ret); + goto out; + } + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out; + } + + dev = devm_kzalloc(&pdev->dev, sizeof(struct phytium_i2c_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto out; + } + + dev->controller = controller; + dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; + dev->base = pcim_iomap_table(pdev)[0]; + dev->dev = &pdev->dev; + dev->irq = pdev->irq; + dev->flags |= controller->flags; + + dev->functionality = controller->functionality | IC_DEFAULT_FUNCTIONALITY; + dev->master_cfg = controller->bus_cfg; + if (controller->scl_sda_cfg) { + cfg = controller->scl_sda_cfg; + dev->ss_hcnt = cfg->ss_hcnt; + dev->fs_hcnt = cfg->fs_hcnt; + dev->ss_lcnt = cfg->ss_lcnt; + dev->fs_lcnt = cfg->fs_lcnt; + dev->sda_hold_time = cfg->sda_hold; + } + + pci_set_drvdata(pdev, dev); + + dev->tx_fifo_depth = controller->tx_fifo_depth; + dev->rx_fifo_depth = controller->rx_fifo_depth; + + adapter = &dev->adapter; + adapter->owner = THIS_MODULE; + adapter->class = 0; + ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev)); + adapter->nr = controller->bus_num; + + ret = i2c_phytium_probe(dev); + if (ret) + goto out; + + if (controller->setup) { + ret = controller->setup(pdev, controller); + if (ret) + goto out; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_allow(&pdev->dev); + +out: + return ret; +} + +static void i2c_phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_i2c_dev *dev = pci_get_drvdata(pdev); + + dev->disable(dev); + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + i2c_del_adapter(&dev->adapter); +} + +static const struct 
pci_device_id i2_phytium_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc32), octopus_i2c }, + { PCI_VDEVICE(PHYTIUM, 0xdc30), octopus_i2c }, + { } +}; +MODULE_DEVICE_TABLE(pci, i2_phytium_pci_ids); + +static struct pci_driver phytium_i2c_driver = { + .name = DRV_NAME, + .id_table = i2_phytium_pci_ids, + .probe = i2c_phytium_pci_probe, + .remove = i2c_phytium_pci_remove, + .driver = { + .pm = &i2c_phytium_pm_ops, + }, +}; + +module_pci_driver(phytium_i2c_driver); + +MODULE_ALIAS("i2c-phytium-pci"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium PCI I2C bus adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-platform.c b/drivers/i2c/busses/i2c-phytium-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..66709bb91d69823ce19bb0cfaeed73ca72674c33 --- /dev/null +++ b/drivers/i2c/busses/i2c-phytium-platform.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phytium-core.h" + +#define DRV_NAME "i2c-phytium-platform" + +static u32 i2c_phytium_get_clk_rate_khz(struct phytium_i2c_dev *dev) +{ + return clk_get_rate(dev->clk)/1000; +} + +#ifdef CONFIG_ACPI +static void phytium_i2c_acpi_params(struct platform_device *pdev, char method[], + u16 *hcnt, u16 *lcnt, u32 *sda_hold) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + union acpi_object *obj; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) + return; + + obj = (union acpi_object *)buf.pointer; + if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { + const union acpi_object *objs = obj->package.elements; + + *hcnt = (u16)objs[0].integer.value; + *lcnt = (u16)objs[1].integer.value; + *sda_hold = (u32)objs[2].integer.value; + } + + kfree(buf.pointer); +} + +static int phytium_i2c_acpi_configure(struct platform_device *pdev) +{ + struct phytium_i2c_dev *dev = platform_get_drvdata(pdev); + struct i2c_timings *t = &dev->timings; + u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + const struct acpi_device_id *id; + struct acpi_device *adev; + + dev->adapter.nr = -1; + dev->tx_fifo_depth = 32; + dev->rx_fifo_depth = 32; + + /* + * Try to get SDA hold time and *CNT values from an ACPI method for + * selected speed modes. 
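+	 * Each method is expected to return a package of three integers,
+	 * e.g. (ASL sketch; names follow this driver's convention):
+	 *
+	 *	Name (SSCN, Package () { HCNT, LCNT, SDA_HOLD })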
+	 */
+	phytium_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
+	phytium_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
+	phytium_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
+	phytium_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
+
+	switch (t->bus_freq_hz) {
+	case 100000:
+		dev->sda_hold_time = ss_ht;
+		break;
+	case 1000000:
+		dev->sda_hold_time = fp_ht;
+		break;
+	case 3400000:
+		dev->sda_hold_time = hs_ht;
+		break;
+	case 400000:
+	default:
+		dev->sda_hold_time = fs_ht;
+		break;
+	}
+
+	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+	if (id && id->driver_data)
+		dev->flags |= (u32)id->driver_data;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return -ENODEV;
+
+	return 0;
+}
+
+static const struct acpi_device_id phytium_i2c_acpi_match[] = {
+	{ "PHYT0038", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, phytium_i2c_acpi_match);
+#else
+static inline int phytium_i2c_acpi_configure(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif
+
+static void i2c_phytium_configure_master(struct phytium_i2c_dev *dev)
+{
+	struct i2c_timings *t = &dev->timings;
+
+	dev->functionality = I2C_FUNC_10BIT_ADDR | IC_DEFAULT_FUNCTIONALITY;
+
+	dev->master_cfg = IC_CON_MASTER | IC_CON_SLAVE_DISABLE |
+			  IC_CON_RESTART_EN;
+
+	dev->mode = PHYTIUM_IC_MASTER;
+
+	switch (t->bus_freq_hz) {
+	case 100000:
+		dev->master_cfg |= IC_CON_SPEED_STD;
+		break;
+	case 3400000:
+		dev->master_cfg |= IC_CON_SPEED_HIGH;
+		break;
+	default:
+		dev->master_cfg |= IC_CON_SPEED_FAST;
+	}
+}
+
+static void i2c_phytium_configure_slave(struct phytium_i2c_dev *dev)
+{
+	dev->functionality = I2C_FUNC_SLAVE | IC_DEFAULT_FUNCTIONALITY;
+
+	dev->slave_cfg = IC_CON_RX_FIFO_FULL_HLD_CTRL |
+			 IC_CON_RESTART_EN | IC_CON_STOP_DET_IFADDRESSED;
+
+	dev->mode = PHYTIUM_IC_SLAVE;
+}
+
+static int phytium_i2c_plat_probe(struct platform_device *pdev)
+{
+	struct i2c_adapter *adap;
+	struct phytium_i2c_dev *dev;
+	struct i2c_timings *t;
+	u32 acpi_speed;
+	struct resource *mem;
+	int irq, ret, i;
+	static const int supported_speeds[] = {
+		0, 100000, 400000, 1000000, 3400000
+	};
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(struct phytium_i2c_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dev->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(dev->base))
+		return PTR_ERR(dev->base);
+
+	dev->dev = &pdev->dev;
+	dev->irq = irq;
+	platform_set_drvdata(pdev, dev);
+
+	dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+	if (IS_ERR(dev->rst)) {
+		if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+	} else {
+		reset_control_deassert(dev->rst);
+	}
+
+	t = &dev->timings;
+	i2c_parse_fw_timings(&pdev->dev, t, false);
+
+	acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+	/*
+	 * Some DSDTs use a non-standard speed; round down to the lowest
+	 * standard speed.
+	 */
+	for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
+		if (acpi_speed < supported_speeds[i])
+			break;
+	}
+	acpi_speed = supported_speeds[i - 1];
+
+	/*
+	 * Find bus speed from the "clock-frequency" device property, ACPI
+	 * or by using fast mode if neither is set.
+ */ + if (acpi_speed && t->bus_freq_hz) + t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); + else if (acpi_speed || t->bus_freq_hz) + t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); + else + t->bus_freq_hz = 400000; + + if (has_acpi_companion(&pdev->dev)) + phytium_i2c_acpi_configure(pdev); + + /* + * Only standard mode at 100kHz, fast mode at 400kHz, + * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. + */ + if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 && + t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) { + dev_err(&pdev->dev, + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + t->bus_freq_hz); + ret = -EINVAL; + goto exit_reset; + } + + if (i2c_detect_slave_mode(&pdev->dev)) + i2c_phytium_configure_slave(dev); + else + i2c_phytium_configure_master(dev); + + dev->clk = devm_clk_get(&pdev->dev, NULL); + if (!i2c_phytium_prepare_clk(dev, true)) { + u64 clk_khz; + + dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; + clk_khz = dev->get_clk_rate_khz(dev); + + if (!dev->sda_hold_time && t->sda_hold_ns) + dev->sda_hold_time = + div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); + } + + dev->tx_fifo_depth = 7; + dev->rx_fifo_depth = 7; + dev->adapter.nr = pdev->id; + + adap = &dev->adapter; + adap->owner = THIS_MODULE; + adap->class = I2C_CLASS_DEPRECATED; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + adap->dev.of_node = pdev->dev.of_node; + + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_LEAVE_SUSPENDED); + + /* The code below assumes runtime PM to be disabled. */ + WARN_ON(pm_runtime_enabled(&pdev->dev)); + + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + + pm_runtime_enable(&pdev->dev); + + if (dev->mode == PHYTIUM_IC_SLAVE) + ret = i2c_phytium_probe_slave(dev); + else + ret = i2c_phytium_probe(dev); + + if (ret) + goto exit_probe; + + return ret; + +exit_probe: + pm_runtime_disable(dev->dev); +exit_reset: + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + return ret; +} + +static int phytium_i2c_plat_remove(struct platform_device *pdev) +{ + struct phytium_i2c_dev *dev = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + i2c_del_adapter(&dev->adapter); + + dev->disable(dev); + + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(dev->dev); + + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id phytium_i2c_of_match[] = { + { .compatible = "phytium,i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, phytium_i2c_of_match); +#endif + +static int __maybe_unused phytium_i2c_plat_suspend(struct device *dev) +{ + struct phytium_i2c_dev *idev = dev_get_drvdata(dev); + + idev->disable(idev); + i2c_phytium_prepare_clk(idev, false); + + return 0; +} + +static int __maybe_unused phytium_i2c_plat_resume(struct device *dev) +{ + struct phytium_i2c_dev *idev = dev_get_drvdata(dev); + + i2c_phytium_prepare_clk(idev, true); + + idev->init(idev); + + return 0; +} + +static const struct dev_pm_ops phytium_i2c_dev_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(phytium_i2c_plat_suspend, + phytium_i2c_plat_resume) + SET_RUNTIME_PM_OPS(phytium_i2c_plat_suspend, + phytium_i2c_plat_resume, NULL) +}; + +static struct platform_driver phytium_i2c_driver = { + .probe = phytium_i2c_plat_probe, + .remove = phytium_i2c_plat_remove, + 
.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(phytium_i2c_of_match),
+		.acpi_match_table = ACPI_PTR(phytium_i2c_acpi_match),
+		.pm = &phytium_i2c_dev_pm_ops,
+	},
+};
+module_platform_driver(phytium_i2c_driver);
+
+MODULE_ALIAS("platform:i2c-phytium");
+MODULE_AUTHOR("Chen Baozi ");
+MODULE_DESCRIPTION("Phytium I2C bus adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-phytium-slave.c b/drivers/i2c/busses/i2c-phytium-slave.c
new file mode 100644
index 0000000000000000000000000000000000000000..a9409f55c6522e15b1127cf793e89dd5d97b2c3f
--- /dev/null
+++ b/drivers/i2c/busses/i2c-phytium-slave.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium I2C adapter driver (slave only).
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "i2c-phytium-core.h"
+
+static void i2c_phytium_configure_fifo_slave(struct phytium_i2c_dev *dev)
+{
+	/* Configure Tx/Rx FIFO threshold levels. */
+	phytium_writel(dev, 0, IC_TX_TL);
+	phytium_writel(dev, 0, IC_RX_TL);
+
+	/* Configure the I2C slave. */
+	phytium_writel(dev, dev->slave_cfg, IC_CON);
+	phytium_writel(dev, IC_INTR_SLAVE_MASK, IC_INTR_MASK);
+}
+
+static int i2c_phytium_init_slave(struct phytium_i2c_dev *dev)
+{
+	/* Disable the adapter. */
+	__i2c_phytium_disable(dev);
+
+	/* Write SDA hold time if supported */
+	if (dev->sda_hold_time)
+		phytium_writel(dev, dev->sda_hold_time, IC_SDA_HOLD);
+
+	i2c_phytium_configure_fifo_slave(dev);
+
+	return 0;
+}
+
+static int i2c_phytium_reg_slave(struct i2c_client *slave)
+{
+	struct phytium_i2c_dev *dev = i2c_get_adapdata(slave->adapter);
+
+	if (dev->slave)
+		return -EBUSY;
+	if (slave->flags & I2C_CLIENT_TEN)
+		return -EAFNOSUPPORT;
+	pm_runtime_get_sync(dev->dev);
+
+	/*
+	 * Set slave address in the IC_SAR register,
+	 * the address to which the i2c responds.
+	 */
+	__i2c_phytium_disable_nowait(dev);
+	phytium_writel(dev, slave->addr, IC_SAR);
+	dev->slave = slave;
+
+	__i2c_phytium_enable(dev);
+
+	dev->cmd_err = 0;
+	dev->msg_write_idx = 0;
+	dev->msg_read_idx = 0;
+	dev->msg_err = 0;
+	dev->status = STATUS_IDLE;
+	dev->abort_source = 0;
+	dev->rx_outstanding = 0;
+
+	return 0;
+}
+
+static int i2c_phytium_unreg_slave(struct i2c_client *slave)
+{
+	struct phytium_i2c_dev *dev = i2c_get_adapdata(slave->adapter);
+
+	dev->disable_int(dev);
+	dev->disable(dev);
+	dev->slave = NULL;
+	pm_runtime_put(dev->dev);
+
+	return 0;
+}
+
+static u32 i2c_phytium_read_clear_intrbits_slave(struct phytium_i2c_dev *dev)
+{
+	u32 stat;
+
+	/*
+	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
+	 * The unmasked raw versions of the interrupt status bits are
+	 * available in the IC_RAW_INTR_STAT register.
+	 *
+	 * That is,
+	 *   stat = phytium_readl(IC_INTR_STAT);
+	 * equals to,
+	 *   stat = phytium_readl(IC_RAW_INTR_STAT) & phytium_readl(IC_INTR_MASK);
+	 *
+	 * The raw version might be useful for debugging purposes.
+	 */
+	stat = phytium_readl(dev, IC_INTR_STAT);
+
+	/*
+	 * Do not use the IC_CLR_INTR register to clear interrupts, or
+	 * you'll miss some interrupts, triggered during the period from
+	 * phytium_readl(IC_INTR_STAT) to phytium_readl(IC_CLR_INTR).
+	 *
+	 * Instead, use the separately-prepared IC_CLR_* registers.
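+	 *
+	 * For example, a STOP_DET that fires between the IC_INTR_STAT read
+	 * and an IC_CLR_INTR read would be wiped out before it was ever
+	 * observed, whereas the per-bit IC_CLR_* reads below only clear
+	 * what has already been captured in 'stat'.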
+ */ + if (stat & IC_INTR_TX_ABRT) + phytium_readl(dev, IC_CLR_TX_ABRT); + if (stat & IC_INTR_RX_UNDER) + phytium_readl(dev, IC_CLR_RX_UNDER); + if (stat & IC_INTR_RX_OVER) + phytium_readl(dev, IC_CLR_RX_OVER); + if (stat & IC_INTR_TX_OVER) + phytium_readl(dev, IC_CLR_TX_OVER); + if (stat & IC_INTR_RX_DONE) + phytium_readl(dev, IC_CLR_RX_DONE); + if (stat & IC_INTR_ACTIVITY) + phytium_readl(dev, IC_CLR_ACTIVITY); + if (stat & IC_INTR_STOP_DET) + phytium_readl(dev, IC_CLR_STOP_DET); + if (stat & IC_INTR_START_DET) + phytium_readl(dev, IC_CLR_START_DET); + if (stat & IC_INTR_GEN_CALL) + phytium_readl(dev, IC_CLR_GEN_CALL); + + return stat; +} + +/* + * Interrupt service routine. This gets called whenever an I2C slave interrupt + * occurs. + */ +static int i2c_phytium_irq_handler_slave(struct phytium_i2c_dev *dev) +{ + u32 raw_stat, stat, enabled; + u8 val, slave_activity; + + stat = phytium_readl(dev, IC_INTR_STAT); + enabled = phytium_readl(dev, IC_ENABLE); + raw_stat = phytium_readl(dev, IC_RAW_INTR_STAT); + slave_activity = ((phytium_readl(dev, IC_STATUS) & + IC_STATUS_SLAVE_ACTIVITY) >> 6); + + if (!enabled || !(raw_stat & ~IC_INTR_ACTIVITY) || !dev->slave) + return 0; + + dev_dbg(dev->dev, + "%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n", + enabled, slave_activity, raw_stat, stat); + + if ((stat & IC_INTR_RX_FULL) && (stat & IC_INTR_STOP_DET)) + i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); + + if (stat & IC_INTR_RD_REQ) { + if (slave_activity) { + if (stat & IC_INTR_RX_FULL) { + val = phytium_readl(dev, IC_DATA_CMD); + + if (!i2c_slave_event(dev->slave, + I2C_SLAVE_WRITE_RECEIVED, + &val)) { + dev_vdbg(dev->dev, "Byte %X acked!", + val); + } + phytium_readl(dev, IC_CLR_RD_REQ); + stat = i2c_phytium_read_clear_intrbits_slave(dev); + } else { + phytium_readl(dev, IC_CLR_RD_REQ); + phytium_readl(dev, IC_CLR_RX_UNDER); + stat = i2c_phytium_read_clear_intrbits_slave(dev); + } + if (!i2c_slave_event(dev->slave, + I2C_SLAVE_READ_REQUESTED, + &val)) + phytium_writel(dev, val, IC_DATA_CMD); + } + } + + if (stat & IC_INTR_RX_DONE) { + if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, + &val)) + phytium_readl(dev, IC_CLR_RX_DONE); + + i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); + stat = i2c_phytium_read_clear_intrbits_slave(dev); + return 1; + } + + if (stat & IC_INTR_RX_FULL) { + val = phytium_readl(dev, IC_DATA_CMD); + if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, + &val)) + dev_vdbg(dev->dev, "Byte %X acked!", val); + } else { + i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); + stat = i2c_phytium_read_clear_intrbits_slave(dev); + } + + return 1; +} + +static irqreturn_t i2c_phytium_isr_slave(int this_irq, void *dev_id) +{ + struct phytium_i2c_dev *dev = dev_id; + int ret; + + i2c_phytium_read_clear_intrbits_slave(dev); + ret = i2c_phytium_irq_handler_slave(dev); + if (ret > 0) + complete(&dev->cmd_complete); + + return IRQ_RETVAL(ret); +} + +static const struct i2c_algorithm i2c_phytium_algo = { + .functionality = i2c_phytium_func, + .reg_slave = i2c_phytium_reg_slave, + .unreg_slave = i2c_phytium_unreg_slave, +}; + +int i2c_phytium_probe_slave(struct phytium_i2c_dev *dev) +{ + struct i2c_adapter *adap = &dev->adapter; + int ret; + + init_completion(&dev->cmd_complete); + + dev->init = i2c_phytium_init_slave; + dev->disable = i2c_phytium_disable; + dev->disable_int = i2c_phytium_disable_int; + + ret = dev->init(dev); + if (ret) + return ret; + + snprintf(adap->name, sizeof(adap->name), + "Phytium I2C Slave adapter"); + 
adap->retries = 3; + adap->algo = &i2c_phytium_algo; + adap->dev.parent = dev->dev; + i2c_set_adapdata(adap, dev); + + ret = devm_request_irq(dev->dev, dev->irq, i2c_phytium_isr_slave, + IRQF_SHARED, dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev->irq, ret); + return ret; + } + + ret = i2c_add_numbered_adapter(adap); + if (ret) + dev_err(dev->dev, "failure adding adapter: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_phytium_probe_slave); diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 1dabd366ec0bcc23050c30f3bb8d25f0031d4506..86579c9e56bf06a3c337a3f68a72fb4665f4834f 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -943,4 +943,16 @@ config XILINX_XADC The driver can also be build as a module. If so, the module will be called xilinx-xadc. +config PHYTIUM_ADC + tristate "Phytium ADC driver" + depends on ARCH_PHYTIUM || COMPILE_TEST + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for Phytium analog to digital + converters (ADC). + + To compile this driver as a module, choose M here: the module + will be called phytium-adc. + endmenu diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 03db7b578f9c077c4a5790f79f8ba48beb8d56dd..7d58314fb0cfbff7309d5030d51b88d081011146 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -86,3 +86,4 @@ obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o +obj-$(CONFIG_PHYTIUM_ADC) += phytium-adc.o diff --git a/drivers/iio/adc/phytium-adc.c b/drivers/iio/adc/phytium-adc.c new file mode 100755 index 0000000000000000000000000000000000000000..e8dfa4546326f4da11f6bcc2426d0343ba97d48a --- /dev/null +++ b/drivers/iio/adc/phytium-adc.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium ADC device driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
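+ *
+ * The controller provides up to eight multiplexed input channels with
+ * 10-bit conversion results, per-channel high/low threshold (limit)
+ * interrupts, and both single and continuous conversion modes (see the
+ * register definitions below).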
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* ADC register */ +#define ADC_CTRL_REG 0x00 +#define ADC_CTRL_REG_PD_EN BIT(31) +#define ADC_CTRL_REG_CH_ONLY_S(x) ((x & 0x7) << 16) +#define ADC_CTRL_REG_CLK_DIV(x) ((x) << 12) +#define ADC_CTRL_REG_CHANNEL_EN(x) BIT((x) + 4) +#define ADC_CTRL_REG_CH_ONLY_EN BIT(3) +#define ADC_CTRL_REG_SINGLE_EN BIT(2) +#define ADC_CTRL_REG_SINGLE_SEL BIT(1) +#define ADC_CTRL_REG_SOC_EN BIT(0) +#define ADC_INTER_REG 0x04 +#define ADC_STATE_REG 0x08 +#define ADC_STATE_REG_B_STA(x) ((x) << 8) +#define ADC_STATE_REG_EOC_STA BIT(7) +#define ADC_STATE_REG_S_STA(x) ((x) << 4) +#define ADC_STATE_REG_SOC_STA BIT(3) +#define ADC_STATE_REG_ERR_STA BIT(2) +#define ADC_STATE_REG_COV_FINISH_STA BIT(1) +#define ADC_STATE_REG_ADCCTL_BUSY_STA BIT(0) +#define ADC_ERRCLR_REG 0x0c +#define ADC_LEVEL_REG(x) (0x10 + ((x) << 2)) +#define ADC_LEVEL_REG_HIGH_LEVEL(x) ((x) << 16) +#define ADC_LEVEL_REG_LOW_LEVEL(x) (x) +#define ADC_INTRMASK_REG 0x30 +#define ADC_INTRMASK_REG_ERR_INTR_MASK BIT(24) +#define ADC_INTRMASK_REG_ULIMIT_OFF(x) BIT(9 + ((x) << 1)) +#define ADC_INTRMASK_REG_DLIMIT_MASK(x) BIT(8 + ((x) << 1)) +#define ADC_INTRMASK_REG_COVFIN_MASK(x) BIT((x)) +#define ADC_INTR_REG 0x34 +#define ADC_INTR_REG_ERR BIT(24) +#define ADC_INTR_REG_ULIMIT(x) BIT(9 + ((x) << 1)) +#define ADC_INTR_REG_DLIMIT(x) BIT(8 + ((x) << 1)) +#define ADC_INTR_REG_LIMIT_MASK GENMASK(23, 8) +#define ADC_INTR_REG_COVFIN(x) BIT((x)) +#define ADC_INTR_REG_COVFIN_MASK GENMASK(7, 0) +#define ADC_COV_RESULT_REG(x) (0x38 + ((x) << 2)) +#define ADC_COV_RESULT_REG_MASK GENMASK(9, 0) +#define ADC_FINISH_CNT_REG(x) (0x58 + ((x) << 2)) +#define ADC_HIS_LIMIT_REG(x) (0x78 + ((x) << 2)) + +#define PHYTIUM_MAX_CHANNELS 8 +#define PHYTIUM_ADC_TIMEOUT usecs_to_jiffies(1000 * 1000) + +static const struct iio_event_spec phytium_adc_event[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, +}; + +struct phytium_adc_data { + const struct iio_chan_spec *channels; + u8 num_channels; +}; + +struct phytium_adc { + struct device *dev; + void __iomem *regs; + struct clk *adc_clk; + + u32 interval; + u16 thresh_high[PHYTIUM_MAX_CHANNELS]; + u16 thresh_low[PHYTIUM_MAX_CHANNELS]; + u16 last_val[PHYTIUM_MAX_CHANNELS]; + const struct phytium_adc_data *data; + u16 *scan_data; + + struct completion completion; + struct mutex lock; +}; + +static ssize_t phytium_adc_show_conv_interval(struct iio_dev *indio_dev, + uintptr_t priv, + struct iio_chan_spec const *ch, + char *buf) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + + return sprintf(buf, "%u\n", adc->interval); +} + +static ssize_t phytium_adc_store_conv_interval(struct iio_dev *indio_dev, + uintptr_t priv, + struct iio_chan_spec const *ch, + const char *buf, size_t len) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + u32 interval; + int ret; + + ret = kstrtou32(buf, 0, &interval); + if (ret < 0) + return ret; + + mutex_lock(&adc->lock); + adc->interval = interval; + mutex_unlock(&adc->lock); + + return len; +} + +static const struct iio_chan_spec_ext_info phytium_adc_ext_info[] = { + { + .name = "conv_interval", + .read = phytium_adc_show_conv_interval, + .write = phytium_adc_store_conv_interval, + }, + { /* sentinel */ } +}; + +static int 
phytium_adc_parse_properties(struct platform_device *pdev, struct phytium_adc *adc) +{ + struct iio_chan_spec *chan_array; + struct fwnode_handle *fwnode; + struct phytium_adc_data *data; + unsigned int channel; + int num_channels; + int ret, i = 0; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + num_channels = device_get_child_node_count(&pdev->dev); + if (!num_channels) { + dev_err(&pdev->dev, "no channel children\n"); + return -ENODEV; + } + + if (num_channels > PHYTIUM_MAX_CHANNELS) { + dev_err(&pdev->dev, "num of channel children out of range\n"); + return -EINVAL; + } + + chan_array = devm_kcalloc(&pdev->dev, num_channels, sizeof(*chan_array), + GFP_KERNEL); + if (!chan_array) + return -ENOMEM; + + device_for_each_child_node(&pdev->dev, fwnode) { + ret = fwnode_property_read_u32(fwnode, "reg", &channel); + if (ret) + return ret; + + if (channel >= PHYTIUM_MAX_CHANNELS) + return -EINVAL; + + chan_array[i].type = IIO_VOLTAGE; + chan_array[i].indexed = 1; + chan_array[i].channel = channel; + chan_array[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW); + chan_array[i].event_spec = phytium_adc_event; + chan_array[i].num_event_specs = ARRAY_SIZE(phytium_adc_event); + chan_array[i].scan_index = channel; + chan_array[i].scan_type.sign = 'u'; + chan_array[i].scan_type.realbits = 10; + chan_array[i].scan_type.storagebits = 16; + chan_array[i].scan_type.endianness = IIO_LE; + chan_array[i].ext_info = phytium_adc_ext_info; + i++; + } + + data->num_channels = num_channels; + data->channels = chan_array; + adc->data = data; + + return 0; +} + +static void phytium_adc_start_stop(struct phytium_adc *adc, bool start) +{ + u32 ctrl; + + ctrl = readl(adc->regs + ADC_CTRL_REG); + if (start) + ctrl |= ADC_CTRL_REG_SOC_EN | ADC_CTRL_REG_SINGLE_EN; + else + ctrl &= ~ADC_CTRL_REG_SOC_EN; + /* Start conversion */ + writel(ctrl, adc->regs + ADC_CTRL_REG); +} + +static void phytium_adc_power_setup(struct phytium_adc *adc, bool on) +{ + u32 reg; + + reg = readl(adc->regs + ADC_CTRL_REG); + if (on) + reg &= ~ADC_CTRL_REG_PD_EN; + else + reg |= ADC_CTRL_REG_PD_EN; + writel(reg, adc->regs + ADC_CTRL_REG); +} + +static int phytium_adc_hw_init(struct phytium_adc *adc) +{ + int ret; + u32 reg; + + ret = clk_prepare_enable(adc->adc_clk); + if (ret) + return ret; + + /* + * Setup ctrl register: + * - Power up conversion module + * - Set the division by 4 as default + */ + reg = ADC_CTRL_REG_CLK_DIV(4); + writel(reg, adc->regs + ADC_CTRL_REG); + + /* Set all the interrupt mask, unmask them when necessary. 
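+	 * Per the ADC_INTRMASK_REG bit definitions above: bit 24 masks the
+	 * error interrupt, bits 23:8 the per-channel upper/lower limit
+	 * interrupts, and bits 7:0 the per-channel conversion-finish
+	 * interrupts, so 0x1ffffff masks everything.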
*/ + writel(0x1ffffff, adc->regs + ADC_INTRMASK_REG); + + /* Set default conversion interval */ + adc->interval = (clk_get_rate(adc->adc_clk) * 1000) / NSEC_PER_SEC; + + phytium_adc_power_setup(adc, true); + + return 0; +} + +static void phytium_adc_intrmask_setup(struct phytium_adc *adc, unsigned long chan_mask, bool on) +{ + u32 reg; + u16 limit_mask = 0; + int ch; + + for_each_set_bit(ch, &chan_mask, PHYTIUM_MAX_CHANNELS) + limit_mask |= BIT(ch << 1) | BIT((ch << 1) + 1); + + reg = readl(adc->regs + ADC_INTRMASK_REG); + if (on) + reg &= ~(ADC_INTRMASK_REG_ERR_INTR_MASK | + (limit_mask << 8) | chan_mask); + else + reg |= (ADC_INTRMASK_REG_ERR_INTR_MASK | + (limit_mask << 8) | chan_mask); + writel(reg, adc->regs + ADC_INTRMASK_REG); +} + +static void phytium_adc_single_conv_setup(struct phytium_adc *adc, u8 ch) +{ + u32 reg; + + /* + * Setup control register: + * - Single conversion mode selection + * - Single conversion enable + * - Fixed channel conversion + * - Target channel + */ + reg = readl(adc->regs + ADC_CTRL_REG); + + /* Clean ch_only_s bits */ + reg &= ~ADC_CTRL_REG_CH_ONLY_S(7); + + /* Clean channel_en bit */ + reg &= 0xFFF00F; + + reg |= ADC_CTRL_REG_SINGLE_SEL | ADC_CTRL_REG_SINGLE_EN | + ADC_CTRL_REG_CH_ONLY_EN | ADC_CTRL_REG_CH_ONLY_S(ch) | ADC_CTRL_REG_CHANNEL_EN(ch); + writel(reg, adc->regs + ADC_CTRL_REG); +} + +static int phytium_adc_single_conv(struct iio_dev *indio_dev, u8 ch) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + int ret; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + mutex_lock(&adc->lock); + + phytium_adc_intrmask_setup(adc, BIT(ch), true); + reinit_completion(&adc->completion); + phytium_adc_single_conv_setup(adc, ch); + phytium_adc_start_stop(adc, true); + + if (!wait_for_completion_timeout(&adc->completion, PHYTIUM_ADC_TIMEOUT)) + ret = -ETIMEDOUT; + + phytium_adc_start_stop(adc, false); + phytium_adc_intrmask_setup(adc, BIT(ch), false); + + mutex_unlock(&adc->lock); + iio_device_release_direct_mode(indio_dev); + + return ret; +} + +static int phytium_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + if (chan->type != IIO_VOLTAGE) + return -EINVAL; + + ret = phytium_adc_single_conv(indio_dev, chan->channel); + if (ret) + return ret; + *val = adc->last_val[chan->channel]; + + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int phytium_read_thresh(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info iinfo, int *val, int *val2) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + + if (dir == IIO_EV_DIR_FALLING) + *val = adc->thresh_low[chan->channel]; + else + *val = adc->thresh_high[chan->channel]; + + return IIO_VAL_INT; +} + +static int phytium_write_thresh(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info einfo, int val, int val2) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + u32 thresh; + + switch (dir) { + case IIO_EV_DIR_FALLING: + adc->thresh_low[chan->channel] = val; + thresh = readl(adc->regs + ADC_LEVEL_REG(chan->channel)) & 0x3ff0000; + thresh |= ADC_LEVEL_REG_LOW_LEVEL(val); + writel(thresh, adc->regs + ADC_LEVEL_REG(chan->channel)); + break; + case IIO_EV_DIR_RISING: + adc->thresh_high[chan->channel] = val; + thresh = 
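+		/* keep the programmed low-threshold field; only the high half changes */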
readl(adc->regs + ADC_LEVEL_REG(chan->channel)) & 0xffff; + thresh |= ADC_LEVEL_REG_HIGH_LEVEL(val); + writel(thresh, adc->regs + ADC_LEVEL_REG(chan->channel)); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int phytium_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *mask) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + unsigned int n; + + n = bitmap_weight(mask, indio_dev->masklength); + + kfree(adc->scan_data); + adc->scan_data = kcalloc(n, sizeof(*adc->scan_data), GFP_KERNEL); + if (!adc->scan_data) + return -ENOMEM; + + return 0; +} + +static const u64 phytium_adc_event_codes[] = { + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 4, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 4, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 5, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 5, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 6, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 6, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 7, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), + IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 7, + IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), +}; + +static irqreturn_t phytium_adc_threaded_irq(int irq, void *data) +{ + struct phytium_adc *adc = data; + struct iio_dev *indio_dev = iio_priv_to_dev(adc); + s64 timestamp = iio_get_time_ns(indio_dev); + unsigned long status; + int ch; + u32 intr; + + intr = readl(adc->regs + ADC_INTR_REG); + + if (intr & ADC_INTR_REG_ERR) { + dev_err(adc->dev, "conversion error: ADC_INTR_REG(0x%x)\n", intr); + writel(ADC_INTR_REG_ERR, adc->regs + ADC_INTR_REG); + return IRQ_HANDLED; + } + + status = (intr & ADC_INTR_REG_LIMIT_MASK) >> 8; + if (status) { + for_each_set_bit(ch, &status, PHYTIUM_MAX_CHANNELS * 2) + iio_push_event(indio_dev, phytium_adc_event_codes[ch], timestamp); + } + + status = intr & ADC_INTR_REG_COVFIN_MASK; + if (status) { + for_each_set_bit(ch, &status, PHYTIUM_MAX_CHANNELS) + adc->last_val[ch] = readl(adc->regs + ADC_COV_RESULT_REG(ch)) & + ADC_COV_RESULT_REG_MASK; + + if (iio_buffer_enabled(indio_dev)) + iio_trigger_poll(indio_dev->trig); + else + complete(&adc->completion); + } + + /* Clear all the interrupts */ + writel(status, adc->regs + ADC_INTR_REG); + + return IRQ_HANDLED; +} + +static void phytium_adc_cont_conv_setup(struct phytium_adc *adc, + unsigned long chan_mask, + u32 interval) +{ + u32 reg; + + /* + * Setup control register: + * - Continuous conversion mode + * - Multi-channel rotation mode + * - Channel enablement + */ + reg = readl(adc->regs + ADC_CTRL_REG); + reg &= ~(ADC_CTRL_REG_SINGLE_SEL | ADC_CTRL_REG_SINGLE_EN | + ADC_CTRL_REG_CH_ONLY_EN); + reg |= chan_mask << 4; + writel(reg, adc->regs + ADC_CTRL_REG); + + /* Setup interval between two conversions 
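+	 * (the value written is adc->interval, derived from the ADC clock
+	 * rate in phytium_adc_hw_init() or overridden via conv_interval)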
*/ + writel(interval, adc->regs + ADC_INTER_REG); +} + +static int phytium_adc_preenable(struct iio_dev *indio_dev) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + unsigned long scan_mask = *indio_dev->active_scan_mask; + + phytium_adc_cont_conv_setup(adc, scan_mask & 0xff, adc->interval); + phytium_adc_intrmask_setup(adc, scan_mask & 0xff, true); + + return 0; +} + +static int phytium_adc_postenable(struct iio_dev *indio_dev) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + + iio_triggered_buffer_postenable(indio_dev); + phytium_adc_start_stop(adc, true); + + return 0; +} + +static int phytium_adc_postdisable(struct iio_dev *indio_dev) +{ + struct phytium_adc *adc = iio_priv(indio_dev); + unsigned long scan_mask = *indio_dev->active_scan_mask; + + phytium_adc_start_stop(adc, false); + phytium_adc_intrmask_setup(adc, scan_mask & 0xff, false); + + return 0; +} + +static const struct iio_buffer_setup_ops phytium_buffer_setup_ops = { + .preenable = &phytium_adc_preenable, + .postenable = &phytium_adc_postenable, + .predisable = &iio_triggered_buffer_predisable, + .postdisable = &phytium_adc_postdisable, +}; + +static irqreturn_t phytium_adc_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct phytium_adc *adc = iio_priv(indio_dev); + int i, j = 0; + + if (!adc->scan_data) + goto out; + + for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) + adc->scan_data[j++] = adc->last_val[i]; + + iio_push_to_buffers(indio_dev, adc->scan_data); + +out: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static const struct iio_info phytium_adc_iio_info = { + .read_raw = &phytium_adc_read_raw, + .read_event_value = &phytium_read_thresh, + .write_event_value = &phytium_write_thresh, + .update_scan_mode = &phytium_update_scan_mode, +}; + +static int phytium_adc_probe(struct platform_device *pdev) +{ + struct phytium_adc *adc; + struct iio_dev *indio_dev; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*adc)); + if (!indio_dev) + return -ENOMEM; + platform_set_drvdata(pdev, indio_dev); + + adc = iio_priv(indio_dev); + adc->dev = dev; + + ret = phytium_adc_parse_properties(pdev, adc); + if (ret) + return ret; + + mutex_init(&adc->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + adc->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(adc->regs)) + return PTR_ERR(adc->regs); + + adc->adc_clk = devm_clk_get(dev, NULL); + if (IS_ERR(adc->adc_clk)) + return PTR_ERR(adc->adc_clk); + + init_completion(&adc->completion); + + indio_dev->name = dev_name(dev); + indio_dev->info = &phytium_adc_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = adc->data->channels; + indio_dev->num_channels = adc->data->num_channels; + + ret = devm_request_threaded_irq(adc->dev, platform_get_irq(pdev, 0), + NULL, phytium_adc_threaded_irq, IRQF_ONESHOT, + dev_name(dev), adc); + if (ret) + return ret; + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, + &iio_pollfunc_store_time, + phytium_adc_trigger_handler, + &phytium_buffer_setup_ops); + if (ret) + return ret; + + ret = phytium_adc_hw_init(adc); + if (ret) { + dev_err(&pdev->dev, "failed to initialize Phytium ADC, %d\n", ret); + return ret; + } + + return devm_iio_device_register(dev, indio_dev); +} + +static int phytium_adc_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct phytium_adc *adc = 
iio_priv(indio_dev);
+
+	/*
+	 * The IIO device was registered with devm_iio_device_register(),
+	 * so the core unregisters it once this returns; unregistering it
+	 * here as well would unregister the device twice.
+	 */
+	phytium_adc_power_setup(adc, false);
+	kfree(adc->scan_data);
+
+	return 0;
+}
+
+static const struct of_device_id phytium_of_match[] = {
+	{ .compatible = "phytium,adc", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, phytium_of_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int phytium_adc_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct phytium_adc *adc = iio_priv(indio_dev);
+
+	phytium_adc_power_setup(adc, false);
+	clk_disable_unprepare(adc->adc_clk);
+
+	return 0;
+}
+
+static int phytium_adc_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct phytium_adc *adc = iio_priv(indio_dev);
+	int ret;
+
+	ret = clk_prepare_enable(adc->adc_clk);
+	if (ret)
+		return ret;
+
+	phytium_adc_power_setup(adc, true);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(phytium_adc_pm_ops, phytium_adc_suspend, phytium_adc_resume);
+
+static struct platform_driver phytium_adc_driver = {
+	.driver = {
+		.name = "phytium_adc",
+		.of_match_table = phytium_of_match,
+		.pm = &phytium_adc_pm_ops,
+	},
+	.probe = phytium_adc_probe,
+	.remove = phytium_adc_remove,
+};
+module_platform_driver(phytium_adc_driver);
+
+MODULE_AUTHOR("Yang Liu ");
+MODULE_DESCRIPTION("Phytium ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 01ed0a667928d47c40f9ca4824a54ce6bcb07277..2c62de6b5bf1624122669ab313ab6fc7e1a429e9 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1039,7 +1039,7 @@ int hfi1_get_proc_affinity(int node)
 	struct hfi1_affinity_node *entry;
 	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 	const struct cpumask *node_mask,
-		*proc_mask = &current->cpus_allowed;
+		*proc_mask = current->cpus_ptr;
 	struct hfi1_affinity_node_list *affinity = &node_affinity;
 	struct cpu_mask_set *set = &affinity->proc;
@@ -1047,7 +1047,7 @@
 	 * check whether process/context affinity has already
 	 * been set
 	 */
-	if (cpumask_weight(proc_mask) == 1) {
+	if (current->nr_cpus_allowed == 1) {
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
@@ -1058,7 +1058,7 @@
 		cpu = cpumask_first(proc_mask);
 		cpumask_set_cpu(cpu, &set->used);
 		goto done;
-	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+	} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
 		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
 			  current->pid, current->comm,
 			  cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 38258de75a94cbcdacb6696f8146e5466f178e9a..3ee680fc3fda44dd94aae8d9cfb87593f3196675 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -853,14 +853,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
 	struct sdma_rht_node *rht_node;
 	struct sdma_engine *sde = NULL;
-	const struct cpumask *current_mask = &current->cpus_allowed;
 	unsigned long cpu_id;
 
 	/*
 	 * To ensure that always the same sdma engine(s) will be
 	 * selected make sure the process is pinned to this CPU only.
 	 */
-	if (cpumask_weight(current_mask) != 1)
+	if (current->nr_cpus_allowed != 1)
 		goto out;
 
 	cpu_id = smp_processor_id();
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 98e1ce14fa2ab901a4d8fe53f50e3cde5bc2daf5..5d3828625017ca09e2bda562facbf22a1305d481 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
 {
 	struct qib_filedata *fd = fp->private_data;
-	const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+	const unsigned int weight = current->nr_cpus_allowed;
 	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
 	int local_cpu;
@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
 	else {
 		int unit;
-		const unsigned int cpu = cpumask_first(&current->cpus_allowed);
-		const unsigned int weight =
-			cpumask_weight(&current->cpus_allowed);
+		const unsigned int cpu = cpumask_first(current->cpus_ptr);
+		const unsigned int weight = current->nr_cpus_allowed;
 
 		if (weight == 1 && !test_bit(cpu, qib_cpulist))
 			if (!find_hca(cpu, &unit) && unit >= 0)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4713957b0cbba11e942a01c2b2138854d648637f..d134c5fd5767f52bbc16d9367a2c9288244fcf04 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -756,4 +756,15 @@ config KEYBOARD_MTK_PMIC
 	  To compile this driver as a module, choose M here: the
 	  module will be called pmic-keys.
 
+config KEYBOARD_PHYTIUM
+	tristate "Phytium keypad support"
+	depends on ARCH_PHYTIUM
+	select INPUT_MATRIXKMAP
+	help
+	  Say Y here if you want to enable support for the Phytium
+	  keypad port.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called phytium-keypad.
+
 endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 182e92985dbf69f18c783db7b04523fbc5ae56a3..e410f6e9c97c2416f783bd27e9ab7775ea776c02 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -67,3 +67,4 @@ obj-$(CONFIG_KEYBOARD_TM2_TOUCHKEY)	+= tm2-touchkey.o
 obj-$(CONFIG_KEYBOARD_TWL4030)		+= twl4030_keypad.o
 obj-$(CONFIG_KEYBOARD_XTKBD)		+= xtkbd.o
 obj-$(CONFIG_KEYBOARD_W90P910)		+= w90p910_keypad.o
+obj-$(CONFIG_KEYBOARD_PHYTIUM)		+= phytium-keypad.o
diff --git a/drivers/input/keyboard/phytium-keypad.c b/drivers/input/keyboard/phytium-keypad.c
new file mode 100644
index 0000000000000000000000000000000000000000..37750d78bc2d682bba3b4a0f9f0bc2cb5dea8cd2
--- /dev/null
+++ b/drivers/input/keyboard/phytium-keypad.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Phytium keypad port.
+ *
+ * Copyright (c) 2020-2023 Phytium Technology Co., Ltd.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * Keypad Controller registers + */ +#define KPCR 0x00 /* Keypad Control Register */ + +#define KPSR 0x04 /* Keypad Status Register */ +#define KBD_STAT_KPKD (0x1 << 0) /* Key Press Interrupt Status bit (w1c) */ +#define KBD_STAT_KPKR (0x1 << 1) /* Key Release Interrupt Status bit (w1c) */ +#define KBD_STAT_KDSC (0x1 << 2) /* Key Depress Synch Chain Status bit (w1c)*/ +#define KBD_STAT_KRSS (0x1 << 3) /* Key Release Synch Status bit (w1c)*/ +#define KBD_STAT_KDIE (0x1 << 8) /* Key Depress Interrupt Enable Status bit */ +#define KBD_STAT_KRIE (0x1 << 9) /* Key Release Interrupt Enable */ + +#define KDDR 0x08 /* Keypad Data Direction Register */ +#define KPDR 0x0C /* Keypad Data Register */ + +#define MAX_MATRIX_KEY_ROWS 8 +#define MAX_MATRIX_KEY_COLS 8 + +#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS) + +struct phytium_keypad { + struct input_dev *input_dev; + void __iomem *mmio_base; + + int irq; + struct timer_list check_matrix_timer; + + /* + * The matrix is stable only if no changes are detected after + * PHYTIUM_KEYPAD_SCANS_FOR_STABILITY scans + */ +#define PHYTIUM_KEYPAD_SCANS_FOR_STABILITY 3 + + int stable_count; + + bool enabled; + + unsigned int n_rows; + unsigned int n_cols; + int row_shift; + + /* Masks for enabled rows/cols */ + unsigned short rows_en_mask; + unsigned short cols_en_mask; + + unsigned short keycodes[MAX_MATRIX_KEY_NUM]; + + /* + * Matrix states: + * -stable: achieved after a complete debounce process. + * -unstable: used in the debouncing process. + */ + unsigned short matrix_stable_state[MAX_MATRIX_KEY_COLS]; + unsigned short matrix_unstable_state[MAX_MATRIX_KEY_COLS]; +}; + +static u32 phytium_read(struct phytium_keypad *keypad, int reg) +{ + return readl(keypad->mmio_base + reg); +} + +static void phytium_write(struct phytium_keypad *keypad, u32 value, int reg) +{ + writel(value, keypad->mmio_base + reg); +} + +/* Scan the matrix and return the new state in *matrix_volatile_state. */ +static void phytium_keypad_scan_matrix(struct phytium_keypad *keypad, + unsigned short *matrix_volatile_state) +{ + int col; + u32 reg_val; + + for (col = 0; col < keypad->n_cols; col++) { + if ((keypad->cols_en_mask & (1 << col)) == 0) + continue; + /* + * Discharge keypad capacitance: + * 2. write 0s on KDDR[KCD], configure columns as input. + */ + reg_val = phytium_read(keypad, KDDR); + reg_val = 0x00000000; + phytium_write(keypad, reg_val, KDDR); + + /* + * 3. Write a single column to 0, others to 1. + * 4. Sample row inputs and save data. + * 5. Repeat steps 3 - 4 for remaining columns. + */ + reg_val = 0; + reg_val |= (1 << (16 + col)); + phytium_write(keypad, reg_val, KDDR); + reg_val = phytium_read(keypad, KPDR); + reg_val = 0x00000000; + phytium_write(keypad, reg_val, KPDR); + + /* + * Delay added to avoid propagating the 0 from column to row + * when scanning. + */ + udelay(5); + + /* + * 1s in matrix_volatile_state[col] means key pressures + * throw data from non enabled rows. + */ + reg_val = phytium_read(keypad, KPDR); + matrix_volatile_state[col] = (~reg_val) & keypad->rows_en_mask; + } + + /* + * Return in standby mode: + * 6. 
write 0s to columns + */ + /* Configure columns as output, output 0 */ + reg_val = 0; + reg_val |= (keypad->cols_en_mask & 0xffff) << 16; + phytium_write(keypad, reg_val, KDDR); + phytium_write(keypad, 0x00000000, KPDR); +} + +/* + * Compare the new matrix state (volatile) with the stable one stored in + * keypad->matrix_stable_state and fire events if changes are detected. + */ +static void phytium_keypad_fire_events(struct phytium_keypad *keypad, + unsigned short *matrix_volatile_state) +{ + struct input_dev *input_dev = keypad->input_dev; + int row, col; + + for (col = 0; col < keypad->n_cols; col++) { + unsigned short bits_changed; + int code; + + if ((keypad->cols_en_mask & (1 << col)) == 0) + continue; /* Column is not enabled */ + + bits_changed = keypad->matrix_stable_state[col] ^ + matrix_volatile_state[col]; + + if (bits_changed == 0) + continue; /* Column does not contain changes */ + + for (row = 0; row < keypad->n_rows; row++) { + if ((keypad->rows_en_mask & (1 << row)) == 0) + continue; /* Row is not enabled */ + if ((bits_changed & (1 << row)) == 0) + continue; /* Row does not contain changes */ + + code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + input_event(input_dev, EV_MSC, MSC_SCAN, code); + input_report_key(input_dev, keypad->keycodes[code], + matrix_volatile_state[col] & (1 << row)); + dev_dbg(&input_dev->dev, "Event code: %d, val: %d", + keypad->keycodes[code], + matrix_volatile_state[col] & (1 << row)); + } + } + input_sync(input_dev); +} + +/* + * phytium_keypad_check_for_events is the timer handler. + */ +static void phytium_keypad_check_for_events(struct timer_list *t) +{ + struct phytium_keypad *keypad = from_timer(keypad, t, check_matrix_timer); + unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS]; + u32 reg_val; + bool state_changed, is_zero_matrix; + int i; + + memset(matrix_volatile_state, 0, sizeof(matrix_volatile_state)); + + phytium_keypad_scan_matrix(keypad, matrix_volatile_state); + + state_changed = false; + for (i = 0; i < keypad->n_cols; i++) { + if ((keypad->cols_en_mask & (1 << i)) == 0) + continue; + + if (keypad->matrix_unstable_state[i] ^ matrix_volatile_state[i]) { + state_changed = true; + break; + } + } + + /* + * If the matrix state is changed from the previous scan + * (Re)Begin the debouncing process, saving the new state in + * keypad->matrix_unstable_state. + * else + * Increase the count of number of scans with a stable state. + */ + if (state_changed) { + memcpy(keypad->matrix_unstable_state, matrix_volatile_state, + sizeof(matrix_volatile_state)); + keypad->stable_count = 0; + } else { + keypad->stable_count++; + } + + /* + * If the matrix is not as stable as we want reschedule scan + * in the near future. + */ + if (keypad->stable_count < PHYTIUM_KEYPAD_SCANS_FOR_STABILITY) { + mod_timer(&keypad->check_matrix_timer, + jiffies + msecs_to_jiffies(10)); + return; + } + + /* + * If the matrix state is stable, fire the events and save the new + * stable state. Note, if the matrix is kept stable for longer + * (keypad->stable_count > PHYTIUM_KEYPAD_SCANS_FOR_STABILITY) all + * events have already been generated. 
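+	 * (stable_count keeps growing in that case, so the equality test
+	 * below fires the events exactly once per stabilisation.)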
+ */ + if (keypad->stable_count == PHYTIUM_KEYPAD_SCANS_FOR_STABILITY) { + phytium_keypad_fire_events(keypad, matrix_volatile_state); + memcpy(keypad->matrix_stable_state, matrix_volatile_state, + sizeof(matrix_volatile_state)); + } + + is_zero_matrix = true; + for (i = 0; i < keypad->n_cols; i++) { + if (matrix_volatile_state[i] != 0) { + is_zero_matrix = false; + break; + } + } + + if (is_zero_matrix) { + /* + * All keys have been released. Enable only the KDI + * interrupt for future key presses (clear the KDI + * status bit and its sync chain before that). + */ + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KPKD | KBD_STAT_KDSC; + phytium_write(keypad, reg_val, KPSR); + + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KDIE; + reg_val &= ~KBD_STAT_KRIE; + phytium_write(keypad, reg_val, KPSR); + } else { + /* + * Some keys are still pressed. Schedule a rescan in + * attempt to detect multiple key presses and enable + * the KRI interrupt to react quickly to key release + * event. + */ + mod_timer(&keypad->check_matrix_timer, + jiffies + msecs_to_jiffies(60)); + + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KRSS; + phytium_write(keypad, reg_val, KPSR); + + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KRIE; + reg_val &= ~KBD_STAT_KDIE; + phytium_write(keypad, reg_val, KPSR); + } +} + +static irqreturn_t phytium_keypad_irq_handler(int irq, void *dev_id) +{ + struct phytium_keypad *keypad = dev_id; + u32 reg_val; + + reg_val = phytium_read(keypad, KPSR); + /* Disable both interrupt types */ + reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + /* Clear interrupts status bits */ + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; + phytium_write(keypad, reg_val, KPSR); + + if (keypad->enabled) { + /* The matrix is supposed to be changed */ + keypad->stable_count = 0; + + /* Schedule the scanning procedure near in the future */ + mod_timer(&keypad->check_matrix_timer, + jiffies + msecs_to_jiffies(2)); + } + + return IRQ_HANDLED; +} + +static void phytium_keypad_config(struct phytium_keypad *keypad) +{ + u32 reg_val; + + /* + * Include enabled rows in interrupt generation (KPCR[15:0]) + * Configure keypad columns as open-drain (KPCR[31:16]) + */ + reg_val = phytium_read(keypad, KPCR); + reg_val |= keypad->rows_en_mask & 0xffff; /* rows */ + reg_val |= (keypad->cols_en_mask & 0xffff) << 16; /* cols */ + phytium_write(keypad, reg_val, KPCR); + + /* Configure columns as output, output 0 */ + reg_val = 0; + reg_val |= (keypad->cols_en_mask & 0xffff) << 16; + phytium_write(keypad, reg_val, KDDR); + phytium_write(keypad, 0x00000000, KPDR); + + /* + * Clear Key Depress and Key Release status bit. + * Clear both synchronizer chain. + */ + reg_val = phytium_read(keypad, KPSR); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD | + KBD_STAT_KDSC | KBD_STAT_KRSS; + phytium_write(keypad, reg_val, KPSR); + + /* Enable KDI and disable KRI (avoid false release events). */ + reg_val |= KBD_STAT_KDIE; + reg_val &= ~KBD_STAT_KRIE; + phytium_write(keypad, reg_val, KPSR); +} + +static void phytium_keypad_inhibit(struct phytium_keypad *keypad) +{ + unsigned short reg_val; + + /* Inhibit KDI and KRI interrupts. 
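+	 * Writing 1 to the KPKD/KPKR status bits also acks anything already
+	 * latched, so no stale event interrupt fires on re-enable.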
*/ + reg_val = phytium_read(keypad, KPSR); + reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; + phytium_write(keypad, reg_val, KPSR); +} + +static void phytium_keypad_close(struct input_dev *dev) +{ + struct phytium_keypad *keypad = input_get_drvdata(dev); + + dev_dbg(&dev->dev, ">%s\n", __func__); + + /* Mark keypad as being inactive */ + keypad->enabled = false; + synchronize_irq(keypad->irq); + del_timer_sync(&keypad->check_matrix_timer); + + phytium_keypad_inhibit(keypad); +} + +static int phytium_keypad_open(struct input_dev *dev) +{ + struct phytium_keypad *keypad = input_get_drvdata(dev); + + dev_dbg(&dev->dev, ">%s\n", __func__); + + /* We became active from now */ + keypad->enabled = true; + + phytium_keypad_config(keypad); + + /* Sanity control, not all the rows must be activated now. */ + if ((phytium_read(keypad, KPDR) & keypad->rows_en_mask) == 0) { + dev_err(&dev->dev, + "too many keys pressed, control pins initialisation\n"); + goto open_err; + } + + return 0; + +open_err: + phytium_keypad_close(dev); + return -EIO; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_keypad_acpi_ids[] = { + { "PHYT0028", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_keypad_acpi_ids); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id phytium_keypad_of_match[] = { + { .compatible = "phytium,keypad", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, phytium_keypad_of_match); +#endif + +static int phytium_keypad_probe(struct platform_device *pdev) +{ + const struct matrix_keymap_data *keymap_data = dev_get_platdata(&pdev->dev); + struct phytium_keypad *keypad; + struct input_dev *input_dev; + struct resource *res; + int irq, error, i, row, col; + + if (!keymap_data && !((&pdev->dev)->of_node) && !has_acpi_companion(&pdev->dev)) { + dev_err(&pdev->dev, "no keymap defined\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "no irq defined in platform data\n"); + return irq; + } + + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) { + dev_err(&pdev->dev, "failed to allocate the input device\n"); + return -ENOMEM; + } + + keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL); + if (!keypad) + return -ENOMEM; + + keypad->input_dev = input_dev; + keypad->irq = irq; + keypad->stable_count = 0; + + timer_setup(&keypad->check_matrix_timer, + phytium_keypad_check_for_events, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(keypad->mmio_base)) + return PTR_ERR(keypad->mmio_base); + + /* Init the Input device */ + input_dev->name = pdev->name; + input_dev->id.bustype = BUS_HOST; + input_dev->dev.parent = &pdev->dev; + input_dev->open = phytium_keypad_open; + input_dev->close = phytium_keypad_close; + + error = matrix_keypad_parse_properties(&pdev->dev, &keypad->n_rows, &keypad->n_cols); + if (error) { + dev_err(&pdev->dev, "failed to parse phytium kp params\n"); + return error; + } + + error = matrix_keypad_build_keymap(keymap_data, NULL, + keypad->n_rows, + keypad->n_cols, + keypad->keycodes, input_dev); + if (error) { + dev_err(&pdev->dev, "failed to build keymap\n"); + return error; + } + + keypad->row_shift = get_count_order(keypad->n_cols); + + /* Search for rows and cols enabled */ + for (row = 0; row < keypad->n_rows; row++) { + for (col = 0; col < keypad->n_cols; col++) { + i = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + if 
(keypad->keycodes[i] != KEY_RESERVED) {
+				keypad->rows_en_mask |= 1 << row;
+				keypad->cols_en_mask |= 1 << col;
+			}
+		}
+	}
+
+	__set_bit(EV_REP, input_dev->evbit);
+	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+	input_set_drvdata(input_dev, keypad);
+
+	phytium_keypad_inhibit(keypad);
+
+	error = devm_request_irq(&pdev->dev, irq, phytium_keypad_irq_handler, 0,
+				 pdev->name, keypad);
+	if (error) {
+		dev_err(&pdev->dev, "failed to request IRQ\n");
+		return error;
+	}
+
+	/* Register the input device */
+	error = input_register_device(input_dev);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register input device\n");
+		return error;
+	}
+
+	platform_set_drvdata(pdev, keypad);
+	device_init_wakeup(&pdev->dev, 1);
+
+	return 0;
+}
+
+static int phytium_keypad_remove(struct platform_device *pdev)
+{
+	/*
+	 * The input device, IRQ, MMIO mapping and keypad structure are
+	 * all devm-managed and torn down in reverse probe order after
+	 * this returns; unregistering or freeing them by hand here would
+	 * leave the still-registered IRQ handler with a dangling pointer.
+	 */
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int phytium_keypad_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct phytium_keypad *keypad = platform_get_drvdata(pdev);
+	struct input_dev *input_dev = keypad->input_dev;
+
+	mutex_lock(&input_dev->mutex);
+
+	if (input_dev->users)
+		phytium_keypad_inhibit(keypad);
+
+	mutex_unlock(&input_dev->mutex);
+
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(keypad->irq);
+
+	return 0;
+}
+
+static int phytium_keypad_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct phytium_keypad *keypad = platform_get_drvdata(pdev);
+	struct input_dev *input_dev = keypad->input_dev;
+
+	if (device_may_wakeup(&pdev->dev))
+		disable_irq_wake(keypad->irq);
+
+	mutex_lock(&input_dev->mutex);
+
+	if (input_dev->users)
+		phytium_keypad_config(keypad);
+
+	mutex_unlock(&input_dev->mutex);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(phytium_keypad_pm_ops, phytium_keypad_suspend, phytium_keypad_resume);
+
+static struct platform_driver phytium_keypad_driver = {
+	.driver = {
+		.name = "phytium-keypad",
+		.pm = &phytium_keypad_pm_ops,
+		.of_match_table = of_match_ptr(phytium_keypad_of_match),
+		.acpi_match_table = ACPI_PTR(phytium_keypad_acpi_ids),
+	},
+	.probe = phytium_keypad_probe,
+	.remove = phytium_keypad_remove,
+};
+module_platform_driver(phytium_keypad_driver);
+
+MODULE_AUTHOR("Song Wenting ");
+MODULE_DESCRIPTION("PHYTIUM Keypad Port Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:phytium-keypad");
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index d90d9f1098ff8ccba4c35d032812f6a62019baa3..958312b2169c871476d8b6f28939f82169a4a5ff 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -39,6 +39,18 @@ config SERIO_I8042
 	  To compile this driver as a module, choose M here: the
 	  module will be called i8042.
 
+config SERIO_PHYTIUM_PS2
+	tristate "PHYTIUM PS/2 (keyboard and mouse)"
+	default y if ARCH_PHYTIUM
+	depends on SERIO
+	depends on PCI
+	help
+	  This selects support for the PS/2 Host Controller on
+	  Phytium SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called phytium-ps2.
+ config SERIO_SERPORT tristate "Serial port line discipline" default y diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile index 67950a5ccb3f18484b6457f895049412415dbf49..c180361bdd12aea7325d711c9beb93cea26cd154 100644 --- a/drivers/input/serio/Makefile +++ b/drivers/input/serio/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_SERIO) += serio.o obj-$(CONFIG_SERIO_I8042) += i8042.o +obj-$(CONFIG_SERIO_PHYTIUM_PS2) += phytium-ps2.o obj-$(CONFIG_SERIO_PARKBD) += parkbd.o obj-$(CONFIG_SERIO_SERPORT) += serport.o obj-$(CONFIG_SERIO_CT82C710) += ct82c710.o diff --git a/drivers/input/serio/phytium-ps2.c b/drivers/input/serio/phytium-ps2.c new file mode 100644 index 0000000000000000000000000000000000000000..173866f396664f13630ee29e71e6194cea2dc91f --- /dev/null +++ b/drivers/input/serio/phytium-ps2.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium PS/2 keyboard controller driver. + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "phytium_ps2_pci" + +#define REG_STAT 0x0 +#define REG_STAT_TX_TIMEOUT 0x1 +#define REG_STAT_RX_TIMEOUT 0x2 +#define REG_STAT_TX_FULL 0x4 +#define REG_CTRL 0x4 +#define REG_CTRL_RESET 0x1 +#define REG_CTRL_TX_TIMEOUT 0x2 +#define REG_CTRL_RX_TIMEOUT 0x4 +#define REG_CTRL_RX_INTR 0x8 +#define REG_INTR 0x8 +#define REG_INTR_TIMEOUT 0x1 +#define REG_INTR_RX 0x2 +#define REG_TX 0xc +#define REG_RX 0x10 +#define REG_TIMER_VAL 0x14 + +#define REG_CTRL_ENABLE (REG_CTRL_TX_TIMEOUT|REG_CTRL_RX_TIMEOUT|REG_CTRL_RX_INTR) +#define REG_DATA_PARITY 0x100 + +#define STAT_RX_COUNTER(stat) ((stat >> 8) & 0x1f) + +struct phytium_ps2_data { + void __iomem *base; + struct serio *io; + struct pci_dev *dev; +}; + +static irqreturn_t phytium_ps2_irq(int irq, void *devid) +{ + struct phytium_ps2_data *ps2if = devid; + u32 status, scancode, val = 0; + unsigned int flag; + int i, rxcount; + + status = readl(ps2if->base + REG_STAT); + if (!status) + return IRQ_NONE; + + /* Check if there is timeout interrupt */ + if (status & (REG_STAT_RX_TIMEOUT|REG_STAT_TX_TIMEOUT)) + val |= REG_INTR_TIMEOUT; + + rxcount = STAT_RX_COUNTER(status); + for (i = 0; i < rxcount; i++) { + scancode = readl(ps2if->base + REG_RX) & 0x1ff; + + if (rxcount <= 16 && scancode != 0x1ff) { + flag = ((scancode & REG_DATA_PARITY) ? 
SERIO_PARITY : 0); + serio_interrupt(ps2if->io, scancode & 0xff, flag); + } + } + + val |= REG_INTR_RX; + writel(val, ps2if->base + REG_INTR); + + return IRQ_HANDLED; +} + +int phytium_ps2_write(struct serio *serio, unsigned char val) +{ + struct phytium_ps2_data *ps2if = serio->port_data; + unsigned int stat; + + do { + stat = readl(ps2if->base + REG_STAT); + cpu_relax(); + } while (stat & REG_STAT_TX_FULL); + + writel(val, ps2if->base + REG_TX); + + return 0; +} + +int phytium_ps2_open(struct serio *io) +{ + struct phytium_ps2_data *ps2if = io->port_data; + + writel(REG_CTRL_RESET, ps2if->base + REG_CTRL); + /* Wait 4ms for the controller to be reset */ + usleep_range(4000, 6000); + writel(REG_CTRL_ENABLE, ps2if->base + REG_CTRL); + + return 0; +} + +void phytium_ps2_close(struct serio *io) +{ + struct phytium_ps2_data *ps2if = io->port_data; + + writel(0, ps2if->base + REG_CTRL); +} + +static int phytium_pci_ps2_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct phytium_ps2_data *ps2if; + struct serio *serio; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + goto out; + + ret = pcim_iomap_regions(pdev, 0x1, DRV_NAME); + if (ret) + goto out; + + ps2if = devm_kzalloc(&pdev->dev, sizeof(struct phytium_ps2_data), GFP_KERNEL); + serio = kzalloc(sizeof(struct serio), GFP_KERNEL); + if (!ps2if || !serio) { + ret = -ENOMEM; + goto free; + } + + serio->id.type = SERIO_8042; + serio->write = phytium_ps2_write; + serio->open = phytium_ps2_open; + serio->close = phytium_ps2_close; + strlcpy(serio->name, pci_name(pdev), sizeof(serio->name)); + strlcpy(serio->phys, dev_name(&pdev->dev), sizeof(serio->phys)); + serio->port_data = ps2if; + serio->dev.parent = &pdev->dev; + ps2if->io = serio; + ps2if->dev = pdev; + ps2if->base = pcim_iomap_table(pdev)[0]; + + ret = devm_request_irq(&pdev->dev, pdev->irq, phytium_ps2_irq, + IRQF_SHARED, DRV_NAME, ps2if); + if (ret) { + dev_err(&pdev->dev, "could not request IRQ %d\n", pdev->irq); + goto free; + } + + pci_set_drvdata(pdev, ps2if); + serio_register_port(ps2if->io); + + return 0; + +free: + kfree(serio); +out: + return ret; +} + +static void phytium_pci_ps2_remove(struct pci_dev *pdev) +{ + struct phytium_ps2_data *ps2if = pci_get_drvdata(pdev); + + serio_unregister_port(ps2if->io); + pcim_iounmap_regions(pdev, 0x1); +} + +static const struct pci_device_id phytium_pci_ps2_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc34) }, + {}, +}; +MODULE_DEVICE_TABLE(pci, phytium_pci_ps2_ids); + +static struct pci_driver phytium_pci_ps2_driver = { + .name = DRV_NAME, + .id_table = phytium_pci_ps2_ids, + .probe = phytium_pci_ps2_probe, + .remove = phytium_pci_ps2_remove, +}; +module_pci_driver(phytium_pci_ps2_driver); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium PCI PS/2 controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 9f16f47e70219614ede7422d483d05a26fd65bd0..75f33dbb7fd65798a93ed6554a1323b08b0aff55 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -201,6 +201,13 @@ #define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5) #define Q_BASE_LOG2SIZE GENMASK(4, 0) +/* Ensure DMA allocations are naturally aligned */ +#ifdef CONFIG_CMA_ALIGNMENT +#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT) +#else +#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1) +#endif + /* * Stream table. 
* @@ -298,8 +305,9 @@ FIELD_GET(ARM64_TCR_##fld, tcr)) /* Command queue */ -#define CMDQ_ENT_DWORDS 2 -#define CMDQ_MAX_SZ_SHIFT 8 +#define CMDQ_ENT_SZ_SHIFT 4 +#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3) +#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT) #define CMDQ_CONS_ERR GENMASK(30, 24) #define CMDQ_ERR_CERROR_NONE_IDX 0 @@ -338,14 +346,16 @@ #define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2) /* Event queue */ -#define EVTQ_ENT_DWORDS 4 -#define EVTQ_MAX_SZ_SHIFT 7 +#define EVTQ_ENT_SZ_SHIFT 5 +#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3) +#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT) #define EVTQ_0_ID GENMASK_ULL(7, 0) /* PRI queue */ -#define PRIQ_ENT_DWORDS 2 -#define PRIQ_MAX_SZ_SHIFT 8 +#define PRIQ_ENT_SZ_SHIFT 4 +#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3) +#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT) #define PRIQ_0_SID GENMASK_ULL(31, 0) #define PRIQ_0_SSID GENMASK_ULL(51, 32) @@ -568,6 +578,7 @@ struct arm_smmu_device { int gerr_irq; int combined_irq; u32 sync_nr; + u8 prev_cmd_opcode; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -594,6 +605,7 @@ struct arm_smmu_device { /* IOMMU core code handle */ struct iommu_device iommu; + bool bypass; }; /* SMMU private data for each master */ @@ -789,7 +801,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) /* High-level queue accessors */ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) { - memset(cmd, 0, CMDQ_ENT_DWORDS << 3); + memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT); cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode); switch (ent->opcode) { @@ -918,6 +930,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd) struct arm_smmu_queue *q = &smmu->cmdq.q; bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]); + while (queue_insert_raw(q, cmd) == -ENOSPC) { if (queue_poll_cons(q, false, wfe)) dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); @@ -970,9 +984,16 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) }; spin_lock_irqsave(&smmu->cmdq.lock, flags); - ent.sync.msidata = ++smmu->sync_nr; - arm_smmu_cmdq_build_cmd(cmd, &ent); - arm_smmu_cmdq_insert_cmd(smmu, cmd); + + /* Piggy-back on the previous command if it's a SYNC */ + if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) { + ent.sync.msidata = smmu->sync_nr; + } else { + ent.sync.msidata = ++smmu->sync_nr; + arm_smmu_cmdq_build_cmd(cmd, &ent); + arm_smmu_cmdq_insert_cmd(smmu, cmd); + } + spin_unlock_irqrestore(&smmu->cmdq.lock, flags); return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata); @@ -2036,17 +2057,32 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, struct arm_smmu_queue *q, unsigned long prod_off, unsigned long cons_off, - size_t dwords) + size_t dwords, const char *name) { - size_t qsz = ((1 << q->max_n_shift) * dwords) << 3; + size_t qsz; + + do { + qsz = ((1 << q->max_n_shift) * dwords) << 3; + q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, + GFP_KERNEL); + if (q->base || qsz < PAGE_SIZE) + break; + + q->max_n_shift--; + } while (1); - q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); if (!q->base) { - dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n", - qsz); + dev_err(smmu->dev, + "failed to allocate queue (0x%zx bytes) for %s\n", + qsz, name); return -ENOMEM; } + if (!WARN_ON(q->base_dma & (qsz - 1))) { + dev_info(smmu->dev, "allocated %u entries for %s\n", + 1 << 
q->max_n_shift, name); + } + q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu); q->ent_dwords = dwords; @@ -2066,13 +2102,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) /* cmdq */ spin_lock_init(&smmu->cmdq.lock); ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, - ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS); + ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS, + "cmdq"); if (ret) return ret; /* evtq */ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, - ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS); + ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS, + "evtq"); if (ret) return ret; @@ -2081,7 +2119,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) return 0; return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, - ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS); + ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS, + "priq"); } static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) @@ -2253,6 +2292,13 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; doorbell &= MSI_CFG0_ADDR_MASK; +#ifdef CONFIG_PM_SLEEP + /* Saves the msg (base addr of msi irq) and restores it during resume */ + desc->msg.address_lo = msg->address_lo; + desc->msg.address_hi = msg->address_hi; + desc->msg.data = msg->data; +#endif + writeq_relaxed(doorbell, smmu->base + cfg[0]); writel_relaxed(msg->data, smmu->base + cfg[1]); writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); @@ -2308,11 +2354,51 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) devm_add_action(dev, arm_smmu_free_msis, dev); } -static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) +#ifdef CONFIG_PM_SLEEP +static void arm_smmu_resume_msis(struct arm_smmu_device *smmu) +{ + struct msi_desc *desc; + struct device *dev = smmu->dev; + + for_each_msi_entry(desc, dev) { + switch (desc->platform.msi_index) { + case EVTQ_MSI_INDEX: + case GERROR_MSI_INDEX: + case PRIQ_MSI_INDEX: { + phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index]; + struct msi_msg *msg = &desc->msg; + phys_addr_t doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; + + doorbell &= MSI_CFG0_ADDR_MASK; + writeq_relaxed(doorbell, smmu->base + cfg[0]); + writel_relaxed(msg->data, smmu->base + cfg[1]); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + cfg[2]); + break; + } + default: + continue; + + } + } +} +#else +static void arm_smmu_resume_msis(struct arm_smmu_device *smmu) +{ +} +#endif + +static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu, bool resume) { int irq, ret; - arm_smmu_setup_msis(smmu); + if (!resume) + arm_smmu_setup_msis(smmu); + else { + /* The irq doesn't need to be re-requested during resume */ + arm_smmu_resume_msis(smmu); + return; + } /* Request interrupt lines */ irq = smmu->evtq.q.irq; @@ -2354,7 +2440,7 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) } } -static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) { int ret, irq; u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; @@ -2381,7 +2467,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (ret < 0) dev_warn(smmu->dev, "failed to enable combined irq\n"); } else - arm_smmu_setup_unique_irqs(smmu); + arm_smmu_setup_unique_irqs(smmu, resume); if (smmu->features & ARM_SMMU_FEAT_PRI) irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; 
@@ -2406,7 +2492,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) return ret; } -static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) +static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) { int ret; u32 reg, enables; @@ -2504,7 +2590,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) } } - ret = arm_smmu_setup_irqs(smmu); + ret = arm_smmu_setup_irqs(smmu, resume); if (ret) { dev_err(smmu->dev, "failed to setup irqs\n"); return ret; @@ -2514,7 +2600,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) enables &= ~(CR0_EVTQEN | CR0_PRIQEN); /* Enable the SMMU interface, or ensure bypass */ - if (!bypass || disable_bypass) { + if (!smmu->bypass || disable_bypass) { enables |= CR0_SMMUEN; } else { ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); @@ -2635,7 +2721,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) return -ENXIO; } - /* Queue sizes, capped at 4k */ + /* Queue sizes, capped to ensure natural alignment */ smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, reg)); if (!smmu->cmdq.q.max_n_shift) { @@ -2796,6 +2882,26 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) return SZ_128K; } +#ifdef CONFIG_PM_SLEEP +static int arm_smmu_suspend(struct device *dev) +{ + /* + * The smmu is powered off and related registers are automatically + * cleared when suspend. No need to do anything. + */ + return 0; +} + +static int arm_smmu_resume(struct device *dev) +{ + struct arm_smmu_device *smmu = dev_get_drvdata(dev); + + arm_smmu_device_reset(smmu, true); + + return 0; +} +#endif + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -2803,7 +2909,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev) resource_size_t ioaddr; struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; - bool bypass; smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { @@ -2821,7 +2926,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } /* Set bypass mode according to firmware probing result */ - bypass = !!ret; + smmu->bypass = !!ret; /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2867,7 +2972,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu); /* Reset the device */ - ret = arm_smmu_device_reset(smmu, bypass); + ret = arm_smmu_device_reset(smmu, false); if (ret) return ret; @@ -2929,10 +3034,21 @@ static const struct of_device_id arm_smmu_of_match[] = { }; MODULE_DEVICE_TABLE(of, arm_smmu_of_match); +#ifdef CONFIG_PM_SLEEP +static const struct dev_pm_ops arm_smmu_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_suspend, + arm_smmu_resume) +}; +#define ARM_SMMU_PM_OPS (&arm_smmu_pm_ops) +#else +#define ARM_SMMU_PM_OPS NULL +#endif + static struct platform_driver arm_smmu_driver = { .driver = { .name = "arm-smmu-v3", .of_match_table = of_match_ptr(arm_smmu_of_match), + .pm = ARM_SMMU_PM_OPS, }, .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 86334aef4bd056f4443b7a2c3e0d79638eb51f81..e5d3e4b12b22a53c14cd9c8b67325e22f8637baa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -19,13 +19,16 @@ #include #include #include +#include #include #include +#include #include #include #include #include #include +#include #include #include 
#include @@ -51,6 +54,7 @@ #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) static u32 lpi_id_bits; @@ -166,6 +170,13 @@ static struct { int next_victim; } vpe_proxy; +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); + static LIST_HEAD(its_nodes); static DEFINE_RAW_SPINLOCK(its_lock); static struct rdists *gic_rdists; @@ -178,6 +189,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) @@ -1061,7 +1073,7 @@ static inline u32 its_get_event_id(struct irq_data *d) static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { irq_hw_number_t hwirq; - struct page *prop_page; + void *va; u8 *cfg; if (irqd_is_forwarded_to_vcpu(d)) { @@ -1069,7 +1081,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) u32 event = its_get_event_id(d); struct its_vlpi_map *map; - prop_page = its_dev->event_map.vm->vprop_page; + va = page_address(its_dev->event_map.vm->vprop_page); map = &its_dev->event_map.vlpi_maps[event]; hwirq = map->vintid; @@ -1077,11 +1089,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) map->properties &= ~clr; map->properties |= set | LPI_PROP_GROUP1; } else { - prop_page = gic_rdists->prop_page; + va = gic_rdists->prop_table_va; hwirq = d->hwirq; } - cfg = page_address(prop_page) + hwirq - 8192; + cfg = va + hwirq - 8192; *cfg &= ~clr; *cfg |= set | LPI_PROP_GROUP1; @@ -1143,42 +1155,159 @@ static void its_unmask_irq(struct irq_data *d) lpi_update_config(d, 0, LPI_PROP_ENABLED); } +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + cpumask_var_t tmpmask; + int cpu, node; + + if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) + return -ENOMEM; + + node = its_dev->its->numa_node; + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * 
node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. + */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + free_cpumask_var(tmpmask); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu; - const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + int cpu, prev_cpu; /* A forwarded interrupt should use irq_set_vcpu_affinity */ if (irqd_is_forwarded_to_vcpu(d)) return -EINVAL; - /* lpi cannot be routed to a redistributor that is on a foreign node */ - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { - if (its_dev->its->numa_node >= 0) { - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - if (!cpumask_intersects(mask_val, cpu_mask)) - return -EINVAL; - } - } + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); - cpu = cpumask_any_and(mask_val, cpu_mask); + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); - if (cpu >= nr_cpu_ids) - return -EINVAL; + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; /* don't set the affinity when the target cpu is same as current one */ - if (cpu != its_dev->event_map.col_map[id]) { + if (cpu != prev_cpu) { target_col = &its_dev->its->collections[cpu]; its_send_movi(its_dev, target_col, id); its_dev->event_map.col_map[id] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); } + its_inc_lpi_count(d, cpu); + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return -EINVAL; } static u64 its_irq_get_msi_base(struct its_device *its_dev) @@ -1445,6 +1574,11 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) } } +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_irq_chip = { .name = "ITS", .irq_mask = its_mask_irq, @@ -1453,6 +1587,7 @@ static struct irq_chip its_irq_chip = { .irq_set_affinity 
= its_set_affinity, .irq_compose_msi_msg = its_irq_compose_msi_msg, .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, }; @@ -1633,6 +1768,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) kfree(bitmap); } +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + static struct page *its_allocate_prop_table(gfp_t gfp_flags) { struct page *prop_page; @@ -1641,13 +1785,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags) if (!prop_page) return NULL; - /* Priority 0xa0, Group-1, disabled */ - memset(page_address(prop_page), - LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, - LPI_PROPBASE_SZ); - - /* Make sure the GIC will observe the written configuration */ - gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); + gic_reset_prop_table(page_address(prop_page)); return prop_page; } @@ -1658,20 +1796,74 @@ static void its_free_prop_table(struct page *prop_page) get_order(LPI_PROPBASE_SZ)); } -static int __init its_alloc_lpi_tables(void) +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) { - phys_addr_t paddr; + phys_addr_t start, end, addr_end; + u64 i; - lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), - ITS_MAX_LPI_NRBITS); - gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); - if (!gic_rdists->prop_page) { - pr_err("Failed to allocate PROPBASE\n"); - return -ENOMEM; + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_region(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; } - paddr = page_to_phys(gic_rdists->prop_page); - pr_info("GIC: using LPI property table @%pa\n", &paddr); + /* Not found, not a good sign... 
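+	 * the redistributor points at memory this kernel does not know
+	 * to be reserved, so warn and taint rather than silently trust it.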
*/ + pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GICv3: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); return its_lpi_init(lpi_id_bits); } @@ -1962,12 +2154,9 @@ static int its_alloc_collections(struct its_node *its) static struct page *its_allocate_pending_table(gfp_t gfp_flags) { struct page *pend_page; - /* - * The pending pages have to be at least 64kB aligned, - * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. - */ + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); + get_order(LPI_PENDBASE_SZ)); if (!pend_page) return NULL; @@ -1979,8 +2168,63 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags) static void its_free_pending_table(struct page *pt) { - free_pages((unsigned long)page_address(pt), - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GICv3: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. 
+ */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; } static u64 its_clear_vpend_valid(void __iomem *vlpi_base) @@ -2010,28 +2254,40 @@ static void its_cpu_init_lpis(void) { void __iomem *rbase = gic_data_rdist_rd_base(); struct page *pend_page; + phys_addr_t paddr; u64 val, tmp; - /* If we didn't allocate the pending table yet, do it now */ - pend_page = gic_data_rdist()->pend_page; - if (!pend_page) { - phys_addr_t paddr; + if (gic_data_rdist()->lpi_enabled) + return; - pend_page = its_allocate_pending_table(GFP_NOWAIT); - if (!pend_page) { - pr_err("Failed to allocate PENDBASE for CPU%d\n", - smp_processor_id()); - return; - } + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); - paddr = page_to_phys(pend_page); - pr_info("CPU%d: using LPI pending table @%pa\n", - smp_processor_id(), &paddr); - gic_data_rdist()->pend_page = pend_page; + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; } + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + /* set PROPBASE */ - val = (page_to_phys(gic_rdists->prop_page) | + val = (gic_rdists->prop_table_pa | GICR_PROPBASER_InnerShareable | GICR_PROPBASER_RaWaWb | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); @@ -2105,6 +2361,12 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); +out: + gic_data_rdist()->lpi_enabled = true; + pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->pend_page ? 
"allocated" : "reserved", + &paddr); } static void its_cpu_init_collection(struct its_node *its) @@ -2489,22 +2751,13 @@ static int its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - const struct cpumask *cpu_mask = cpu_online_mask; int cpu; - /* get the cpu_mask of local node */ - if (its_dev->its->numa_node >= 0) - cpu_mask = cpumask_of_node(its_dev->its->numa_node); - - /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first_and(cpu_mask, cpu_online_mask); - if (cpu >= nr_cpu_ids) { - if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) - return -EINVAL; - - cpu = cpumask_first(cpu_online_mask); - } + cpu = its_select_cpu(d, cpu_online_mask); + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + its_inc_lpi_count(d, cpu); its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -2519,6 +2772,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain, struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); /* Stop the delivery of interrupts */ its_send_discard(its_dev, event); } @@ -3270,6 +3524,7 @@ static void its_restore_enable(void) { struct its_node *its; int ret; + int cpu; raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) { @@ -3323,6 +3578,23 @@ static void its_restore_enable(void) GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) its_cpu_init_collection(its); } + + /* + * Enable LPIs:firmware just restore GICR_CTLR_ENABLE_LPIs of boot + * CPU, the other CPUs also should be restored. + */ + for_each_possible_cpu(cpu) { + void __iomem *rbase = gic_data_rdist_cpu(cpu)->rd_base; + u32 val; + + /*Enable LPIs*/ + val = readl_relaxed(rbase + GICR_CTLR); + if (val & GICR_CTLR_ENABLE_LPIS) + continue; + + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + } raw_spin_unlock(&its_lock); } @@ -3584,16 +3856,6 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; - /* - * If coming via a CPU hotplug event, we don't need to disable - * LPIs before trying to re-enable them. They are already - * configured and all is well in the world. Detect this case - * by checking the allocation of the pending table for the - * current CPU. - */ - if (gic_data_rdist()->pend_page) - return 0; - if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; @@ -3603,7 +3865,21 @@ static int redist_disable_lpis(void) if (!(val & GICR_CTLR_ENABLE_LPIS)) return 0; - pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if (gic_data_rdist()->lpi_enabled || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. 
+ */ + pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", smp_processor_id()); add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); @@ -3859,7 +4135,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, } gic_rdists = rdists; - err = its_alloc_lpi_tables(); + + err = allocate_lpi_tables(); if (err) return err; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 05b9a4cdc8fd59254497145bf365e1096e4f505c..0d4519a8d889cffc03819bad40f052f9bdd760b1 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -653,7 +653,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); static int gic_dist_supports_lpis(void) { - return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi; + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); } static void gic_cpu_init(void) @@ -673,10 +675,6 @@ static void gic_cpu_init(void) gic_cpu_config(rbase, gic_redist_wait_for_rwp); - /* Give LPIs a spin */ - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_cpu_init(); - /* initialise system registers */ gic_cpu_sys_reg_init(); } @@ -689,6 +687,10 @@ static void gic_cpu_init(void) static int gic_starting_cpu(unsigned int cpu) { gic_cpu_init(); + + if (gic_dist_supports_lpis()) + its_cpu_init(); + return 0; } @@ -818,6 +820,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, #define gic_smp_init() do { } while(0) #endif +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + #ifdef CONFIG_CPU_PM /* Check whether it's single security state view */ static bool gic_dist_security_disabled(void) @@ -859,6 +866,7 @@ static struct irq_chip gic_chip = { .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .flags = IRQCHIP_SET_TYPE_MASKED | @@ -873,6 +881,7 @@ static struct irq_chip gic_eoimode1_chip = { .irq_eoi = gic_eoimode1_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, @@ -887,6 +896,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (static_branch_likely(&supports_deactivate_key)) chip = &gic_eoimode1_chip; @@ -913,7 +923,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); } /* LPIs */ if (hw >= 8192 && hw < GIC_ID_NR) { @@ -923,6 +933,8 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, handle_fasteoi_irq, NULL, NULL); } + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } @@ -1127,14 +1139,16 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_update_vlpi_properties(); - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && 
gic_dist_supports_lpis()) - its_init(handle, &gic_data.rdists, gic_data.domain); - gic_smp_init(); gic_dist_init(); gic_cpu_init(); gic_cpu_pm_init(); + if (gic_dist_supports_lpis()) { + its_init(handle, &gic_data.rdists, gic_data.domain); + its_cpu_init(); + } + return 0; out_free: diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index b5417012afc8c302faf3eea0b27d311f68fb455f..0f4ec68e3c6a4a35517058dac0c7fd35253bd3b2 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -342,6 +342,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #endif +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqstat, irqnr; @@ -412,6 +417,7 @@ static const struct irq_chip gic_chip = { .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, .flags = IRQCHIP_SET_TYPE_MASKED | @@ -964,6 +970,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct gic_chip_data *gic = d->host_data; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); if (hw < 32) { irq_set_percpu_devid(irq); @@ -974,8 +981,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, handle_fasteoi_irq, NULL, NULL); irq_set_probe(irq); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); + irqd_set_single_target(irqd); } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); return 0; } diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig index 4018af769969366de8ee8412da78f8a4f9d6ba95..b4ce8c11594925d421f04ce0637f5c1307ed1c16 100644 --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig @@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_CPU bool "LED CPU Trigger" + depends on !PREEMPT_RT_BASE help This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 841c005d8ebb2f720047a984ff25c0d974a37dff..461e2397dc88387b8c1b0e5aa36835ed7f1d96d8 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -21,6 +21,12 @@ config IMX_MBOX help Mailbox implementation for i.MX Messaging Unit (MU). +config PHYTIUM_MBOX + tristate "Phytium SoC Mailbox Support" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This driver provides the support for the Phytium mailbox controller. 
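As an aside, a minimal sketch (not part of the patch) of how a kernel client could drive this controller through the common mailbox framework; the example_* names are hypothetical, while mbox_request_channel(), mbox_send_message() and mbox_free_channel() are the stock client API:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mailbox_client.h>

	static void example_rx(struct mbox_client *cl, void *msg)
	{
		/* msg points at the u32 INTR_STAT value passed up by the driver */
		dev_info(cl->dev, "mailbox rx: 0x%x\n", *(u32 *)msg);
	}

	static int example_send(struct device *dev)
	{
		struct mbox_client cl = {
			.dev = dev,
			.rx_callback = example_rx,
			.tx_block = true,
			.tx_tout = 500, /* ms */
		};
		struct mbox_chan *chan;
		u32 cmd = 0x1;
		int ret;

		chan = mbox_request_channel(&cl, 0); /* the controller exposes one channel */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, &cmd); /* ends up in phytium_mbox_send_data() */
		mbox_free_channel(chan);
		return ret < 0 ? ret : 0;
	}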
+ config PLATFORM_MHU tristate "Platform MHU Mailbox" depends on OF diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index c818b5d011aef28ae2e4632c3e93ff05f446590d..de3cbe3ffa44e7969a82815273fff688ab147c3b 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_ARM_MHU) += arm_mhu.o obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o +obj-$(CONFIG_PHYTIUM_MBOX) += phytium_mailbox.o + obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o diff --git a/drivers/mailbox/phytium_mailbox.c b/drivers/mailbox/phytium_mailbox.c new file mode 100644 index 0000000000000000000000000000000000000000..e33eb82efa44184e331e898a11356d5d3387b1c4 --- /dev/null +++ b/drivers/mailbox/phytium_mailbox.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC mailbox driver + * + * Copyright (c) 2020-2023 Phytium Technology Co., Ltd. + * + * Derived from drivers/mailbox/arm_mhu.c + * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd. + * Copyright (C) 2015 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define INTR_STAT 0x0 +#define INTR_SET 0x8 +#define INTR_CLR 0x10 + +#define TX_REG 0x100 + +#define NR_CHANS 1 + +static spinlock_t lock; + +struct phytium_mbox_link { + unsigned int irq; + void __iomem *tx_reg; + void __iomem *rx_reg; +}; + +struct phytium_mbox { + void __iomem *base; + struct phytium_mbox_link mlink; + struct mbox_chan chan; + struct mbox_controller mbox; +}; + +static irqreturn_t phytium_mbox_rx_irq(int irq, void *ch) +{ + struct mbox_chan *chan = ch; + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + unsigned long flags = 0; + + spin_lock_irqsave(&lock, flags); + val = readl_relaxed(mlink->rx_reg + INTR_STAT); + spin_unlock_irqrestore(&lock, flags); + if (!val) + return IRQ_NONE; + + mbox_chan_received_data(chan, (void *)&val); + + spin_lock_irqsave(&lock, flags); + writel_relaxed(val, mlink->rx_reg + INTR_CLR); + spin_unlock_irqrestore(&lock, flags); + + return IRQ_HANDLED; +} + +static int phytium_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 *arg = data; + unsigned long flags = 0; + + spin_lock_irqsave(&lock, flags); + writel_relaxed(*arg, mlink->tx_reg + INTR_SET); + spin_unlock_irqrestore(&lock, flags); + + return 0; +} + +static int phytium_mbox_startup(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + int ret; + unsigned long flags = 0; + + spin_lock_irqsave(&lock, flags); + val = readl_relaxed(mlink->tx_reg + INTR_STAT); + writel_relaxed(val, mlink->tx_reg + INTR_CLR); + spin_unlock_irqrestore(&lock, flags); + + ret = request_irq(mlink->irq, phytium_mbox_rx_irq, + IRQF_SHARED, "phytium_mbox_link", chan); + if (ret) { + dev_err(chan->mbox->dev, + "Unable to acquire IRQ %d\n", mlink->irq); + } + + return ret; +} + +static void phytium_mbox_shutdown(struct mbox_chan *chan) +{ + struct phytium_mbox_link *mlink = chan->con_priv; + + free_irq(mlink->irq, chan); +} + +static bool phytium_mbox_last_tx_done(struct mbox_chan *chan) +{ + unsigned long flags; + struct phytium_mbox_link *mlink = chan->con_priv; + u32 val; + + spin_lock_irqsave(&lock, flags); + val = readl_relaxed(mlink->tx_reg + INTR_STAT); + spin_unlock_irqrestore(&lock, flags); + + return (val == (u32)(1U << 31)); +} + +static const struct mbox_chan_ops phytium_mbox_ops = { + .send_data = phytium_mbox_send_data, + .startup = phytium_mbox_startup, + 
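+ /* TX completion is detected by polling .last_tx_done (probe sets txdone_poll) */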
.shutdown = phytium_mbox_shutdown, + .last_tx_done = phytium_mbox_last_tx_done, +}; + +static const struct acpi_device_id phytium_mbox_acpi_match[] = { + { "PHYT0009", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_mbox_acpi_match); + +static const struct of_device_id phytium_mbox_of_match[] = { + { .compatible = "phytium,mbox", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phytium_mbox_of_match); + +static int phytium_mbox_probe(struct platform_device *pdev) +{ + struct phytium_mbox *mbox; + struct resource *res; + int err, irq; + + spin_lock_init(&lock); + + /* Allocate memory for device */ + mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mbox->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mbox->base)) { + dev_err(&pdev->dev, "ioremap base failed\n"); + return PTR_ERR(mbox->base); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "cannot obtain irq\n"); + return irq; + } + + mbox->chan.con_priv = &mbox->mlink; + mbox->mlink.irq = irq; + mbox->mlink.rx_reg = mbox->base; + mbox->mlink.tx_reg = mbox->mlink.rx_reg + TX_REG; + + mbox->mbox.dev = &pdev->dev; + mbox->mbox.chans = &mbox->chan; + mbox->mbox.num_chans = NR_CHANS; + mbox->mbox.ops = &phytium_mbox_ops; + mbox->mbox.txdone_irq = false; + mbox->mbox.txdone_poll = true; + mbox->mbox.txpoll_period = 1; + + platform_set_drvdata(pdev, mbox); + + err = mbox_controller_register(&mbox->mbox); + if (err) { + dev_err(&pdev->dev, "Failed to register mailboxes %d\n", err); + goto fail; + } + + dev_info(&pdev->dev, "Phytium SoC Mailbox registered\n"); +fail: + return err; +} + +static int phytium_mbox_remove(struct platform_device *pdev) +{ + struct phytium_mbox *mbox = platform_get_drvdata(pdev); + + mbox_controller_unregister(&mbox->mbox); + + return 0; +} + +static struct platform_driver phytium_mbox_driver = { + .probe = phytium_mbox_probe, + .remove = phytium_mbox_remove, + .driver = { + .name = "phytium-mbox", + .of_match_table = of_match_ptr(phytium_mbox_of_match), + .acpi_match_table = ACPI_PTR(phytium_mbox_acpi_match), + }, +}; + +module_platform_driver(phytium_mbox_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium SoC Mailbox Driver"); +MODULE_AUTHOR("Chen Baozi "); diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index f6e0a8b3a61ed34b7b36e02c639c5e7507c23857..18c03d79a442da26beb7e6810ca2254d3ff9bcfb 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -1,6 +1,7 @@ config BCACHE tristate "Block device as cache" + depends on !PREEMPT_RT_FULL select CRC64 help Allows a block device to be used as cache for other devices; uses diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 2957a3763f01915a9337bbf8bae8f2042be8e079..bc8192b4e7b28d1814081037c20353dc0a931e16 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -689,7 +689,6 @@ static void dm_old_request_fn(struct request_queue *q) /* Establish tio->ti before queuing work (map_tio_request) */ tio->ti = ti; kthread_queue_work(&md->kworker, &tio->work); - BUG_ON(!irqs_disabled()); } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c7bda4b0bcedfc8db1203b410d33da6a6f074485..8eb234732c9b879a6d9f58e491e93212ff1796af 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) struct raid5_percpu *percpu; unsigned long cpu; - cpu = get_cpu(); + cpu = get_cpu_light(); percpu = 
per_cpu_ptr(conf->percpu, cpu); + spin_lock(&percpu->lock); if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { ops_run_biofill(sh); overlap_clear++; @@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) if (test_and_clear_bit(R5_Overlap, &dev->flags)) wake_up(&sh->raid_conf->wait_for_overlap); } - put_cpu(); + spin_unlock(&percpu->lock); + put_cpu_light(); } static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) @@ -6816,6 +6818,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) __func__, cpu); return -ENOMEM; } + spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); return 0; } @@ -6826,7 +6829,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) conf->percpu = alloc_percpu(struct raid5_percpu); if (!conf->percpu) return -ENOMEM; - err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); if (!err) { conf->scribble_disks = max(conf->raid_disks, diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8474c224127bf3f47266e8570458945230626692..a3bf907ab2afaac6652d9872a4fa24e11f5b58c0 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -637,6 +637,7 @@ struct r5conf { int recovery_disabled; /* per cpu variables */ struct raid5_percpu { + spinlock_t lock; /* Protection for -RT */ struct page *spare_page; /* Used when checking P/Q in raid6 */ struct flex_array *scribble; /* space for constructing buffer * lists and performing address diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 54fe90acb5b2962432140949a17e1630361e08ca..6c224071bc83fcad826f24ad583b8399a0bb837f 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -151,6 +151,16 @@ config VIDEO_TI_CAL In TI Technical Reference Manual this module is referred as Camera Interface Subsystem (CAMSS). +config VIDEO_PHYTIUM_JPEG + tristate "Phytium JPEG Encoder driver" + depends on VIDEO_V4L2 + select VIDEOBUF2_DMA_CONTIG + help + Support for the Phytium JPEG Encoder Engine embedded + in the Phytium SOCs. + The engine can capture and compress video data from + digital or analog sources. 
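A minimal user-space sketch (not part of the patch) of how the capture node this driver registers is typically probed; the /dev/video0 path is an assumption:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int probe_jpeg_encoder(void)
	{
		struct v4l2_capability cap;
		struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
		int ret = -1;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0)
			return -1;

		/* the driver reports "Phytium JPEG Engine" as the card name */
		if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
		    ioctl(fd, VIDIOC_G_FMT, &fmt) == 0 &&
		    fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
			ret = 0; /* frames read from this node are complete JPEGs */

		close(fd);
		return ret;
	}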
+ endif # V4L_PLATFORM_DRIVERS menuconfig V4L_MEM2MEM_DRIVERS diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 41322ab658027f1d7aa0c571d01ea34444319570..f89634e3239ad5a995a909c33d88a4fe3d714d50 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -93,6 +93,8 @@ obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss/ obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/ +obj-$(CONFIG_VIDEO_PHYTIUM_JPEG) += phytium-jpeg/ + obj-y += meson/ obj-y += cros-ec-cec/ diff --git a/drivers/media/platform/phytium-jpeg/Makefile b/drivers/media/platform/phytium-jpeg/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d9f50a1aaba3546017f4f69f80be616ee21b4aff --- /dev/null +++ b/drivers/media/platform/phytium-jpeg/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +phytium_jpeg-objs := phytium_jpeg_core.o +obj-$(CONFIG_VIDEO_PHYTIUM_JPEG) += phytium_jpeg.o diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c new file mode 100644 index 0000000000000000000000000000000000000000..37a5674a0dd1d64849a8358241618b96bfca88ae --- /dev/null +++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c @@ -0,0 +1,1381 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Phytium JPEG Encoder Engine + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include "phytium_jpeg_reg.h" +#include "phytium_jpeg_core.h" +#include + +static u32 phytium_jpeg_header[PHYTIUM_JPEG_HEADER_SIZE] = { + 0xe0ffd8ff, 0x464a0100, 0x01004649, 0x01000001, + 0x00000100, 0x4300dbff, 0x0c0b1000, 0x100a0c0e, + 0x120e0d0e, 0x18131011, 0x16181a28, 0x23311816, + 0x3a281d25, 0x393c3d33, 0x40373833, 0x404e5c48, + 0x37455744, 0x516d5038, 0x67625f57, 0x4d3e6768, + 0x64707971, 0x67655c78, 0x00dbff63, 0x12110143, + 0x18151812, 0x2f1a1a2f, 0x42384263, 0x63636363, + 0x63636363, 0x63636363, 0x63636363, 0x63636363, + 0x63636363, 0x63636363, 0x63636363, 0x63636363, + 0x63636363, 0x63636363, 0x63636363, 0xc0ff6363, + /* h_index(40) indicates high 8 bits of the height + * w_index(41) contains the low 8 bits of the height, + * and the width. For example, height 480(0x01e0) + * locates at 0x<01> 081100 and 0x038002 . + * width 640 (0x0280) locates at 0x03 <80> <02> e0. 
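+ * Likewise 1920x1080 (width 0x0780, height 0x0438) would give + * 0x<04>081100 and 0x03 <80> <07> 38.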
+ */ + + /* 0x0200 <11> 01 is a field marks YUV mode */ + 0x01081100, 0x038002e0, 0x02001101, 0x11030111, + 0x00c4ff01, 0x0100001f, 0x01010105, 0x00010101, + 0x00000000, 0x01000000, 0x05040302, 0x09080706, + 0xc4ff0b0a, 0x00011f00, 0x01010103, 0x01010101, + 0x00000101, 0x00000000, 0x04030201, 0x08070605, + 0xff0b0a09, 0x10b500c4, 0x03010200, 0x03040203, + 0x04040505, 0x7d010000, 0x00030201, 0x12051104, + 0x06413121, 0x07615113, 0x32147122, 0x08a19181, + 0xc1b14223, 0xf0d15215, 0x72623324, 0x160a0982, + 0x1a191817, 0x28272625, 0x35342a29, 0x39383736, + 0x4544433a, 0x49484746, 0x5554534a, 0x59585756, + 0x6564635a, 0x69686766, 0x7574736a, 0x79787776, + 0x8584837a, 0x89888786, 0x9493928a, 0x98979695, + 0xa3a29a99, 0xa7a6a5a4, 0xb2aaa9a8, 0xb6b5b4b3, + 0xbab9b8b7, 0xc5c4c3c2, 0xc9c8c7c6, 0xd4d3d2ca, + 0xd8d7d6d5, 0xe2e1dad9, 0xe6e5e4e3, 0xeae9e8e7, + 0xf4f3f2f1, 0xf8f7f6f5, 0xc4fffaf9, 0x0011b500, + 0x04020102, 0x07040304, 0x00040405, 0x00770201, + 0x11030201, 0x31210504, 0x51411206, 0x13716107, + 0x08813222, 0xa1914214, 0x2309c1b1, 0x15f05233, + 0x0ad17262, 0xe1342416, 0x1817f125, 0x27261a19, + 0x352a2928, 0x39383736, 0x4544433a, 0x49484746, + 0x5554534a, 0x59585756, 0x6564635a, 0x69686766, + 0x7574736a, 0x79787776, 0x8483827a, 0x88878685, + 0x93928a89, 0x97969594, 0xa29a9998, 0xa6a5a4a3, + 0xaaa9a8a7, 0xb5b4b3b2, 0xb9b8b7b6, 0xc4c3c2ba, + 0xc8c7c6c5, 0xd3d2cac9, 0xd7d6d5d4, 0xe2dad9d8, + 0xe6e5e4e3, 0xeae9e8e7, 0xf5f4f3f2, 0xf9f8f7f6, + 0x00fefffa, 0x0000008f, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xdaff0000, 0x01030c00, 0x03110200, 0x003f0011 +}; + +static char yuv_mode_str[YUV_MODE_STR_LEN] = { "yuv444" }; + +module_param_string(yuv_mode, yuv_mode_str, sizeof(yuv_mode_str), 0444); +MODULE_PARM_DESC(yuv_mode, "Users select one mode from such modes as" + " 'yuv444', or 'yuv422', or 'yuv420'. 
If no mode is set," + " the driver adopts the default mode 'yuv444'."); + +static u32 phytium_jpeg_read(struct phytium_jpeg_dev *jpeg_dev, u32 reg) +{ + u32 reg_val = readl(jpeg_dev->base_addr + reg); + + dev_dbg(jpeg_dev->dev, "read 0x%p + 0x%x -->val[0x%x]\n", + jpeg_dev->base_addr, reg, reg_val); + + return reg_val; +} + +static void phytium_jpeg_write(struct phytium_jpeg_dev *jpeg_dev, + u32 reg, u32 val) +{ + writel(val, jpeg_dev->base_addr + reg); + dev_dbg(jpeg_dev->dev, "write 0x%x to addr 0x%p + 0x%x\n", + val, jpeg_dev->base_addr, reg); +} + +static void phytium_jpeg_update(struct phytium_jpeg_dev *jpeg_dev, u32 reg, + u32 clear, u32 bits) +{ + u32 reg_val = readl(jpeg_dev->base_addr + reg); + u32 tmp = reg_val; + + reg_val &= ~clear; + reg_val |= bits; + writel(reg_val, jpeg_dev->base_addr + reg); + + dev_dbg(jpeg_dev->dev, "the val of addr 0x%p + 0x%x, from 0x%x to 0x%x\n", + jpeg_dev->base_addr, reg, tmp, readl(jpeg_dev->base_addr + reg)); +} + +static void phytium_jpeg_init_regs(struct phytium_jpeg_dev *jpeg_dev) +{ + u32 transform_info = 0; + u32 disable_all_interrupt = 0; + u32 clear_all_interrupt = INT_FIFO_OVERFLOW | INT_OCM_BUF_OVERFLOW | + INT_JPEG_ENCODE_COMPLETE | INT_VIDEO_FORMAT_CHANGE; + u32 rate_to_reg = 0; + + /* First, disable the JPEG engine: set bit0 = 0 */ + phytium_jpeg_write(jpeg_dev, TRANSFORM_INFO_REG, transform_info); + + /* Second, set the video source information: bit1 = 0 selects VGA */ + transform_info |= 0; + + /* Third, set the AXI burst length, bit[16:22] = 0xf (default value) */ + transform_info |= (0xF << TRANS_AXI_LEN_SHIFT) & TRANSINFO_AXI_LEN; + + /* Fourth, the default sampling format is YUV422: set bit13 to 0 */ + /* (setting the sampling interval is skipped) */ + phytium_jpeg_write(jpeg_dev, TRANSFORM_INFO_REG, transform_info); + udelay(5); + + /* Fifth, set the frame rate. + * Floating-point arithmetic is not allowed in kernel code, so use the + * integer form: reg_val = (1 second * 10^8 / frame_rate / 134 * 100) and + * write reg_val to the register,
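+ * e.g. a 60 fps target gives 100000000 / 60 / 134 * 100 = 1243700,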
then set the enable bit (bit31 = 1). + */ + if (jpeg_dev->frame_rate) { + rate_to_reg = 100000000 / jpeg_dev->frame_rate / 134 * 100; + rate_to_reg |= FRAME_SAMPLE_CTRL_EN; + phytium_jpeg_write(jpeg_dev, FRAME_SAMPLE_CTRL, rate_to_reg); + } + /* Sixth, HUFF_MODE: the driver need not configure it, so it is ignored */ + + /* disable all interrupts and then clear all interrupts */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, + disable_all_interrupt); + udelay(5); + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, clear_all_interrupt); + + /* Seventh, Sample_mode, hardware default is yuv444 */ + jpeg_dev->yuv420 = false; +} + +/* Turn on the clock of the jpeg engine */ +static void phytium_jpeg_on(struct phytium_jpeg_dev *jpeg_dev) +{ + if (test_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status)) + return; + + /* Turn on the relevant clocks */ + set_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status); +} + +/* Disable the jpeg engine */ +static void phytium_jpeg_off(struct phytium_jpeg_dev *jpeg_dev) +{ + u32 disable_all_interrupt = 0; + u32 clear_all_interrupt = INT_FIFO_OVERFLOW | INT_OCM_BUF_OVERFLOW | + INT_JPEG_ENCODE_COMPLETE | INT_VIDEO_FORMAT_CHANGE; + + if (!test_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status)) { + dev_info(jpeg_dev->dev, "JPEG Engine is already off.\n"); + return; + } + + /* disable all interrupts */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, disable_all_interrupt); + /* clear all interrupts */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, clear_all_interrupt); + /* disable the JPEG engine */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 0); + + clear_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status); + /* wait 50 ms */ + mdelay(50); + /* C08 bit7 1:busy */ +} + +static inline void phytium_jpeg_enable_source_detecting(struct phytium_jpeg_dev *jpeg_dev) +{ + /* + * Enable detection of source resolution changes + */ + //phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, 0, DETECT_RESOLUTION_CHANGE_EN); + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, 0, TRANSINFO_SRC_SELECT); +} + +#define res_check(val) \ + test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(val)->status) + +static void phytium_jpeg_get_resolution(struct phytium_jpeg_dev *jpeg_dev) +{ + u32 source_info; + u32 width; + u32 height; + struct v4l2_bt_timings *detected_timings = &jpeg_dev->detected_timings; + + /* Before getting a new resolution, a wait of about 10 us may be needed */ + detected_timings->width = MIN_WIDTH; + detected_timings->height = MIN_HEIGHT; + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + + + phytium_jpeg_enable_source_detecting(jpeg_dev); + source_info = phytium_jpeg_read(jpeg_dev, SRC_VGA_INFO_REG); + width = (source_info & SRC_HOR_PIXELS) >> SRC_WIDTH_SHIFT; + height = (source_info & SRC_VER_PIXELS) >> SRC_HEIGHT_SHIFT; + + if (width * height != 0) { + detected_timings->width = width; + detected_timings->height = height; + } + + jpeg_dev->v4l2_input_status = 0; + + /* + * A resolution change triggers an interrupt, and resolution detection + * is disabled while that interrupt is handled. So re-enable it here. + */ + phytium_jpeg_enable_source_detecting(jpeg_dev); + dev_info(jpeg_dev->dev, "Change resolution: %uX%u\n", width, height); +} + +static void phytium_jpeg_set_resolution(struct phytium_jpeg_dev *jpeg_dev) +{ + struct v4l2_bt_timings *active_timings = &jpeg_dev->active_timings; + int i; + int src_addrs[OCM_BUF_NUM]; + /* + * The OCM address space is 0x30C0_0000 ~ 0x30C7_FFFF; the JPEG engine uses the + upper part of this range:
src_0 uses 0x30C4_0000 ~ 0x30c6_0000 (total capacity is + * 128KB, greater than the requirements of the largest resolution). src_1 uses + * 0x30C6_0000 ~ 0x30C7_FFFF. + */ + + /* The OCM address should shift right 8 bits */ + for (i = 0; i < OCM_BUF_NUM; i++) + src_addrs[i] = jpeg_dev->src_addrs[i].dma_addr >> OCM_BUF_SHIFT; + + phytium_jpeg_write(jpeg_dev, OCM_BUF0_ADDR, src_addrs[0]); + phytium_jpeg_write(jpeg_dev, OCM_BUF1_ADDR, src_addrs[1]); + + /* + * In the worst case, the size of one image will be compressed to 25% the + * raw image's size. When a pixel is 4-byte, no need to divide 4. + */ + jpeg_dev->max_compressed_size = active_timings->width * active_timings->height; +} + +/* The below functions is implemented for various v4l2 ioctl operations */ +static int phytium_jpeg_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + strscpy(cap->driver, PHYTIUM_JPEG_NAME, sizeof(cap->driver)); + strscpy(cap->card, "Phytium JPEG Engine", sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(jpeg_dev->dev)); + + return 0; +} + +static int phytium_jpeg_enum_format(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (f->index) { + dev_err(jpeg_dev->dev, "Failed to enum format\n"); + return -EINVAL; + } + + f->pixelformat = V4L2_PIX_FMT_JPEG; + + return 0; +} + +static int phytium_jpeg_get_format(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + f->fmt.pix = jpeg_dev->pix_fmt; + + return 0; +} + +static int phytium_jpeg_enum_input(struct file *file, void *priv, + struct v4l2_input *input) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (input->index) { + dev_err(jpeg_dev->dev, "failed to enum input\n"); + return -EINVAL; + } + + strscpy(input->name, "Host DC Capture", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + input->capabilities = V4L2_IN_CAP_DV_TIMINGS; + input->status = jpeg_dev->v4l2_input_status; + + return 0; +} + +static int phytium_jpeg_get_input(struct file *file, void *priv, + unsigned int *i) +{ + *i = 0; + return 0; +} + +static int phytium_jpeg_set_input(struct file *file, void *priv, + unsigned int i) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (i != 0) { + dev_err(jpeg_dev->dev, "Failed to set input\n"); + return -EINVAL; + } + + return 0; +} + +static int phytium_jpeg_get_parm(struct file *file, void *priv, + struct v4l2_streamparm *stream) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + /* Readbuffers num is 3 */ + stream->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + stream->parm.capture.readbuffers = CAPTURE_BUF_NUMBER; + stream->parm.capture.timeperframe.denominator = 1; + + if (jpeg_dev->frame_rate == 0) + stream->parm.capture.timeperframe.denominator = MAX_FRAME_RATE; + else + stream->parm.capture.timeperframe.denominator = jpeg_dev->frame_rate; + + return 0; +} + +static int phytium_jpeg_set_parm(struct file *file, void *priv, + struct v4l2_streamparm *stream) +{ + unsigned int frame_rate = 0; + u32 rate_to_reg = 0; + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + /* Readbuffers num is 3 */ + stream->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + stream->parm.capture.readbuffers = CAPTURE_BUF_NUMBER; + + if (stream->parm.capture.timeperframe.numerator) + frame_rate = 
stream->parm.capture.timeperframe.denominator / + stream->parm.capture.timeperframe.numerator; + + if (frame_rate == 0 || frame_rate > MAX_FRAME_RATE) { + frame_rate = MAX_FRAME_RATE; + stream->parm.capture.timeperframe.denominator = MAX_FRAME_RATE; + stream->parm.capture.timeperframe.numerator = 1; + } + /* + * reg_val = (1 second * 10^9 / frame_rate / 13.4) + * Linux driver prohibit float point operations. So use the + * format: reg_val = (1 second * 10^8 / frame_rate / 134 *100) + * write reg_val to register. then enable Highest bit31 = 1 + */ + if (jpeg_dev->frame_rate != frame_rate) { + jpeg_dev->frame_rate = frame_rate; + rate_to_reg = 100000000 / jpeg_dev->frame_rate / 134 * 100; + rate_to_reg |= FRAME_SAMPLE_CTRL_EN; + phytium_jpeg_write(jpeg_dev, FRAME_SAMPLE_CTRL, rate_to_reg); + } + + return 0; +} + +static int phytium_jpeg_enum_framesizes(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) + +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (fsize->index != 0) { + dev_err(jpeg_dev->dev, "Failed to enum framesize.\n"); + return -EINVAL; + } + + if (fsize->pixel_format != V4L2_PIX_FMT_JPEG) { + dev_err(jpeg_dev->dev, "enum framesize pixel_format is not JPEG"); + return -EINVAL; + } + + fsize->discrete.width = jpeg_dev->pix_fmt.width; + fsize->discrete.height = jpeg_dev->pix_fmt.height; + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + + return 0; + +} + +static int phytium_jpeg_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *fival) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + if (fival->index != 0) { + dev_err(jpeg_dev->dev, "enum frame intervals failed\n"); + return -EINVAL; + } + + if (fival->width != jpeg_dev->detected_timings.width || + fival->height != jpeg_dev->detected_timings.height) { + dev_err(jpeg_dev->dev, "interval isn't same with the detected_timings.\n"); + return -EINVAL; + } + + if (fival->pixel_format != V4L2_PIX_FMT_JPEG) { + dev_err(jpeg_dev->dev, "enum frame interval pixel fomat is incorrect.\n"); + return -EINVAL; + } + + fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; + fival->stepwise.min.denominator = MAX_FRAME_RATE; + fival->stepwise.min.numerator = 1; + fival->stepwise.max.denominator = 1; + fival->stepwise.max.numerator = 1; + fival->stepwise.step = fival->stepwise.max; + + return 0; +} + +static int phytium_jpeg_set_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + /* the params are passed from user space are same with hardware's params */ + if (timings->bt.width == jpeg_dev->active_timings.width && + timings->bt.height == jpeg_dev->active_timings.height) + return 0; + + if (vb2_is_busy(&jpeg_dev->queue)) { + dev_err(jpeg_dev->dev, "queue is busy during setting dv timings.\n"); + return -EBUSY; + } + + jpeg_dev->active_timings = timings->bt; + phytium_jpeg_set_resolution(jpeg_dev); + jpeg_dev->pix_fmt.width = timings->bt.width; + jpeg_dev->pix_fmt.height = timings->bt.height; + jpeg_dev->pix_fmt.sizeimage = jpeg_dev->max_compressed_size; + timings->type = V4L2_DV_BT_656_1120; + + return 0; +} + +static int phytium_jpeg_get_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + timings->type = V4L2_DV_BT_656_1120; + timings->bt = jpeg_dev->active_timings; + + return 0; +} + +static int phytium_jpeg_query_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + int ret; + 
struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + /* + * This blocks only if the driver is currently in the process of + * detecting a new resolution; in the event of no signal or timeout + * this function is woken up. + */ + if ((file->f_flags & O_NONBLOCK) && + test_bit(VIDEO_RES_CHANGE, &jpeg_dev->status)) + return -EAGAIN; + + ret = wait_event_interruptible(jpeg_dev->wait, !test_bit(VIDEO_RES_CHANGE, + &jpeg_dev->status)); + if (ret) { + dev_err(jpeg_dev->dev, "Failed to query dv timing\n"); + return -EINTR; + } + + timings->type = V4L2_DV_BT_656_1120; + timings->bt = jpeg_dev->detected_timings; + + return jpeg_dev->v4l2_input_status ? -ENOLINK : 0; +} + +static const struct v4l2_dv_timings_cap phytium_jpeg_timings_cap = { + .type = V4L2_DV_BT_656_1120, + .bt = { + .min_width = MIN_WIDTH, + .max_width = MAX_WIDTH, + .min_height = MIN_HEIGHT, + .max_height = MAX_HEIGHT, + .min_pixelclock = 6574080, /* 640 x 480 x 24Hz */ + .max_pixelclock = 1244160000, /* 1920 x 1080 x 60Hz */ + .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | + V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, + .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | + V4L2_DV_BT_CAP_REDUCED_BLANKING | + V4L2_DV_BT_CAP_CUSTOM, + }, +}; + +static int phytium_jpeg_enum_dv_timings(struct file *file, void *priv, + struct v4l2_enum_dv_timings *timings) +{ + return v4l2_enum_dv_timings_cap(timings, &phytium_jpeg_timings_cap, + NULL, NULL); +} + +static int phytium_jpeg_dv_timings_cap(struct file *file, void *priv, + struct v4l2_dv_timings_cap *cap) +{ + *cap = phytium_jpeg_timings_cap; + + return 0; +} + +/* The function is used to notify DV that video resolution is altered */ +static int phytium_jpeg_sub_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_src_change_event_subscribe(fh, sub); + default: + break; + } + + return v4l2_ctrl_subscribe_event(fh, sub); +} + +static const struct v4l2_ioctl_ops phytium_jpeg_ioctl_ops = { + .vidioc_querycap = phytium_jpeg_querycap, + .vidioc_enum_fmt_vid_cap = phytium_jpeg_enum_format, + .vidioc_g_fmt_vid_cap = phytium_jpeg_get_format, + .vidioc_s_fmt_vid_cap = phytium_jpeg_get_format, + .vidioc_try_fmt_vid_cap = phytium_jpeg_get_format, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_input = phytium_jpeg_enum_input, + .vidioc_g_input = phytium_jpeg_get_input, + .vidioc_s_input = phytium_jpeg_set_input, + .vidioc_g_parm = phytium_jpeg_get_parm, + .vidioc_s_parm = phytium_jpeg_set_parm, + .vidioc_enum_framesizes = phytium_jpeg_enum_framesizes, + .vidioc_enum_frameintervals = phytium_jpeg_enum_frameintervals, + .vidioc_s_dv_timings = phytium_jpeg_set_dv_timings, + .vidioc_g_dv_timings = phytium_jpeg_get_dv_timings, + .vidioc_query_dv_timings = phytium_jpeg_query_dv_timings, + .vidioc_enum_dv_timings = phytium_jpeg_enum_dv_timings, + .vidioc_dv_timings_cap = phytium_jpeg_dv_timings_cap, + .vidioc_subscribe_event = phytium_jpeg_sub_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static void phytium_jpeg_init_jpeg_quant(struct phytium_jpeg_dev *jpeg_dev) +{ + const u32 y_quant_table[QUANT_REG_NUM] = { + 0x08000000, 0x0ba2e8ba, 0x0aaaaaab, 0x09249249, 
0x0aaaaaab, + 0x0ccccccc, 0x08000000, 0x09249249, 0x09d89d8a, 0x09249249, + 0x071c71c7, 0x07878788, 0x08000000, 0x06bca1af, 0x05555555, + 0x03333333, 0x04ec4ec5, 0x05555555, 0x05d1745d, 0x05d1745d, + 0x05555555, 0x029cbc15, 0x03a83a84, 0x03759f23, 0x0469ee58, + 0x03333333, 0x0234f72c, 0x02828283, 0x02192e2a, 0x02222222, + 0x023ee090, 0x02828283, 0x02492492, 0x0253c825, 0x02000000, + 0x01c71c72, 0x01642c86, 0x01a41a42, 0x02000000, 0x01e1e1e2, + 0x0178a4c8, 0x01dae607, 0x0253c825, 0x02492492, 0x0199999a, + 0x012c9fb5, 0x01948b10, 0x0178a4c8, 0x0158ed23, 0x014e5e0a, + 0x013e22cc, 0x013b13b1, 0x013e22cc, 0x02108421, 0x01a98ef6, + 0x0121fb78, 0x010ecf57, 0x01249249, 0x013e22cc, 0x01111111, + 0x01642c86, 0x01446f86, 0x013e22cc, 0x014afd6a + }; + + const u32 c_quant_table[QUANT_REG_NUM] = { + 0x07878788, 0x071c71c7, 0x071c71c7, 0x05555555, 0x06186186, + 0x05555555, 0x02b93105, 0x04ec4ec5, 0x04ec4ec5, 0x02b93105, + 0x014afd6a, 0x01f07c1f, 0x02492492, 0x01f07c1f, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a, + 0x014afd6a, 0x014afd6a, 0x014afd6a, 0x014afd6a + }; + int i; + + for (i = 0; i < QUANT_REG_NUM; i++) { + phytium_jpeg_write(jpeg_dev, Y_QUANT_INDEX_ADDR_REG(i), y_quant_table[i]); + phytium_jpeg_write(jpeg_dev, C_QUANT_INDEX_ADDR_REG(i), c_quant_table[i]); + } + +} + +static void phytium_jpeg_start(struct phytium_jpeg_dev *jpeg_dev) +{ + phytium_jpeg_on(jpeg_dev); + phytium_jpeg_init_regs(jpeg_dev); + + /* Resolution set to 640x480 if no signal is found */ + phytium_jpeg_get_resolution(jpeg_dev); + + /* Set timings since the device is being opened for the first tiime */ + jpeg_dev->active_timings = jpeg_dev->detected_timings; + phytium_jpeg_set_resolution(jpeg_dev); + + jpeg_dev->pix_fmt.width = jpeg_dev->active_timings.width; + jpeg_dev->pix_fmt.height = jpeg_dev->active_timings.height; + jpeg_dev->pix_fmt.sizeimage = jpeg_dev->max_compressed_size; +} + +static void phytium_jpeg_stop(struct phytium_jpeg_dev *jpeg_dev) +{ + set_bit(VIDEO_STOPPED, &jpeg_dev->status); + cancel_delayed_work_sync(&jpeg_dev->res_work); + + phytium_jpeg_off(jpeg_dev); + + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + jpeg_dev->status = 0; +} + +static int phytium_jpeg_open(struct file *file) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + mutex_lock(&jpeg_dev->video_lock); + + ret = v4l2_fh_open(file); + if (ret != 0) { + mutex_unlock(&jpeg_dev->video_lock); + dev_err(jpeg_dev->dev, "Failed to open the phytium jpeg device.\n"); + return ret; + } + + if (v4l2_fh_is_singular_file(file)) + phytium_jpeg_start(jpeg_dev); + + mutex_unlock(&jpeg_dev->video_lock); + + return 0; +} + +static int phytium_jpeg_release(struct file *file) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); + + mutex_lock(&jpeg_dev->video_lock); + + if (v4l2_fh_is_singular_file(file)) + phytium_jpeg_stop(jpeg_dev); + + ret = _vb2_fop_release(file, NULL); + mutex_unlock(&jpeg_dev->video_lock); + + return ret; +} + + +static const struct v4l2_file_operations phytium_jpeg_fops 
= { + .owner = THIS_MODULE, + .read = vb2_fop_read, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .open = phytium_jpeg_open, + .release = phytium_jpeg_release, +}; + +static void phytium_jpeg_update_jpeg_header(u32 width, u32 height) +{ + const int h_index = PHYTIUM_JPEG_HEADER_H_INDEX; + const int w_index = PHYTIUM_JPEG_HEADER_W_INDEX; + + /* the high 8 bits of the height locate at bit24~bit31 */ + phytium_jpeg_header[h_index] = phytium_jpeg_header[h_index] & 0x00FFFFFF; + phytium_jpeg_header[h_index] |= ((height >> 8) & 0xFF) << 24; + + /* the low 8 bits of the height locate at bit0~bit7 */ + phytium_jpeg_header[w_index] = phytium_jpeg_header[w_index] & 0xFF000000; + phytium_jpeg_header[w_index] |= height & 0xFF; + + /* the high 8 bits of the width locate at bit8~bit15 */ + phytium_jpeg_header[w_index] |= ((width >> 8) & 0xFF) << 8; + /* the low 8 bits of the width locate at bit16~bit23 */ + phytium_jpeg_header[w_index] |= (width & 0xFF) << 16; +} + +static void phytium_jpeg_fill_header(struct phytium_jpeg_dev *jpeg_dev, + struct phytium_jpeg_buffer *jpeg_buf) +{ + void *vbuf = vb2_plane_vaddr(&jpeg_buf->vb.vb2_buf, 0); + u32 width = jpeg_dev->active_timings.width; + u32 height = jpeg_dev->active_timings.height; + + /* update the contents of the phytium jpeg header according to the resolution */ + phytium_jpeg_update_jpeg_header(width, height); + + /* copy the JPEG header into the buffer */ + memcpy(vbuf, phytium_jpeg_header, PHYTIUM_JPEG_HEADER_LEN); +} + +static int phytium_jpeg_start_frame(struct phytium_jpeg_dev *jpeg_dev) +{ + dma_addr_t dst_addr; + unsigned long status; + struct phytium_jpeg_buffer *jpeg_buf; + + if (jpeg_dev->v4l2_input_status) { + dev_err(jpeg_dev->dev, "No signal; no need to start a frame\n"); + return 0; + } + + spin_lock_irqsave(&jpeg_dev->hw_lock, status); + jpeg_buf = list_first_entry_or_null(&jpeg_dev->buffers, + struct phytium_jpeg_buffer, link); + if (jpeg_buf == NULL) { + spin_unlock_irqrestore(&jpeg_dev->hw_lock, status); + dev_err(jpeg_dev->dev, "No buffers; cannot start a frame\n"); + return -EPROTO; + } + + set_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status); + dst_addr = vb2_dma_contig_plane_dma_addr(&jpeg_buf->vb.vb2_buf, 0); + spin_unlock_irqrestore(&jpeg_dev->hw_lock, status); + + /* + * Because the JPEG engine is unable to add a JPEG header, the phytium + * jpeg driver is required to fill in the contents of the JPEG header + * before the JPEG engine writes data to the DMA address. + */ + phytium_jpeg_fill_header(jpeg_dev, jpeg_buf); + dst_addr += PHYTIUM_JPEG_HEADER_LEN; + /* + * The ikvm application only uses the last frame, so the driver fills + a single output register with the DMA address.
+ */ + dst_addr >>= JPEG_DST_ADDR_SHIFT; + phytium_jpeg_write(jpeg_dev, BUF_LIST_INDEX_ADDR(VB_BUF_NO), dst_addr); + /* Mark the buffer selected by the index as valid */ + phytium_jpeg_write(jpeg_dev, BUF_LIST_INDEX_CTRL_STS_ADDR(VB_BUF_NO), + STS_JPEG_BUF_HIGH_LEVEL_VALID); + /* Enable the interrupt that signals an image has been compressed */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, 0, STS_VE_JPEG_CODE_COMP_EN); + /* Enable the JPEG engine: start capturing and compressing */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 1); + + return 0; +} + +static void phytium_jpeg_resolution_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct phytium_jpeg_dev *jpeg_dev = container_of(dwork, + struct phytium_jpeg_dev, res_work); + u32 input_status = jpeg_dev->v4l2_input_status; + + phytium_jpeg_on(jpeg_dev); + + /* Exit early in case no clients remain */ + if (test_bit(VIDEO_STOPPED, &jpeg_dev->status)) + goto done; + + phytium_jpeg_init_regs(jpeg_dev); + phytium_jpeg_get_resolution(jpeg_dev); + + /* if the source resolution has changed, queue an event */ + if (jpeg_dev->detected_timings.width != jpeg_dev->active_timings.width || + jpeg_dev->detected_timings.height != jpeg_dev->active_timings.height || + input_status != jpeg_dev->v4l2_input_status) { + + static const struct v4l2_event event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + v4l2_event_queue(&jpeg_dev->vdev, &event); + clear_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status); + dev_info(jpeg_dev->dev, "event notifies the resolution change\n"); + } else if (test_bit(VIDEO_STREAMING, &jpeg_dev->status)) { + /* No resolution change so just restart streaming */ + dev_info(jpeg_dev->dev, "resolution did not change\n"); + phytium_jpeg_set_resolution(jpeg_dev); + phytium_jpeg_start_frame(jpeg_dev); + } + +done: + clear_bit(VIDEO_RES_CHANGE, &jpeg_dev->status); + wake_up_interruptible_all(&jpeg_dev->wait); +} + +static int phytium_jpeg_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, + unsigned int *num_planes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(q); + + if (*num_planes) { + if (sizes[0] < jpeg_dev->max_compressed_size) { + v4l2_err(&jpeg_dev->v4l2_dev, "queue v4l2_buf's size is invalid\n"); + return -EINVAL; + } + } + + *num_planes = 1; + sizes[0] = jpeg_dev->max_compressed_size; + return 0; +} + +static int phytium_jpeg_buf_prepare(struct vb2_buffer *vb) +{ + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(vb->vb2_queue); + + if (vb2_plane_size(vb, 0) < jpeg_dev->max_compressed_size) { + v4l2_err(&jpeg_dev->v4l2_dev, "failed to prepare buffer\n"); + return -EINVAL; + } + + return 0; +} + +static inline struct phytium_jpeg_buffer * +phytium_vb2buf_to_dstbuf(struct vb2_v4l2_buffer *buf) +{ + return container_of(buf, struct phytium_jpeg_buffer, vb); +} + +static void phytium_jpeg_buf_queue(struct vb2_buffer *vb) +{ + bool empty; + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct phytium_jpeg_buffer *jpeg_buf = phytium_vb2buf_to_dstbuf(vbuf); + unsigned long status; + + spin_lock_irqsave(&jpeg_dev->hw_lock, status); + empty = list_empty(&jpeg_dev->buffers); + list_add_tail(&jpeg_buf->link, &jpeg_dev->buffers); + spin_unlock_irqrestore(&jpeg_dev->hw_lock, status); + + /* an empty list guarantees that the address of the first queued
vb2_v4l2_buf + * in the list is written to the output register + */ + if (test_bit(VIDEO_STREAMING, &jpeg_dev->status) && + (!test_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status)) && + empty) + phytium_jpeg_start_frame(jpeg_dev); +} + +static void phytium_jpeg_bufs_done(struct phytium_jpeg_dev *jpeg_dev, + enum vb2_buffer_state state) +{ + unsigned long flags; + struct phytium_jpeg_buffer *buf; + + spin_lock_irqsave(&jpeg_dev->hw_lock, flags); + + list_for_each_entry(buf, &jpeg_dev->buffers, link) + vb2_buffer_done(&buf->vb.vb2_buf, state); + + INIT_LIST_HEAD(&jpeg_dev->buffers); + + spin_unlock_irqrestore(&jpeg_dev->hw_lock, flags); +} + +static void phytium_jpeg_irq_res_change(struct phytium_jpeg_dev *jpeg_dev, + ulong delay) +{ + dev_info(jpeg_dev->dev, "Source resolution changed, resetting\n"); + set_bit(VIDEO_RES_CHANGE, &jpeg_dev->status); + + phytium_jpeg_off(jpeg_dev); + + schedule_delayed_work(&jpeg_dev->res_work, delay); +} + +static irqreturn_t phytium_jpeg_irq(int irq, void *arg) +{ + struct phytium_jpeg_dev *jpeg_dev = arg; + u32 status; + struct phytium_jpeg_buffer *buf; + u32 frame_size; + + if (test_bit(VIDEO_POWEROFF, &jpeg_dev->status)) { + dev_info(jpeg_dev->dev, "jpeg engine has been requested to power off\n"); + return IRQ_HANDLED; + } + + status = phytium_jpeg_read(jpeg_dev, INT_STATUS_CTRL_REG); + + if (status & INT_VIDEO_FORMAT_CHANGE) { + dev_info(jpeg_dev->dev, "received a resolution change interrupt\n"); + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, + DETECT_RESOLUTION_CHANGE_EN, 0); + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, INT_VIDEO_FORMAT_CHANGE); + phytium_jpeg_irq_res_change(jpeg_dev, RESOLUTION_CHANGE_DELAY); + return IRQ_HANDLED; + } + + /* + * When the JPEG engine finishes compressing an image, it triggers + * an interrupt; the status identifies the buffer number. Currently, + * the driver uses one buffer. + * + * Note: the JPEG engine cannot generate the JPEG header itself, so + * the driver pre-fills the header into the vb2 buffer before each + * frame (see phytium_jpeg_start_frame()); alternatively, the header + * could be added by an application.
+ */ + if (status & INT_JPEG_COMP_BUF_LIST_NO) { + frame_size = phytium_jpeg_read(jpeg_dev, jpeg_dev->comp_size_read); + frame_size &= JPEG_BUF_CAPACITY_SIZE; + frame_size >>= JPEG_BUF_CAPACITY_SIZE_SHIFT; + spin_lock(&jpeg_dev->hw_lock); + clear_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status); + /* Delete first node from the queue */ + buf = list_first_entry_or_null(&jpeg_dev->buffers, + struct phytium_jpeg_buffer, link); + if (buf != NULL) { + frame_size += PHYTIUM_JPEG_HEADER_LEN; + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, frame_size); + if (!list_is_last(&buf->link, &jpeg_dev->buffers)) { + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + buf->vb.sequence = jpeg_dev->sequence++; + buf->vb.field = V4L2_FIELD_NONE; + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + list_del(&buf->link); + } + } + + spin_unlock(&jpeg_dev->hw_lock); + /* Disable JPEG engine */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 0); + /* Disable interruption */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, STS_VE_JPEG_CODE_COMP_EN, 0); + /* clear all interruption of the hardware's buffers */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, INT_JPEG_ENCODE_COMPLETE, 1); + + status &= ~INT_JPEG_COMP_BUF_LIST_NO; + if (test_bit(VIDEO_STREAMING, &jpeg_dev->status) && buf) + phytium_jpeg_start_frame(jpeg_dev); + } + + return IRQ_HANDLED; +} + +/* VIDIOC_STREAMON, all vb2_v4l2_buf' states are queue */ +static int phytium_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(q); + + jpeg_dev->sequence = 0; + ret = phytium_jpeg_start_frame(jpeg_dev); + if (ret != 0) { + phytium_jpeg_bufs_done(jpeg_dev, VB2_BUF_STATE_QUEUED); + return ret; + } + + /* set the states of the jpeg engine */ + set_bit(VIDEO_STREAMING, &jpeg_dev->status); + return ret; +} + +static void phytium_jpeg_stop_streaming(struct vb2_queue *q) +{ + int ret; + struct phytium_jpeg_dev *jpeg_dev = vb2_get_drv_priv(q); + + clear_bit(VIDEO_STREAMING, &jpeg_dev->status); + ret = wait_event_timeout(jpeg_dev->wait, + !test_bit(VIDEO_FRAME_INPRG, &jpeg_dev->status), + STOP_TIMEOUT); + + /* time out */ + if (ret == 0) { + dev_err(jpeg_dev->dev, "Timed out when stopping streaming.\n"); + /* + * Need to force stop any DMA and try and get HW into a good states + * for future calls to start streaming again. + */ + phytium_jpeg_off(jpeg_dev); + phytium_jpeg_on(jpeg_dev); + phytium_jpeg_init_regs(jpeg_dev); + phytium_jpeg_get_resolution(jpeg_dev); + } + /* first stop jpeg, wait, the free buffer */ + phytium_jpeg_bufs_done(jpeg_dev, VB2_BUF_STATE_ERROR); +} + +static const struct vb2_ops phytium_jpeg_vb2_ops = { + .queue_setup = phytium_jpeg_queue_setup, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_prepare = phytium_jpeg_buf_prepare, + .buf_queue = phytium_jpeg_buf_queue, + .start_streaming = phytium_jpeg_start_streaming, + .stop_streaming = phytium_jpeg_stop_streaming, +}; + +static void phytium_jpeg_set_yuv_mode(struct phytium_jpeg_dev *jpeg_dev) +{ + const char *mode = yuv_mode_str; + enum jpeg_yuv_mode yuv_mode; + + if (strstr(mode, "yuv422") != NULL) + yuv_mode = YUV422; + else if (strstr(mode, "yuv420") != NULL) + yuv_mode = YUV420; + else + yuv_mode = YUV444; + + /* set the yuv mode register */ + phytium_jpeg_write(jpeg_dev, SAMPLE_MODE_REG, yuv_mode); + + /* update the field which indicates YUV mode locates in the JPEG header. 
*/ + phytium_jpeg_header[YUVID] &= 0xFFFF00FF; + if (yuv_mode == YUV422) + phytium_jpeg_header[YUVID] |= 0x2100; + else if (yuv_mode == YUV420) + phytium_jpeg_header[YUVID] |= 0x2200; + else + phytium_jpeg_header[YUVID] |= 0x1100; + +} + +static irqreturn_t phytium_jpeg_timer31_irq(int irq, void *arg) +{ + struct phytium_jpeg_dev *jpeg_dev = arg; + + /* disable timer interrupt */ + writel(0, jpeg_dev->timer31_addr); + + /* clear timer interrupt status */ + writel(0x8, jpeg_dev->timer31_addr + 0x2c); + + /* clear JPEG Engine's poweroff status */ + clear_bit(VIDEO_POWEROFF, &jpeg_dev->status); + dev_info(jpeg_dev->dev, "timer31 set jpeg status 0x%lx\n", jpeg_dev->status); + + /* JPEG Engine is poweron, reconfig quntization table and YUV mode */ + phytium_jpeg_init_jpeg_quant(jpeg_dev); + phytium_jpeg_set_yuv_mode(jpeg_dev); + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, 0, DETECT_RESOLUTION_CHANGE_EN); + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, 0, TRANSINFO_SRC_SELECT); + + dev_info(jpeg_dev->dev, "reconfigure quant table and yuv mode\n"); + + return IRQ_HANDLED; +} + +static int phytium_jpeg_parser_timer31_irq(struct phytium_jpeg_dev *jpeg_dev) +{ + int irq; + int ret; + struct device *dev = jpeg_dev->dev; + + irq = irq_of_parse_and_map(dev->of_node, 2); + if (!irq) { + dev_err(dev, "Failed to get timer31 IRQ\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_timer31_irq, + IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + if (ret < 0) + dev_err(dev, "Failed to request timer31 IRQ %d\n", irq); + + return ret; +} + +static irqreturn_t phytium_jpeg_timer30_irq(int irq, void *arg) +{ + struct phytium_jpeg_dev *jpeg_dev = arg; + struct arm_smccc_res res; + + /* disable timer interrupt */ + writel(0, jpeg_dev->timer30_addr); + /* clear timer interrupt status */ + writel(0x8, jpeg_dev->timer30_addr + 0x2c); + + /* Disable interruption */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, STS_VE_JPEG_CODE_COMP_EN, 0); + + /* call SE to poweroff JPEG Engine */ + arm_smccc_smc(0xc300fff4, 0x9, 0x2, 0x80000020, 0, 0, 0, 0, &res); + + /* set JPEG Engine's status is poweroff */ + set_bit(VIDEO_POWEROFF, &jpeg_dev->status); + dev_info(jpeg_dev->dev, "timer30 set jpeg status 0x%lx\n", jpeg_dev->status); + + return IRQ_HANDLED; +} + +static int phytium_jpeg_parser_timer30_irq(struct phytium_jpeg_dev *jpeg_dev) +{ + int irq; + int ret; + struct device *dev = jpeg_dev->dev; + + irq = irq_of_parse_and_map(dev->of_node, 1); + if (!irq) { + dev_err(dev, "Failed to get timer30 IRQ\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_timer30_irq, + IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + if (ret < 0) + dev_err(dev, "Failed to request timer30 IRQ %d\n", irq); + + return ret; +} + +static int phytium_jpeg_init(struct phytium_jpeg_dev *jpeg_dev) +{ + int irq; + int ret; + struct device *dev = jpeg_dev->dev; + u32 ocm_buf_addr[OCM_BUF_NUM]; + int i; + + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { + dev_err(dev, "Failed to get IRQ\n"); + return -ENODEV; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_irq, + IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + if (ret < 0) { + dev_err(dev, "Failed to request IRQ %d\n", irq); + return ret; + } + + ret = phytium_jpeg_parser_timer30_irq(jpeg_dev); + if (ret < 0) + return ret; + + ret = phytium_jpeg_parser_timer31_irq(jpeg_dev); + if (ret < 0) + return ret; + + ret = of_property_read_u32_array(dev->of_node, "phytium,ocm-buf-addr", + ocm_buf_addr, 
OCM_BUF_NUM); + if (ret != 0) { + dev_err(dev, "Failed to get the OCM address from device tree node.\n"); + return ret; + } + + for (i = 0; i < OCM_BUF_NUM; i++) + jpeg_dev->src_addrs[i].dma_addr = ocm_buf_addr[i]; + + /* CMA memory for JPEG device */ + of_reserved_mem_device_init(dev); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret != 0) { + dev_err(dev, "Failed to set DMA mask\n"); + return ret; + } + + /* Initializing JPEG Y and CbCr quantization table */ + phytium_jpeg_init_jpeg_quant(jpeg_dev); + + /* Select YUV mode */ + phytium_jpeg_set_yuv_mode(jpeg_dev); + dev_info(dev, "successfully initialize jpeg engine\n"); + return 0; + +} + + +static int phytium_jpeg_setup_video(struct phytium_jpeg_dev *jpeg_dev) +{ + struct v4l2_device *v4l2_dev = &jpeg_dev->v4l2_dev; + struct vb2_queue *dst_vq = &jpeg_dev->queue; + struct video_device *vdev = &jpeg_dev->vdev; + int ret; + + jpeg_dev->pix_fmt.pixelformat = V4L2_PIX_FMT_JPEG; + jpeg_dev->pix_fmt.field = V4L2_FIELD_NONE; + jpeg_dev->pix_fmt.colorspace = V4L2_COLORSPACE_SRGB; /* maybe ARGB */ + jpeg_dev->pix_fmt.quantization = V4L2_QUANTIZATION_FULL_RANGE; + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + + ret = v4l2_device_register(jpeg_dev->dev, v4l2_dev); + if (ret != 0) { + dev_err(jpeg_dev->dev, "Failed to register v4l2 device\n"); + return ret; + } + + /* Register how many v4l2 controls to a handler */ + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + dst_vq->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; + dst_vq->dev = v4l2_dev->dev; + dst_vq->lock = &jpeg_dev->video_lock; + dst_vq->ops = &phytium_jpeg_vb2_ops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->drv_priv = jpeg_dev; + dst_vq->buf_struct_size = sizeof(struct phytium_jpeg_buffer); + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + dst_vq->min_buffers_needed = CAPTURE_BUF_NUMBER; + ret = vb2_queue_init(dst_vq); + if (ret) { + dev_err(jpeg_dev->dev, "Failed to init vb2 queue\n"); + goto err_v4l2_register; + } + + vdev->queue = dst_vq; + vdev->fops = &phytium_jpeg_fops; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | + V4L2_CAP_STREAMING; + vdev->v4l2_dev = v4l2_dev; + strscpy(vdev->name, PHYTIUM_JPEG_NAME, sizeof(vdev->name)); + vdev->vfl_type = VFL_TYPE_GRABBER; /* The newest kernel using VFL_TYPE_VIDEO */ + vdev->vfl_dir = VFL_DIR_RX; + vdev->release = video_device_release_empty; + vdev->ioctl_ops = &phytium_jpeg_ioctl_ops; + vdev->lock = &jpeg_dev->video_lock; + + video_set_drvdata(vdev, jpeg_dev); + ret = video_register_device(vdev, VFL_TYPE_GRABBER, 0); + if (ret != 0) { + dev_err(jpeg_dev->dev, "Failed to register video device\n"); + goto err_video_register; + } + + v4l2_info(v4l2_dev, "phytium JPEG registered as /dev/video%d (%d, %d)\n", + jpeg_dev->vdev.num, VIDEO_MAJOR, jpeg_dev->vdev.minor); + return ret; + +err_video_register: + vb2_queue_release(dst_vq); + +err_v4l2_register: + v4l2_device_unregister(v4l2_dev); + return ret; +} + +static const struct phytium_jpeg_config phytium_config = { + .comp_size_read = BUF_LIST_INDEX_CTRL_STS_ADDR(VB_BUF_NO), +}; + +static const struct of_device_id phytium_jpeg_match[] = { + { + .compatible = "phytium,jpeg", + .data = &phytium_config, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, phytium_jpeg_match); + +static int phytium_jpeg_probe(struct platform_device *pdev) +{ + struct phytium_jpeg_dev *jpeg_dev; + const struct of_device_id *match; + const struct phytium_jpeg_config *config; + struct resource *res; + int ret; + + jpeg_dev = devm_kzalloc(&pdev->dev, 
sizeof(*jpeg_dev), GFP_KERNEL);
+	if (jpeg_dev == NULL)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	jpeg_dev->base_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(jpeg_dev->base_addr)) {
+		dev_err(&pdev->dev, "Failed to ioremap registers.\n");
+		return PTR_ERR(jpeg_dev->base_addr);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	jpeg_dev->timer30_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(jpeg_dev->timer30_addr)) {
+		dev_err(&pdev->dev, "Failed to ioremap timer30.\n");
+		return PTR_ERR(jpeg_dev->timer30_addr);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	jpeg_dev->timer31_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(jpeg_dev->timer31_addr)) {
+		dev_err(&pdev->dev, "Failed to ioremap timer31.\n");
+		return PTR_ERR(jpeg_dev->timer31_addr);
+	}
+
+	match = of_match_node(phytium_jpeg_match, pdev->dev.of_node);
+	if (match == NULL) {
+		dev_err(&pdev->dev, "Failed to match.\n");
+		return -EINVAL;
+	}
+
+	config = match->data;
+	jpeg_dev->comp_size_read = config->comp_size_read;
+
+	jpeg_dev->frame_rate = 30;
+	jpeg_dev->dev = &pdev->dev;
+	spin_lock_init(&jpeg_dev->hw_lock);
+	mutex_init(&jpeg_dev->video_lock);
+	init_waitqueue_head(&jpeg_dev->wait);
+	INIT_DELAYED_WORK(&jpeg_dev->res_work, phytium_jpeg_resolution_work);
+	INIT_LIST_HEAD(&jpeg_dev->buffers);
+
+	ret = phytium_jpeg_init(jpeg_dev);
+	if (ret != 0) {
+		dev_err(jpeg_dev->dev, "Failed to initialize the JPEG engine.\n");
+		return ret;
+	}
+
+	ret = phytium_jpeg_setup_video(jpeg_dev);
+
+	return ret;
+}
+
+#define to_phytium_jpeg(x) container_of((x), struct phytium_jpeg_dev, v4l2_dev)
+static int phytium_jpeg_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
+	struct phytium_jpeg_dev *jpeg_dev = to_phytium_jpeg(v4l2_dev);
+
+	phytium_jpeg_off(jpeg_dev);
+
+	video_unregister_device(&jpeg_dev->vdev);
+
+	vb2_queue_release(&jpeg_dev->queue);
+
+	v4l2_device_unregister(v4l2_dev);
+
+	of_reserved_mem_device_release(dev);
+
+	return 0;
+}
+
+static struct platform_driver phytium_jpeg_driver = {
+	.probe = phytium_jpeg_probe,
+	.remove = phytium_jpeg_remove,
+	.driver = {
+		.name = PHYTIUM_JPEG_NAME,
+		.of_match_table = phytium_jpeg_match,
+	},
+};
+
+module_platform_driver(phytium_jpeg_driver);
+
+MODULE_DESCRIPTION("Phytium JPEG Encoder driver");
+MODULE_AUTHOR("Wang Min ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h
new file mode 100644
index 0000000000000000000000000000000000000000..e398d974d928dd2addede501b933aaa872e64244
--- /dev/null
+++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */ + +#ifndef _PHYTIUM_JPEG_CORE_H +#define _PHYTIUM_JPEG_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PHYTIUM_JPEG_NAME "phytium-jpeg" +#define MAX_FRAME_RATE 60 +#define MAX_HEIGHT 1080 +#define MAX_WIDTH 1920 +#define MIN_HEIGHT 480 +#define MIN_WIDTH 640 +#define MIN_PIXEL_CLOCK (640 * 480 * 60) /* 640 x 480 x 60Hz */ +#define MAX_PIXEL_CLOCK (1920 * 1080 * 60) /* 1920 x 1080 x 60Hz */ + +#define SOURCE_RESOLUTION_DETECT_TIMEOUT msecs_to_jiffies(500) +#define RESOLUTION_CHANGE_DELAY msecs_to_jiffies(50) +#define INVALID_RESOLUTION_DELAY msecs_to_jiffies(250) +#define STOP_TIMEOUT msecs_to_jiffies(1000) + +#define INVALID_RESOLUTION_RETRIES 2 +#define CAPTURE_BUF_NUMBER 3 /* using how many buffers */ +#define VB_BUF_NO 0 /* there are 16 buffer, use which one */ + +/* The below macros are defined for the JPEG header of the phytium JPEG Engine */ +#define PHYTIUM_JPEG_HEADER_LEN (256 * 3) +#define PHYTIUM_JPEG_HEADER_SIZE (PHYTIUM_JPEG_HEADER_LEN / sizeof(u32)) +#define PHYTIUM_JPEG_HEADER_H_INDEX 40 +#define PHYTIUM_JPEG_HEADER_W_INDEX 41 + +/* There are two ocm buffers that are used for storaging the inputing video data */ +#define OCM_BUF_NUM 2 + +enum phytium_jpeg_status { + VIDEO_MODE_DETECT_DONE, + VIDEO_RES_CHANGE, + VIDEO_RES_DETECT, + VIDEO_STREAMING, + VIDEO_FRAME_INPRG, + VIDEO_STOPPED, + VIDEO_CLOCKS_ON, + VIDEO_POWEROFF, +}; + +struct phytium_jpeg_addr { + unsigned int size; + dma_addr_t dma_addr; + void *virt_addr; +}; + +struct phytium_jpeg_buffer { + struct vb2_v4l2_buffer vb; + struct list_head link; +}; + +/** + * struct phytium_jpeg - JPEG IP abstraction + * @lock: the mutex protecting this structure + * @hw_lock: spinlock protecting the hw device resource + * @workqueue: decode work queue + * @dev: JPEG device + * @v4l2_dev: v4l2 device for mem2mem mode + * @m2m_dev: v4l2 mem2mem device data + * @alloc_ctx: videobuf2 memory allocator's context + * @dec_vdev: video device node for decoder mem2mem mode + * @dec_reg_base: JPEG registers mapping + * @clk_jdec: JPEG hw working clock + * @clk_jdec_smi: JPEG SMI bus clock + * @larb: SMI device + */ +struct phytium_jpeg_dev { + void __iomem *base_addr; + struct device *dev; + struct v4l2_device v4l2_dev; + struct v4l2_pix_format pix_fmt; + struct v4l2_bt_timings active_timings; + struct v4l2_bt_timings detected_timings; + u32 v4l2_input_status; + struct vb2_queue queue; + struct video_device vdev; + /* v4l2 and videobuf2 lock, protect the structure*/ + struct mutex video_lock; + u32 jpeg_mode; + u32 comp_size_read; + wait_queue_head_t wait; + /* buffer list lock, protecting the hw device resource */ + spinlock_t hw_lock; + struct delayed_work res_work; + struct list_head buffers; + unsigned long status; + unsigned int sequence; + unsigned int max_compressed_size; + struct phytium_jpeg_addr src_addrs[OCM_BUF_NUM]; + struct phytium_jpeg_addr dst_addrs[16]; + + bool yuv420; + unsigned int frame_rate; + void __iomem *timer30_addr; + void __iomem *timer31_addr; +}; + +struct phytium_jpeg_config { + u32 jpeg_mode; + u32 comp_size_read; +}; + +#define YUV_MODE_STR_LEN 8 +#define YUVID 42 + +enum jpeg_yuv_mode { + YUV444 = 0x0, + YUV422 = 0x1, + YUV420 = 0x2 +}; + +#endif /* _PHYTIUM_JPEG_CORE_H */ diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_reg.h 
b/drivers/media/platform/phytium-jpeg/phytium_jpeg_reg.h
new file mode 100644
index 0000000000000000000000000000000000000000..3567badf9eb50139e995beaa52ea9f540db782bb
--- /dev/null
+++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_reg.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#ifndef _PHYTIUM_JPEG_REG_H
+#define _PHYTIUM_JPEG_REG_H
+
+#include
+/* Definitions of the control registers in the JPEG SoC block */
+
+/* The register sets the attributes of video sourced from main memory */
+#define SRC_DDR_INFO_REG 0x00000800
+
+/* The register reports the attributes of video coming from the external VGA input */
+#define SRC_VGA_INFO_REG 0x00000894
+
+#define SRC_FORMAT BIT(0) /* 0: RGB888, 1: RGB565 */
+#define SRC_DE_POLARITY BIT(1) /* 0: active low, otherwise active high */
+#define SRC_HS_POLARITY BIT(2) /* 0: active low, otherwise active high */
+#define SRC_VS_POLARITY BIT(3) /* 0: active low, otherwise active high */
+#define SRC_HOR_PIXELS GENMASK(15, 4) /* the number of horizontal pixels */
+#define SRC_WIDTH_SHIFT 4 /* shift right to get the width */
+#define SRC_VER_PIXELS GENMASK(26, 16) /* the number of vertical pixels */
+#define SRC_HEIGHT_SHIFT 16 /* shift right to get the height */
+/* The bit fields below are only used when the image comes from main memory */
+#define SRC_COMP_DDR_IMG_EN BIT(27) /* 0: disable JPEG compression, otherwise enable */
+
+/* marks which OCM buffer is occupied to store an image */
+#define SRC_DDR_IMG2OCM_VALID GENMASK(29, 28)
+
+/* The register that starts the JPEG engine */
+#define TRANSFORM_INFO_REG 0x00000804
+#define TRANSINFO_ENABLE_ENGINE BIT(0) /* 1: enable the JPEG engine */
+/* 1: video comes from the external VGA input, 0: video comes from DDR */
+#define TRANSINFO_SRC_SELECT BIT(1)
+/* 0: video from the external VGA input is cached in OCM, 1: in DDR */
+#define TRANSINFO_IMAGE_STORE BIT(2)
+#define TRANSINFO_FRAME_RATE GENMASK(9, 4) /* the frame rate value */
+#define TRANSINFO_BLOCK_SIZE BIT(12) /* 0: 8x8, 1: 16x16 */
+#define TRANSINFO_ENABLE_YUV422 BIT(13) /* 1: JPEG blocks are populated as YUV422 */
+/* Supported burst lengths are 1, 2, 4, 8, 16, 32 and 64;
   the default value 0xf is used */
+#define TRANSINFO_AXI_LEN GENMASK(22, 16)
+#define TRANS_AXI_LEN_SHIFT 16
+
+/* The interrupt and status register */
+#define INT_STATUS_CTRL_REG 0x00000808
+#define INT_FIFO_OVERFLOW BIT(0) /* video FIFO overflow, write 1 to clear */
+#define INT_OCM_BUF_OVERFLOW BIT(1) /* OCM buffer overflow, write 1 to clear */
+/* the JPEG engine completed a compression, write 1 to clear */
+#define INT_JPEG_ENCODE_COMPLETE BIT(2)
+/* in VGA mode, the video format has changed */
+#define INT_VIDEO_FORMAT_CHANGE BIT(3)
+/* enable the interrupt for video FIFO overflow and source resolution change */
+#define DETECT_RESOLUTION_CHANGE_EN BIT(4)
+/* enable the interrupt for OCM buffer overflow */
+#define STS_VE_OCM_BUF_OVERFLOW_EN BIT(5)
+/* enable the interrupt for JPEG compression completion */
+#define STS_VE_JPEG_CODE_COMP_EN BIT(6)
+/* in VGA mode, this bit notes that the OCM buffer is busy */
+#define STS_VE_OCM_BUF_BUSY BIT(7)
+/* in VGA mode, these bits note the sequence number of the current frame */
+#define STS_VE_CUR_FRAME_NUMBER GENMASK(9, 8)
+/* in VGA mode, these bits note the sequence number of the cached frame */
+#define STS_VE_BUF_CACHE_NUMBER GENMASK(11, 10)
+/* in VGA mode, the buffer number in the buffer list */
+#define STS_JPEG_COMP_BUF_NO GENMASK(15, 12)
+#define INT_JPEG_COMP_BUF_LIST_NO GENMASK(31, 16) /* per-buffer completion interrupt bits */
+
+#define OCM_BUF0_ADDR 0x0000080C
+#define OCM_BUF1_ADDR 0x00000810
+#define OCM_BUF_SHIFT 8
+
+#define BUF_LIST_BASE_ADDR 0x00000814
+
+#define PHYTIUM_BUF_LIST_ACTRL_AND_STS_BASE_ADDR_REG 0x00000818
+#define STS_JPEG_BUF_HIGH_LEVEL_VALID BIT(0) /* high level means the buffer is valid */
+#define JPEG_BUF_CAPACITY_SIZE GENMASK(29, 8) /* the capacity of the buffer */
+#define JPEG_BUF_CAPACITY_SIZE_SHIFT 8
+
+/* There are 16 buffers in the buffer list; consecutive entries are 8 bytes apart */
+#define BUF_LIST_ADDR_OFFSET 0x8
+#define BUF_LIST_CTRL_AND_STS_OFFSET 0x8
+
+/* Get the address of the buffer with the given index */
+#define BUF_LIST_INDEX_ADDR(index) \
+	(BUF_LIST_BASE_ADDR + (index) * BUF_LIST_ADDR_OFFSET)
+
+#define JPEG_DST_ADDR_SHIFT 8
+
+#define BUF_LIST_INDEX_CTRL_STS_ADDR(index) \
+	(PHYTIUM_BUF_LIST_ACTRL_AND_STS_BASE_ADDR_REG + (index) * BUF_LIST_CTRL_AND_STS_OFFSET)
+
+#define FRAME_SAMPLE_CTRL 0x00000898
+#define FRAME_SAMPLE_CTRL_EN BIT(31)
+#define FRAME_SAMPLE_INTERVAL GENMASK(30, 0)
+
+/* The registers below are all related to quantization */
+#define HUFF_MODE_REG 0x300
+#define SAMPLE_MODE_REG 0x304
+
+#define Y_QUANT_BASE_ADDR_REG 0x400
+#define C_QUANT_BASE_ADDR_REG 0x500
+
+#define QUANT_REG_NUM 64
+
+#define Y_QUANT_INDEX_ADDR_REG(index) \
+	(Y_QUANT_BASE_ADDR_REG + 4 * (index))
+
+#define C_QUANT_INDEX_ADDR_REG(index) \
+	(C_QUANT_BASE_ADDR_REG + 4 * (index))
+
+#endif /* _PHYTIUM_JPEG_REG_H */
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index dd938a5d04094e5e2476e33972fa8cafc8078309..acf86c746b3d5cbce2a0e3dea93732a442e46a41 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -915,6 +915,19 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ucb1400_core.
 
+config MFD_PHYTIUM_I2S_LSD
+	tristate "PHYTIUM Px210 I2S LSD MFD driver"
+	depends on (PCI && ARCH_PHYTIUM)
+	help
+	  This enables support for the Phytium Px210 LSD I2S controller.
+
+config MFD_PHYTIUM_I2S_MMD
+	tristate "PHYTIUM Px210 I2S MMD MFD driver"
+	depends on (PCI && ARCH_PHYTIUM)
+	help
+	  This enables support for the Phytium Px210 MMD I2S controllers
+	  for Display Port.
+
 config MFD_PM8XXX
 	tristate "Qualcomm PM8xxx PMIC chips driver"
 	depends on (ARM || HEXAGON || COMPILE_TEST)
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5856a9489cbd83157025004c05fae2b341e2b51f..561d67112e8933d69d5d0933c53446b1f84a8ec3 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -241,3 +241,5 @@
 obj-$(CONFIG_MFD_SC27XX_PMIC)	+= sprd-sc27xx-spi.o
 obj-$(CONFIG_RAVE_SP_CORE)	+= rave-sp.o
 obj-$(CONFIG_MFD_ROHM_BD718XX)	+= rohm-bd718x7.o
+obj-$(CONFIG_MFD_PHYTIUM_I2S_LSD)	+= phytium_px210_i2s_lsd.o
+obj-$(CONFIG_MFD_PHYTIUM_I2S_MMD)	+= phytium_px210_i2s_mmd.o
diff --git a/drivers/mfd/phytium_px210_i2s_lsd.c b/drivers/mfd/phytium_px210_i2s_lsd.c
new file mode 100644
index 0000000000000000000000000000000000000000..bfdbc9a4ebcd3b268539830eb7b98a21dd47522d
--- /dev/null
+++ b/drivers/mfd/phytium_px210_i2s_lsd.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium I2S LSD MFD driver over PCI bus
+ *
+ * Copyright (c) 2020-2023 Phytium Technology Co., Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#include
+#include
+#include
+#include
+
+struct phytium_px210_mfd {
+	struct device *dev;
+};
+
+struct pdata_px210_mfd {
+	struct device *dev;
+	char *name;
+	int clk_base;
+};
+
+static struct resource phytium_px210_i2s_res0[] = {
+	[0] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct mfd_cell phytium_px210_mfd_cells[] = {
+	{
+		.id = 0,
+		.name = "phytium-i2s",
+		.of_compatible = "phytium,i2s",
+		.resources = phytium_px210_i2s_res0,
+		.num_resources = ARRAY_SIZE(phytium_px210_i2s_res0),
+		.ignore_resource_conflicts = true,
+	},
+};
+
+static void phytium_px210_i2s_setup(struct pci_dev *pdev)
+{
+	struct mfd_cell *cell = &phytium_px210_mfd_cells[0];
+	struct resource *res = (struct resource *)cell->resources;
+	struct pdata_px210_mfd *pdata;
+
+	res[0].start = pci_resource_start(pdev, 0);
+	res[0].end = pci_resource_start(pdev, 0) + 0x0fff;
+
+	res[1].start = pci_resource_start(pdev, 0) + 0x1000;
+	res[1].end = pci_resource_start(pdev, 0) + 0x1fff;
+
+	res[2].start = pdev->irq;
+	res[2].end = pdev->irq;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+
+	pdata->dev = &pdev->dev;
+	pdata->name = "phytium-i2s-lsd";
+	pdata->clk_base = 480000000;
+
+	cell->platform_data = pdata;
+	cell->pdata_size = sizeof(*pdata);
+}
+
+static int phytium_px210_mfd_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *id)
+{
+	struct phytium_px210_mfd *phytium_mfd;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL);
+	if (!phytium_mfd)
+		return -ENOMEM;
+
+	phytium_mfd->dev = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, phytium_mfd);
+
+	phytium_px210_i2s_setup(pdev);
+
+	ret = mfd_add_devices(&pdev->dev, 0, phytium_px210_mfd_cells,
+			      ARRAY_SIZE(phytium_px210_mfd_cells), NULL, 0,
+			      NULL);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+
+static void phytium_px210_mfd_remove(struct pci_dev *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+}
+
+static const struct pci_device_id phytium_px210_mfd_ids[] = {
+	{
+		.vendor = 0x1DB7,
+		.device = 0xDC2B,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = 0x3,
+		.class_mask = 0,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(pci,
phytium_px210_mfd_ids); + +static struct pci_driver phytium_i2s_lsd_mfd_driver = { + .name = "phytium_px210_mfd_i2s", + .id_table = phytium_px210_mfd_ids, + .probe = phytium_px210_mfd_probe, + .remove = phytium_px210_mfd_remove, +}; + +module_pci_driver(phytium_i2s_lsd_mfd_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("Phytium Px210 MFD PCI driver for I2S-LSD"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/phytium_px210_i2s_mmd.c b/drivers/mfd/phytium_px210_i2s_mmd.c new file mode 100644 index 0000000000000000000000000000000000000000..4020686d4ce2e847988246bb3adb3a27dfe5b2ab --- /dev/null +++ b/drivers/mfd/phytium_px210_i2s_mmd.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2S MMD MFD driver over PCI bus + * + * Copyright (c) 2020-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include + +struct phytium_px210_mfd { + struct device *dev; +}; + +struct pdata_px210_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +static struct resource phytium_px210_i2s_res0[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource phytium_px210_i2s_res1[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource phytium_px210_i2s_res2[] = { + [0] = { + .flags = IORESOURCE_MEM, + }, + [1] = { + .flags = IORESOURCE_MEM, + }, + [2] = { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell phytium_px210_mfd_cells[] = { + { + .id = 1, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_px210_i2s_res0, + .num_resources = ARRAY_SIZE(phytium_px210_i2s_res0), + .ignore_resource_conflicts = true, + }, + { + .id = 2, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_px210_i2s_res1, + .num_resources = ARRAY_SIZE(phytium_px210_i2s_res1), + .ignore_resource_conflicts = true, + }, + { + .id = 3, + .name = "phytium-i2s", + .of_compatible = "phytium,i2s", + .resources = phytium_px210_i2s_res2, + .num_resources = ARRAY_SIZE(phytium_px210_i2s_res2), + .ignore_resource_conflicts = true, + }, +}; + +static void phytium_px210_i2s_setup(struct pci_dev *pdev, int i) +{ + struct mfd_cell *cell = &phytium_px210_mfd_cells[i]; + struct resource *res = (struct resource *)cell->resources; + struct pdata_px210_mfd *pdata; + + res[0].start = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1000; + res[0].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x1fff; + + res[1].start = pci_resource_start(pdev, 0) + 0x2000 * i; + res[1].end = pci_resource_start(pdev, 0) + 0x2000 * i + 0x0fff; + + res[2].start = pdev->irq; + res[2].end = pdev->irq; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + + pdata->dev = &pdev->dev; + pdata->clk_base = 600000000; + switch (i) { + case 0: + pdata->name = "phytium-i2s-dp0"; + break; + case 1: + pdata->name = "phytium-i2s-dp1"; + break; + case 2: + pdata->name = "phytium-i2s-dp2"; + break; + default: + break; + } + + cell->platform_data = pdata; + cell->pdata_size = sizeof(*pdata); +} + +static int phytium_px210_mfd_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_px210_mfd *phytium_mfd; + int i; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + phytium_mfd = devm_kzalloc(&pdev->dev, sizeof(*phytium_mfd), GFP_KERNEL); + if (!phytium_mfd) + return 
-ENOMEM;
+
+	phytium_mfd->dev = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, phytium_mfd);
+
+	for (i = 0; i < 3; i++)
+		phytium_px210_i2s_setup(pdev, i);
+
+	ret = mfd_add_devices(&pdev->dev, 0, phytium_px210_mfd_cells,
+			      ARRAY_SIZE(phytium_px210_mfd_cells), NULL, 0,
+			      NULL);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+
+static void phytium_px210_mfd_remove(struct pci_dev *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+}
+
+static const struct pci_device_id phytium_px210_mfd_ids[] = {
+	{
+		.vendor = 0x1DB7,
+		.device = 0xDC23,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = 0x3,
+		.class_mask = 0,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(pci, phytium_px210_mfd_ids);
+
+static struct pci_driver phytium_i2s_mmd_mfd_driver = {
+	.name = "phytium_px210_mfd_mmd",
+	.id_table = phytium_px210_mfd_ids,
+	.probe = phytium_px210_mfd_probe,
+	.remove = phytium_px210_mfd_remove,
+};
+
+module_pci_driver(phytium_i2s_mmd_mfd_driver);
+
+MODULE_AUTHOR("Yiqun Zhang ");
+MODULE_DESCRIPTION("Phytium Px210 MFD PCI driver for I2S-DP");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3726eacdf65de2470ba83c6a91dc3351f69ea713..0900dec7ec04431839965db5c87c40f33901c396 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,8 +69,7 @@
 	  are combined to make a single 32-bit timer.
 
 	  When GENERIC_CLOCKEVENTS is defined, the third timer channel
-	  may be used as a clock event device supporting oneshot mode
-	  (delays of up to two seconds) based on the 32 KiHz clock.
+	  may be used as a clock event device supporting oneshot mode.
 
 config ATMEL_TCB_CLKSRC_BLOCK
 	int
@@ -83,6 +82,15 @@
 	  TC can be used for other purposes, such as PWM generation and
 	  interval timing.
 
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+	bool "TC Block use 32 KiHz clock"
+	depends on ATMEL_TCB_CLKSRC
+	default y
+	help
+	  Select this to use the 32 KiHz base clock rate as the TC block
+	  clock source for clock events.
+
+
 config DUMMY_IRQ
 	tristate "Dummy IRQ handler"
 	default n
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2c11944686cf9ec977f19c4526f0c614af59f600..51fbbe77c0e58eb938f9ba949c5af9dae38f2fce 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -945,3 +945,25 @@
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
+
+config MMC_PHYTIUM_MCI_PCI
+	tristate "Phytium PCI MultiMedia Card Interface support"
+	depends on ARCH_PHYTIUM
+	default y if ARCH_PHYTIUM
+	help
+	  This selects support for the PCI MultiMedia Card Interface on the
+	  Phytium Px210 chipset.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
+config MMC_PHYTIUM_MCI_PLTFM
+	tristate "Phytium MultiMedia Card Interface support"
+	depends on ARCH_PHYTIUM && OF
+	default y if ARCH_PHYTIUM
+	help
+	  This selects support for the MultiMedia Card Interface on Phytium SoCs.
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
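
For reference, a minimal configuration fragment exercising the two MCI
Kconfig symbols added above would be the following (option names are taken
from the entries above; building them as modules rather than built-in is
a per-product choice, not something this patch mandates):

	CONFIG_MMC_PHYTIUM_MCI_PCI=m
	CONFIG_MMC_PHYTIUM_MCI_PLTFM=m
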
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index ce8398e6f2c0e49395c4a818c95ecb1e69d6b87e..a3aa80aaf7df7c1895e041563a710c337960e985 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -69,6 +69,8 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o obj-$(CONFIG_MMC_BCM2835) += bcm2835.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PCI) += phytium-mci-pci.o phytium-mci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PLTFM) += phytium-mci-plat.o phytium-mci.o obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..7e4ba4ac0a20dd40046c2169c376ffdbd548e979 --- /dev/null +++ b/drivers/mmc/host/phytium-mci-pci.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (c) 2020-2023 Phytium Technology Co., Ltd. + * + */ + +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | + MMC_CAP_CMD23 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR50| + MMC_CAP_4_BIT_DATA; +static u32 sd_caps2 = MMC_CAP2_NO_MMC; + +static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_WAIT_WHILE_BUSY | + MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_HW_RESET | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_NONREMOVABLE; +static u32 emmc_caps2 = MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; + +#define PCI_BAR_NO 0 + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int +phytium_mci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + int ret; + + ret = pcim_enable_device(pdev); + + if (ret) + return ret; + pci_set_master(pdev); + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + + pci_enable_msi(pdev); + + host->irq = pdev->irq; + host->irq_flags = IRQF_SHARED; + host->dev = &pdev->dev; + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); + + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto host_free; + } + + host->base = pcim_iomap_table(pdev)[PCI_BAR_NO]; + host->is_use_dma = 1; + host->is_device_x100 = 1; + + if (pdev->devfn == 2) { + host->caps = emmc_caps; + host->caps2 = emmc_caps2; + } else { + host->caps = sd_caps; + host->caps2 = sd_caps2; + mmc->f_max = 25000000; /* stable frequency */ + } + + host->mmc = mmc; + host->clk_rate = MCI_CLK; + + dev_info(&pdev->dev, "%s %d: [bar %d] addr: 0x%llx size: 0x%llx km: 0x%llx devfn:%d\n", + __func__, __LINE__, PCI_BAR_NO, pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), (uint64_t)host->base, pdev->devfn); + + dev_dbg(&pdev->dev, "%s %d:irq:0x%x\n", __func__, __LINE__, host->irq); + + ret = phytium_mci_common_probe(host); + + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + pci_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, 
__LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + pci_disable_device(pdev); + return ret; +} + +static void phytium_mci_pci_remove(struct pci_dev *pdev) +{ + struct phytium_mci_host *host; + struct mmc_host *mmc; + + mmc = pci_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); + return; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return; + } + + del_timer(&host->hotplug_timer); + + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id phytium_mci_pci_tbl[] = { + { + .vendor = 0x1DB7, + .device = 0xDC28, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0x5, + .class_mask = 0, + }, + {} +}; +MODULE_DEVICE_TABLE(pci, phytium_mci_pci_tbl); + +static struct pci_driver phytium_mci_pci_driver = { + .name = "phytium-mci-pci", + .id_table = phytium_mci_pci_tbl, + .probe = phytium_mci_pci_probe, + .remove = phytium_mci_pci_remove, + .driver = { + .pm = &phytium_mci_dev_pm_ops, + } +}; +module_pci_driver(phytium_mci_pci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci-plat.c b/drivers/mmc/host/phytium-mci-plat.c new file mode 100644 index 0000000000000000000000000000000000000000..966dbe3d96344d13ff9943c23fb029b264a1975c --- /dev/null +++ b/drivers/mmc/host/phytium-mci-plat.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface platform driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static u32 mci_caps = MMC_CAP_CMD23 | MMC_CAP_ERASE | MMC_CAP_WAIT_WHILE_BUSY; + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP + +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_mci_suspend, + phytium_mci_resume) + SET_RUNTIME_PM_OPS(phytium_mci_runtime_suspend, + phytium_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static int phytium_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + struct resource *res; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + int ret; + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_mci_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + host->clk_rate = 1200000000; + } + + host->is_use_dma = 1; + host->is_device_x100 = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 0); + + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + host->irq_flags = IRQF_SHARED; + dev_dbg(&pdev->dev, "%s %d:irq:%d\n", __func__, __LINE__, host->irq); + host->dev = &pdev->dev; + host->caps = mci_caps; + host->mmc = mmc; + ret = phytium_mci_common_probe(host); + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +release: + phytium_mci_deinit_hw(host); +release_mem: + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + return ret; +} + +static int phytium_mci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + + mmc = platform_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); + return -1; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return -1; + } + del_timer(&host->hotplug_timer); + del_timer_sync(&host->timeout_timer); + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phytium_mci_deinit_hw(host); + mmc_free_host(mmc); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id phytium_mci_of_ids[] = { + { .compatible = "phytium,mci", }, + {} +}; + +MODULE_DEVICE_TABLE(of, phytium_mci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_mci_acpi_ids[] = { + { .id = "PHYT0017" }, + { } +}; + 
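+/*
+ * Note: in the ACPI probe path above there is no clock reference to parse,
+ * so the driver falls back to a fixed 1.2 GHz controller clock; "PHYT0017"
+ * is the _HID this driver matches for the MCI block.
+ */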
+MODULE_DEVICE_TABLE(acpi, phytium_mci_acpi_ids); +#else +#define phytium_mci_acpi_ids NULL +#endif + +static struct platform_driver phytium_mci_driver = { + .probe = phytium_mci_probe, + .remove = phytium_mci_remove, + .driver = { + .name = "phytium-mci-platform", + .of_match_table = phytium_mci_of_ids, + .acpi_match_table = phytium_mci_acpi_ids, + .pm = &phytium_mci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_mci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c new file mode 100644 index 0000000000000000000000000000000000000000..91e844874b81a2db3e1531aaf56170a6fc94b2b7 --- /dev/null +++ b/drivers/mmc/host/phytium-mci.c @@ -0,0 +1,1549 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci.h" + +static const u32 cmd_ints_mask = MCI_INT_MASK_RE | MCI_INT_MASK_CMD | MCI_INT_MASK_RCRC | + MCI_INT_MASK_RTO | MCI_INT_MASK_HTO | MCI_RAW_INTS_HLE; + +static const u32 data_ints_mask = MCI_INT_MASK_DTO | MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_INT_MASK_SBE_BCI; +static const u32 cmd_err_ints_mask = MCI_INT_MASK_RTO | MCI_INT_MASK_RCRC | MCI_INT_MASK_RE | + MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_MASKED_INTS_SBE_BCI; + +static const u32 dmac_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_NIS | MCI_DMAC_INT_ENA_AIS; +static const u32 dmac_err_ints_mask = MCI_DMAC_INT_ENA_FBE | MCI_DMAC_INT_ENA_DU | + MCI_DMAC_INT_ENA_AIS; + +static void phytium_mci_cmd_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); +static void phytium_mci_adma_reset(struct phytium_mci_host *host); +static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg); +static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data); +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma); +static void phytium_mci_init_hw(struct phytium_mci_host *host); +static int phytium_mci_get_cd(struct mmc_host *mmc); +static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events); + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +static void phytium_mci_reset_hw(struct phytium_mci_host *host) +{ + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) + cpu_relax(); + phytium_mci_send_cmd(host, MCI_CMD_UPD_CLK, 0); +} + +static void phytium_mci_update_external_clk(struct phytium_mci_host *host, u32 uhs_reg_value) +{ + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs_reg_value, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); +} + +static void 
phytium_mci_prepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!(data->host_cookie & MCI_PREPARE_FLAG)) { + data->host_cookie |= MCI_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } +} + +static void phytium_mci_unprepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data->host_cookie & MCI_ASYNC_FLAG) + return; + + if (data->host_cookie & MCI_PREPARE_FLAG) { + dma_unmap_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); + data->host_cookie &= ~MCI_PREPARE_FLAG; + } +} + +static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg) +{ + int rc; + u32 data; + + writel(arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + + rc = readl_relaxed_poll_timeout(host->base + MCI_STATUS, + data, + !(data & MCI_STATUS_CARD_BUSY), + 0, 5000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_status: 0x%08x\n", __func__, __LINE__, data); + + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + rc = readl_relaxed_poll_timeout(host->base + MCI_CMD, + data, + !(data & MCI_CMD_START), + 0, 5000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_cmd: 0x%08x\n", __func__, __LINE__, data); +} + +static void phytium_mci_update_cmd11(struct phytium_mci_host *host, u32 cmd) +{ + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *ios) +{ + u32 div = 0xff, drv = 0, sample = 0; + unsigned long clk_rate; + u32 mci_cmd_bits = MCI_CMD_UPD_CLK; + u32 cmd_reg; + u32 cur_cmd_index; + u32 first_uhs_div, tmp_ext_reg; + + cmd_reg = readl(host->base + MCI_CMD); + cur_cmd_index = cmd_reg & 0x3F; + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + mci_cmd_bits |= MCI_CMD_VOLT_SWITCH; + if (ios->clock) { + if (host->current_ios_clk == ios->clock) + return; + + dev_dbg(host->dev, "will change clock, host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + + if (ios->clock >= 25000000) + tmp_ext_reg = 0x202; + else if (ios->clock == 400000) + tmp_ext_reg = 0x502; + else + tmp_ext_reg = 0x302; + + phytium_mci_update_external_clk(host, tmp_ext_reg); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + clk_rate = host->clk_rate; + first_uhs_div = 1 + ((tmp_ext_reg >> 8)&0xFF); + div = clk_rate / (2 * first_uhs_div * ios->clock); + if (div > 2) { + sample = div / 2 + 1; + drv = sample - 1; + writel((sample << 16) | (drv << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (div == 2) { + drv = 0; + sample = 1; + writel((drv << 8) | (sample << 16) | (div & 0xff), + host->base + MCI_CLKDIV); + } + + dev_dbg(host->dev, "UHS_REG_EXT ext: %x, CLKDIV: %x\n", + readl(host->base + MCI_UHS_REG_EXT), readl(host->base + MCI_CLKDIV)); + + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + host->current_ios_clk = ios->clock; + + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } else { + host->current_ios_clk = 0; + 
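+		/*
+		 * ios->clock == 0: the card clock is being gated off.  The
+		 * controller still expects the update-clock command handshake
+		 * below before the external clock enable bit is cleared.
+		 */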
sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + + if (cur_cmd_index == SD_SWITCH_VOLTAGE) + phytium_mci_update_cmd11(host, mci_cmd_bits | cmd_reg); + else + phytium_mci_send_cmd(host, mci_cmd_bits, 0); + + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + dev_dbg(host->dev, "host->clk_rate: %ld, ios->clock: %d\n", + host->clk_rate, ios->clock); + } +} + +static inline u32 +phytium_mci_cmd_find_resp(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + resp = 0x5; + break; + + case MMC_RSP_R2: + resp = 0x7; + break; + + case MMC_RSP_R3: + resp = 0x1; + break; + + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline +u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 opcode = cmd->opcode; + u32 resp = phytium_mci_cmd_find_resp(host, mrq, cmd); + u32 rawcmd = ((opcode & 0x3f) | ((resp & 0x7) << 6)); + + if (opcode == MMC_GO_INACTIVE_STATE || + (opcode == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) + rawcmd |= (0x1 << 14); + else if (opcode == SD_SWITCH_VOLTAGE) + rawcmd |= (0x1 << 28); + + if (test_and_clear_bit(MCI_CARD_NEED_INIT, &host->flags)) + rawcmd |= (0x1 << 15); + + if (cmd->data) { + struct mmc_data *data = cmd->data; + + rawcmd |= (0x1 << 9); + + if (data->flags & MMC_DATA_WRITE) + rawcmd |= (0x1 << 10); + } + + return (rawcmd | (0x1 << 31)); +} + +static inline void +phytium_mci_adma_write_desc(struct phytium_mci_host *host, + struct phytium_adma2_64_desc *desc, + dma_addr_t addr, u32 len, u32 attribute) +{ + desc->attribute = attribute; + desc->len = len; + desc->addr_lo = lower_32_bits(addr); + desc->addr_hi = upper_32_bits(addr); + dev_dbg(host->dev, "%s %d:addr_lo:0x%x ddr_hi:0x%x\n", __func__, + __LINE__, desc->addr_lo, desc->addr_hi); + + if ((attribute == 0x80000004) || (attribute == 0x8000000c)) { + desc->desc_lo = 0; + desc->desc_hi = 0; + } +} + +static void +phytium_mci_data_sg_write_2_admc_table(struct phytium_mci_host *host, struct mmc_data *data) +{ + struct phytium_adma2_64_desc *desc; + u32 dma_len, i; + dma_addr_t dma_address; + struct scatterlist *sg; + + phytium_mci_init_adma_table(host, &host->dma); + + desc = host->dma.adma_table; + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_address = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + + if (i == 0) { + if (sg_is_last(sg) || (data->sg_count == 1 && dma_len == SD_BLOCK_SIZE)) + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000000c); + else + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000001a); + } else if (sg_is_last(sg)) { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000004); + } else { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000012); + } + + desc++; + } +} + +static void +phytium_mci_data_sg_write_2_fifo(struct phytium_mci_host *host, struct mmc_data *data) +{ + struct scatterlist *sg; + u32 dma_len, i, j; + u32 *virt_addr; + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + writel(0x1<<10, host->base + MCI_CMD); + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + } + } +} + +static void phytium_mci_restart_clk(struct phytium_mci_host *host) +{ + u32 
clk_div, uhs; + + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + clk_div = readl(host->base + MCI_CLKDIV); + uhs = readl(host->base + MCI_UHS_REG_EXT); + writel(0, host->base + MCI_UHS_REG_EXT); + writel(uhs, host->base + MCI_UHS_REG_EXT); + while (!(readl(host->base + MCI_CCLK_RDY) & 0x1)) + cpu_relax(); + + writel(clk_div, host->base + MCI_CLKDIV); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + writel(MCI_CMD_START | MCI_CMD_UPD_CLK, host->base + MCI_CMD); + while (readl(host->base + MCI_CMD) & MCI_CMD_START) + cpu_relax(); +} + +static int +phytim_mci_start_multiple_write(struct phytium_mci_host *host, + struct mmc_request *mrq, u32 cnts, u32 offset) +{ + u32 rawcmd, cmd_status; + struct mmc_command *cmd = mrq->cmd; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + writel(0xffffe, host->base + MCI_RAW_INTS); + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(cnts * mrq->data->blksz, host->base + MCI_BYTCNT); + writel(cmd->arg + offset, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + deadline_time = jiffies + msecs_to_jiffies(200); + + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + + if (cmd_status & MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + deadline_time = jiffies + msecs_to_jiffies(1000); + while (!(cmd_status & MCI_MASKED_INTS_DTO)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ESHUTDOWN; + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ESHUTDOWN; + if (cmd_status & MCI_MASKED_INTS_DTO) + return 0; + if (time_after(jiffies, deadline_time)) + return -ESHUTDOWN; + } + return 0; +} + +static int +phytium_mci_start_sbc_stop_cmd(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, u32 arg) +{ + u32 rawcmd, cmd_status; + u32 *rsp = cmd->resp; + unsigned long deadline_time; + + writel(0xffffe, host->base + MCI_RAW_INTS); + + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + writel(arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + + deadline_time = jiffies + msecs_to_jiffies(200); + cmd_status = readl(host->base + MCI_RAW_INTS); + while (!(cmd_status & MCI_MASKED_INTS_CMD)) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) + return -ENOMEDIUM; + + cmd_status = readl(host->base + MCI_RAW_INTS); + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + if (cmd_status & 
MCI_MASKED_INTS_CMD) + break; + + if (time_after(jiffies, deadline_time)) + return -ETIMEDOUT; + } + + if (cmd_status & MCI_MASKED_INTS_CMD) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + rsp[0] = readl(host->base + MCI_RESP0); + } + } + + if (cmd_err_ints_mask & cmd_status) + return -ETIMEDOUT; + + return 0; +} + +static void +phytium_mci_start_write_multiple_non_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + u32 write_cnts, last_cnts; + u32 i, j, k, send_cnt_one_sg, block_offset; + int ret = 0, dma_len; + struct scatterlist *sg; + u32 *virt_addr = NULL; + + write_cnts = data->blocks / 4; + (data->blocks % 4) ? write_cnts++ : write_cnts; + last_cnts = data->blocks % 4; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + ret = -ENOMEDIUM; + goto write_err; + } + + dev_dbg(host->dev, "%s: cmd:%d, block counts:%d\n", + __func__, mrq->cmd->opcode, data->blocks); + + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + block_offset = 0; + for_each_sg(data->sg, sg, data->sg_count, i) { + /* Each SG data transfor starts */ + dma_len = sg_dma_len(sg); + send_cnt_one_sg = (dma_len / MCI_MAX_FIFO_CNT) + 1; + virt_addr = sg_virt(sg); + for (k = 0; k < send_cnt_one_sg; k++) { + if (dma_len && dma_len >= MCI_MAX_FIFO_CNT) { + /*first write sbc cmd*/ + ret = phytium_mci_start_sbc_stop_cmd(host, mrq, + mrq->sbc, 4); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (MCI_MAX_FIFO_CNT / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + + /*second write cmd25 here*/ + ret = phytim_mci_start_multiple_write(host, mrq, 4, + block_offset); + if (ret) + goto write_err; + block_offset += 4; + dma_len -= MCI_MAX_FIFO_CNT; + } else if (dma_len > 0) { + /*first write sbc cmd*/ + last_cnts = dma_len / 512; + ret = phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->sbc, + last_cnts); + if (ret) + goto write_err; + writel(0x1 << 10, host->base + MCI_CMD); + for (j = 0; j < (dma_len / 4); j++) { + writel(*virt_addr, host->base + MCI_DATA); + virt_addr++; + } + /*second write cmd25 here*/ + ret = phytim_mci_start_multiple_write(host, mrq, last_cnts, + block_offset); + if (ret) + goto write_err; + block_offset += last_cnts; + dma_len = 0; + } else { + dev_dbg(host->dev, "%s: sg %d end\n", __func__, i); + break; + } + } + } + } + +write_err: + host->data = NULL; + host->cmd = NULL; + host->mrq = NULL; + writel(0xffffe, host->base + MCI_RAW_INTS); + if (ret) { + data->bytes_xfered = 0; + if (ret == -ESHUTDOWN) { + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET); + while (readl(host->base + MCI_CNTRL) & MCI_CNTRL_FIFO_RESET) + cpu_relax(); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); + while (readl(host->base + MCI_STATUS) & MCI_STATUS_CARD_BUSY) + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_CONTROLLER_RESET); + phytium_mci_restart_clk(host); + phytium_mci_start_sbc_stop_cmd(host, mrq, mrq->stop, mrq->stop->arg); + } + data->error = -ETIMEDOUT; + mrq->cmd->error = 
-ETIMEDOUT; + mmc_request_done(host->mmc, mrq); + return; + } + data->bytes_xfered = data->blocks * data->blksz; + mmc_request_done(host->mmc, mrq); +} + +static void +phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + u32 rawcmd; + unsigned long flags; + + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + WARN_ON(host->data); + host->data = data; + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_err_irq(host, 0, MCI_INT_MASK_RTO); + return; + } + /* clear interrupts */ + writel(0xffffe, host->base + MCI_RAW_INTS); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET); + + while (readl(host->base + MCI_CNTRL) & (MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET)) + cpu_relax(); + + if (host->adtc_type == COMMOM_ADTC) + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + else + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_USE_INTERNAL_DMAC); + wmb(); /* drain writebuffer */ + sdr_clr_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) + phytium_mci_data_sg_write_2_admc_table(host, data); + else + phytium_mci_data_sg_write_2_fifo(host, data); + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask | data_ints_mask); + if (host->is_use_dma && host->adtc_type == BLOCK_RW_ADTC) { + sdr_set_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + /* Enable the IDMAC */ + sdr_set_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + writel((u32)host->dma.adma_addr, host->base + MCI_DESC_LIST_ADDRL); + writel((u32)(host->dma.adma_addr >> 32), host->base + MCI_DESC_LIST_ADDRH); + } + writel(mrq->data->blksz, host->base + MCI_BLKSIZ); + writel(mrq->data->blocks * mrq->data->blksz, host->base + MCI_BYTCNT); + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE); + writel(cmd->arg, host->base + MCI_CMDARG); + wmb(); /* drain writebuffer */ + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); + + mod_timer(&host->timeout_timer, + jiffies + msecs_to_jiffies(MMC_REQ_TIMEOUT_MS)); +} + +static void phytium_mci_track_cmd_data(struct phytium_mci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_mci_request_done(struct phytium_mci_host *host, struct mmc_request *mrq) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + del_timer(&host->timeout_timer); + host->mrq = NULL; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + phytium_mci_track_cmd_data(host, mrq->cmd, mrq->data); + + if (mrq->data) + phytium_mci_unprepare_data(host, mrq); + + mmc_request_done(host->mmc, mrq); +} + +static bool phytium_mci_cmd_done(struct phytium_mci_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + bool done = false; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (!(events & (MCI_RAW_INTS_RCRC | MCI_RAW_INTS_RE | MCI_RAW_INTS_CMD | + MCI_RAW_INTS_RTO | MCI_INT_MASK_HTO))) { + dev_err(host->dev, "no command interrupt in events: 0x%x\n", events); + return done; + } + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = 
NULL; + if (done) { + spin_unlock_irqrestore(&host->lock, flags); + return true; + } + sdr_clr_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + spin_unlock_irqrestore(&host->lock, flags); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[3] = readl(host->base + MCI_RESP0); + rsp[2] = readl(host->base + MCI_RESP1); + rsp[1] = readl(host->base + MCI_RESP2); + rsp[0] = readl(host->base + MCI_RESP3); + } else { + /* + * Sometimes the ACMD41 command-done irq arrives while the response index + * still holds APP_CMD, so poll the MCI status until the response index changes. + */ + if (cmd->opcode == SD_APP_OP_COND) { + int polling_cnt = 20; + + while (MMC_APP_CMD == MCI_STATUS_RESPOSE_INDEX(readl(host->base + MCI_STATUS))) { + udelay(100); + polling_cnt--; + if (polling_cnt == 0) { + dev_info(host->dev, "HW response index does not match the cmd opcode, the response value may be wrong\n"); + break; + } + } + } + rsp[0] = readl(host->base + MCI_RESP0); + } + + if (cmd->opcode == SD_SEND_RELATIVE_ADDR) + host->current_rca = rsp[0] & 0xFFFF0000; + } + if (!(events & (MCI_RAW_INTS_CMD | MCI_INT_MASK_HTO))) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && (events & MCI_RAW_INTS_RTO) + && readl(host->base + MCI_CARD_DETECT)) { + cmd->error = -ENOMEDIUM; + rsp[0] = 0; + } else if (events & MCI_RAW_INTS_RTO || + (cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) { + cmd->error = -ETIMEDOUT; + } else if (events & MCI_RAW_INTS_RCRC) { + cmd->error = -EILSEQ; + } else { + cmd->error = -ETIMEDOUT; + } + } + phytium_mci_cmd_next(host, mrq, cmd); + return true; +} + +static void phytium_mci_start_command(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->cmd); + host->cmd = cmd; + cmd->error = 0; + writel(0xffffe, host->base + MCI_RAW_INTS); + + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + spin_unlock_irqrestore(&host->lock, flags); + + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && readl(host->base + MCI_CARD_DETECT)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, cmd); + return; + } + + spin_lock_irqsave(&host->lock, flags); + sdr_set_bits(host->base + MCI_INT_MASK, cmd_ints_mask); + writel(cmd->arg, host->base + MCI_CMDARG); + writel(rawcmd, host->base + MCI_CMD); + spin_unlock_irqrestore(&host->lock, flags); + + mod_timer(&host->timeout_timer, + jiffies + msecs_to_jiffies(MMC_REQ_TIMEOUT_MS)); +} + +static void +phytium_mci_cmd_next(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd) +{ + if ((cmd->error && !(cmd->opcode == MMC_SEND_TUNING_BLOCK || + cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) || + (mrq->sbc && mrq->sbc->error)) { + phytium_mci_request_done(host, mrq); + } else if (cmd == mrq->sbc) { + if ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK)) { + dev_dbg(host->dev, "%s %d:sbc done and next cmd:%d length:%d\n", + __func__, __LINE__, mrq->cmd->opcode, mrq->data->sg->length); + phytium_mci_prepare_data(host, mrq); + if (host->is_use_dma) + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + } else { + dev_err(host->dev, "%s %d:ERROR: cmd %d follows the SBC\n", + __func__, __LINE__, cmd->opcode); + } + } else if (!cmd->data) { + 
phytium_mci_request_done(host, mrq); + } +} + +static void phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 data; + int rc; + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + rc = readl_relaxed_poll_timeout(host->base + MCI_STATUS, + data, + !(data & MCI_STATUS_CARD_BUSY), + 0, 2000 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_status: 0x%08x\n", __func__, __LINE__, data); + + dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, + mrq->cmd->opcode, mrq->cmd->arg); + + if (host->is_device_x100 && mrq->sbc && mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) { + phytium_mci_start_write_multiple_non_dma(mmc, mrq); + return; + } + + if (mrq->sbc) { + phytium_mci_start_command(host, mrq, mrq->sbc); + return; + } + if (mrq->data) { + phytium_mci_prepare_data(host, mrq); + + if ((mrq->data->sg->length >= 512) && host->is_use_dma && + ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK) || + (mrq->cmd->opcode == SD_IO_RW_EXTENDED))) + + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + return; + } + phytium_mci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + phytium_mci_prepare_data(host, mrq); + data->host_cookie |= MCI_ASYNC_FLAG; +} + +static void phytium_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + if (data->host_cookie & MCI_ASYNC_FLAG) { + data->host_cookie &= ~MCI_ASYNC_FLAG; + phytium_mci_unprepare_data(host, mrq); + } +} + +static void phytium_mci_data_read_without_dma(struct phytium_mci_host *host, + struct mmc_data *data) +{ + u32 length, i, data_val, dma_len, tmp = 0; + u32 *virt_addr; + unsigned long flags; + struct scatterlist *sg; + + length = data->blocks * data->blksz; + + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + spin_lock_irqsave(&host->lock, flags); + if (data->host_cookie & MCI_ASYNC_FLAG) { + tmp = MCI_ASYNC_FLAG; + phytium_mci_post_req(host->mmc, data->mrq, 0); + } else { + phytium_mci_unprepare_data(host, data->mrq); + } + + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + + for (i = 0; i < (dma_len / 4); i++) { + data_val = readl(host->base + MCI_DATA); + memcpy(virt_addr, &data_val, 4); + ++virt_addr; + } + } + + if (tmp & MCI_ASYNC_FLAG) + phytium_mci_pre_req(host->mmc, data->mrq); + else + phytium_mci_prepare_data(host, data->mrq); + + spin_unlock_irqrestore(&host->lock, flags); + } + data->bytes_xfered = length; +} + +static void phytium_mci_data_xfer_next(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && + (data->error || !mrq->sbc)) { + while ((readl(host->base + MCI_STATUS) & (MCI_STATUS_CARD_BUSY))) + cpu_relax(); + phytium_mci_start_command(host, mrq, mrq->stop); + } else { + phytium_mci_request_done(host, mrq); + } +} + +static bool phytium_mci_data_xfer_done(struct phytium_mci_host *host, u32 events, + struct mmc_request *mrq, struct 
mmc_data *data) +{ + unsigned long flags; + bool done; + + unsigned int check_data = events & (MCI_RAW_INTS_DTO | MCI_RAW_INTS_RCRC | + MCI_RAW_INTS_DCRC | MCI_RAW_INTS_RE | + MCI_RAW_INTS_DRTO | MCI_RAW_INTS_EBE | + MCI_DMAC_STATUS_AIS | MCI_DMAC_STATUS_DU | + MCI_RAW_INTS_SBE_BCI | MCI_INT_MASK_RTO); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + + if (check_data || host->data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + if (check_data) { + spin_lock_irqsave(&host->lock, flags); + sdr_clr_bits(host->base + MCI_DMAC_INT_ENA, dmac_ints_mask); + sdr_clr_bits(host->base + MCI_INT_MASK, data_ints_mask); + /* Stop the IDMAC running */ + sdr_clr_bits(host->base + MCI_BUS_MODE, MCI_BUS_MODE_DE); + dev_dbg(host->dev, "DMA stop\n"); + spin_unlock_irqrestore(&host->lock, flags); + + if (events & MCI_RAW_INTS_DTO) { + if (!host->is_use_dma || + (host->is_use_dma && host->adtc_type == COMMOM_ADTC && + (mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC)) + phytium_mci_data_read_without_dma(host, data); + else + data->bytes_xfered = data->blocks * data->blksz; + } else { + data->bytes_xfered = 0; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) + && readl(host->base + MCI_CARD_DETECT) + && (events & cmd_err_ints_mask)) { + data->error = -ENOMEDIUM; + data->mrq->cmd->error = -ENOMEDIUM; + } else if (events & (MCI_RAW_INTS_DCRC | MCI_RAW_INTS_EBE | + MCI_RAW_INTS_SBE_BCI)) { + data->error = -EILSEQ; + host->cmd = NULL; + } else { + data->error = -ETIMEDOUT; + host->cmd = NULL; + } + } + + phytium_mci_data_xfer_next(host, mrq, data); + done = true; + } + return done; +} + +static int phytium_mci_card_busy(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + status = readl(host->base + MCI_STATUS); + + return !!(status & MCI_STATUS_CARD_BUSY); +} + +static void __phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable) +{ + if (enable) + sdr_set_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); + else + sdr_clr_bits(host->base + MCI_INT_MASK, MCI_INT_MASK_SDIO); +} + +static void phytium_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + __phytium_mci_enable_sdio_irq(host, enable); +} + +static void hotplug_timer_func(struct timer_list *t) +{ + struct phytium_mci_host *host; + u32 status; + + host = from_timer(host, t, hotplug_timer); + if (!host) + return; + + status = readl(host->base + MCI_CARD_DETECT); + + if (status & 0x1) { + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); + } + } else { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } +} + +static void phytium_mci_request_timeout(struct timer_list *t) +{ + struct phytium_mci_host *host = from_timer(host, t, timeout_timer); + + dev_err(host->dev, "request timeout\n"); + if (host->mrq) { + dev_err(host->dev, "aborting mrq=%p cmd=%d\n", + host->mrq, host->mrq->cmd->opcode); + if (host->cmd) { + dev_err(host->dev, "aborting cmd=%d\n", host->cmd->opcode); + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, host->mrq, host->cmd); + } else if (host->data) { + dev_err(host->dev, "abort data: cmd%d; %d blocks\n", + host->mrq->cmd->opcode, host->data->blocks); + phytium_mci_data_xfer_done(host, MCI_RAW_INTS_RTO, host->mrq, + host->data); + } + } +} + +static int phytium_mci_err_irq(struct phytium_mci_host *host, u32 dmac_events, u32 events) +{ + struct 
mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (cmd && (cmd == mrq->sbc)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->sbc); + } else if (cmd && (cmd == mrq->stop)) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->stop); + } else if (data) { + data->error = -ETIMEDOUT; + if ((data->flags & MMC_DATA_READ) == MMC_DATA_READ || + (data->flags & MMC_DATA_WRITE) == MMC_DATA_WRITE) + phytium_mci_data_xfer_done(host, events | dmac_events, mrq, data); + } else if (cmd) { + phytium_mci_cmd_done(host, MCI_RAW_INTS_RTO, mrq, mrq->cmd); + } + + return 0; +} + +static irqreturn_t phytium_mci_irq(int irq, void *dev_id) +{ + struct phytium_mci_host *host = (struct phytium_mci_host *) dev_id; + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events, event_mask, dmac_events, dmac_evt_mask; + + if (!host) + return IRQ_NONE; + writel(0, host->base + 0xfd0); + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + MCI_RAW_INTS); + dmac_events = readl(host->base + MCI_DMAC_STATUS); + event_mask = readl(host->base + MCI_INT_MASK); + dmac_evt_mask = readl(host->base + MCI_DMAC_INT_ENA); + if ((!events) && (!(dmac_events&0x1fff))) { + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_NONE; + } + dev_dbg(host->dev, "%s:events:%x,mask:0x%x,dmac_events:%x,dmac_mask:0x%x,cmd:%d\n", + __func__, events, event_mask, dmac_events, dmac_evt_mask, + host->mrq ? host->mrq->cmd->opcode : 255); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + + if (((events & event_mask) & MCI_RAW_INTS_SDIO) && + ((events == 0x10001) || (events == 0x10000) || (events == 0x10040))) { + writel(events, host->base + MCI_RAW_INTS); + __phytium_mci_enable_sdio_irq(host, 0); + sdio_signal_irq(host->mmc); + spin_unlock_irqrestore(&host->lock, flags); + goto irq_out; + } + + writel(events, host->base + MCI_RAW_INTS); + writel(dmac_events, host->base + MCI_DMAC_STATUS); + spin_unlock_irqrestore(&host->lock, flags); + + if (((events & event_mask) == 0) && ((dmac_evt_mask & dmac_events) == 0)) + goto irq_out; + + if (((events & event_mask) & MCI_RAW_INTS_CD) && !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) { + mod_timer(&host->hotplug_timer, jiffies + usecs_to_jiffies(20000)); + dev_dbg(host->dev, "sd status changed here ! 
status:[%d] [%s %d]", + readl(host->base + MCI_CARD_DETECT), __func__, __LINE__); + + if ((events & event_mask) == MCI_RAW_INTS_CD) + goto irq_out; + } + + if (!mrq) { + if (events & MCI_RAW_INTS_HLE) + dev_dbg(host->dev, + "%s: MRQ=NULL and HW write locked, events=%08x,event_mask=%08x\n", + __func__, events, event_mask); + else + dev_dbg(host->dev, "%s: MRQ=NULL events:%08X evt_mask=%08X,sd_status:%d\n", + __func__, events, event_mask, readl(host->base + MCI_CARD_DETECT)); + goto irq_out; + } + + if ((dmac_events & dmac_err_ints_mask) || (events & cmd_err_ints_mask)) { + dev_dbg(host->dev, "ERR:events:%x,mask:0x%x,dmac_evts:%x,dmac_mask:0x%x,cmd:%d\n", + events, event_mask, dmac_events, dmac_evt_mask, mrq->cmd->opcode); + phytium_mci_err_irq(host, dmac_events & dmac_err_ints_mask, + events & cmd_err_ints_mask); + goto irq_out; + } + + if ((events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { + phytium_mci_cmd_done(host, events, mrq, cmd); + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), mrq, data); + } else if (events & MCI_MASKED_INTS_CMD || + ((events & MCI_INT_MASK_HTO) && (cmd->opcode == SD_SWITCH_VOLTAGE))) { + phytium_mci_cmd_done(host, events, mrq, cmd); + } else if (events & MCI_MASKED_INTS_DTO) { + phytium_mci_data_xfer_done(host, (events & data_ints_mask) | + (dmac_events & dmac_ints_mask), mrq, data); + } + +irq_out: + return IRQ_HANDLED; +} + +static void phytium_mci_init_hw(struct phytium_mci_host *host) +{ + u32 val; + int uhs_reg_value = 0x502; + + writel(MCI_SET_FIFOTH(0x2, 0x7, 0x100), host->base + MCI_FIFOTH); + writel(0x800001, host->base + MCI_CARD_THRCTL); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + phytium_mci_update_external_clk(host, uhs_reg_value); + + sdr_set_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_set_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_set_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + + phytium_mci_reset_hw(host); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + else + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); + + sdr_set_bits(host->base + MCI_CNTRL, MCI_CNTRL_INT_ENABLE | + MCI_CNTRL_USE_INTERNAL_DMAC); + + writel(0xFFFFFFFF, host->base + MCI_TMOUT); + dev_info(host->dev, "init hardware done!"); + +} + +void phytium_mci_deinit_hw(struct phytium_mci_host *host) +{ + u32 val; + + sdr_clr_bits(host->base + MCI_PWREN, MCI_PWREN_ENABLE); + sdr_clr_bits(host->base + MCI_CLKENA, MCI_CLKENA_CCLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG_EXT, MCI_EXT_CLK_ENABLE); + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + writel(0, host->base + MCI_INT_MASK); + val = readl(host->base + MCI_RAW_INTS); + writel(val, host->base + MCI_RAW_INTS); + writel(0, host->base + MCI_DMAC_INT_ENA); + val = readl(host->base + MCI_DMAC_STATUS); + writel(val, host->base + MCI_DMAC_STATUS); + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + writel(MCI_INT_MASK_CD, host->base + MCI_INT_MASK); +} +EXPORT_SYMBOL_GPL(phytium_mci_deinit_hw); + 
+static void phytium_mci_adma_reset(struct phytium_mci_host *host) +{ + u32 bmod = readl(host->base + MCI_BUS_MODE); + + bmod |= MCI_BUS_MODE_SWR; + writel(bmod, host->base + MCI_BUS_MODE); +} + +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma) +{ + struct phytium_adma2_64_desc *adma_table = dma->adma_table; + dma_addr_t dma_addr; + int i; + + memset(adma_table, 0, sizeof(struct phytium_adma2_64_desc) * MAX_BD_NUM); + + for (i = 0; i < (MAX_BD_NUM - 1); i++) { + dma_addr = dma->adma_addr + sizeof(*adma_table) * (i + 1); + adma_table[i].desc_lo = lower_32_bits(dma_addr); + adma_table[i].desc_hi = upper_32_bits(dma_addr); + adma_table[i].attribute = 0; + adma_table[i].NON1 = 0; + adma_table[i].len = 0; + adma_table[i].NON2 = 0; + } + + phytium_mci_adma_reset(host); +} + +static void phytium_mci_set_buswidth(struct phytium_mci_host *host, u32 width) +{ + u32 val; + + switch (width) { + case MMC_BUS_WIDTH_1: + val = MCI_BUS_1BITS; + break; + + case MMC_BUS_WIDTH_4: + val = MCI_BUS_4BITS; + break; + + case MMC_BUS_WIDTH_8: + val = MCI_BUS_8BITS; + break; + default: + val = MCI_BUS_4BITS; + break; + } + writel(val, host->base + MCI_CTYPE); + dev_dbg(host->dev, "Bus Width = %d, set value:0x%x\n", width, val); +} + +static void phytium_mci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + if (ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_DDR50) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + else + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_DDR); + + phytium_mci_set_buswidth(host, ios->bus_width); + + switch (ios->power_mode) { + case MMC_POWER_UP: + set_bit(MCI_CARD_NEED_INIT, &host->flags); + writel(MCI_POWER_ON, host->base + MCI_PWREN); + break; + + case MMC_POWER_ON: + break; + + case MMC_POWER_OFF: + writel(MCI_POWER_OFF, host->base + MCI_PWREN); + break; + + default: + break; + } + phytium_mci_set_clk(host, ios); +} + +static void phytium_mci_ack_sdio_irq(struct mmc_host *mmc) +{ + unsigned long flags; + struct phytium_mci_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __phytium_mci_enable_sdio_irq(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + +static int phytium_mci_get_cd(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + if (mmc->caps & MMC_CAP_NONREMOVABLE) + return 1; + + status = readl(host->base + MCI_CARD_DETECT); + + if ((status & 0x1) == 0x1) + return 0; + + return 1; +} + +static int phytium_mci_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + unsigned int is_voltage_180 = 0; + + is_voltage_180 = readl(host->base + MCI_UHS_REG); + if ((mmc->caps & MMC_CAP_NONREMOVABLE) && (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180)) + return -EINVAL; + + if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) && (is_voltage_180 & 0x1)) + sdr_clr_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if ((ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) && (!(is_voltage_180 & 0x1))) + sdr_set_bits(host->base + MCI_UHS_REG, MCI_UHS_REG_VOLT); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) + return -EINVAL; + return 0; +} + +static void phytium_mci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 reset_flag; + + if (host->is_use_dma) { + reset_flag = MCI_CNTRL_FIFO_RESET | MCI_CNTRL_DMA_RESET; + phytium_mci_adma_reset(host); + sdr_set_bits(host->base + 
MCI_CNTRL, reset_flag); + } else { + reset_flag = MCI_CNTRL_FIFO_RESET; + sdr_set_bits(host->base + MCI_CNTRL, reset_flag); + } + + while (readl(host->base + MCI_CNTRL) & reset_flag) + cpu_relax(); + + sdr_clr_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + udelay(5); + sdr_set_bits(host->base + MCI_CARD_RESET, MCI_CARD_RESET_ENABLE); + usleep_range(200, 300); +} + +#ifdef CONFIG_PM_SLEEP +int phytium_mci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_suspend); + +int phytium_mci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_resume); + +#endif + +#ifdef CONFIG_PM +int phytium_mci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_suspend); + +int phytium_mci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phytium_mci_runtime_resume); + +#endif + +static struct mmc_host_ops phytium_mci_ops = { + .post_req = phytium_mci_post_req, + .pre_req = phytium_mci_pre_req, + .request = phytium_mci_ops_request, + .set_ios = phytium_mci_ops_set_ios, + .get_cd = phytium_mci_get_cd, + .enable_sdio_irq = phytium_mci_enable_sdio_irq, + .ack_sdio_irq = phytium_mci_ack_sdio_irq, + .card_busy = phytium_mci_card_busy, + .start_signal_voltage_switch = phytium_mci_ops_switch_volt, + .hw_reset = phytium_mci_hw_reset, +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *dev = host->dev; + int ret; + + dma_set_mask(dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + + timer_setup(&host->hotplug_timer, hotplug_timer_func, 0); + timer_setup(&host->timeout_timer, phytium_mci_request_timeout, 0); + + mmc->f_min = MCI_F_MIN; + if (!mmc->f_max) + mmc->f_max = MCI_F_MAX; + + mmc->ops = &phytium_mci_ops; + mmc->ocr_avail_sdio = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_mmc = MMC_VDD_165_195; + mmc->caps |= host->caps; + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + dev_dbg(host->dev, "%s %d: MMC_CAP_SDIO_IRQ\n", __func__, __LINE__); + } + mmc->caps2 |= host->caps2; + if (host->is_use_dma) { + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + host->dma.adma_table = dma_zalloc_coherent(host->dev, + MAX_BD_NUM * + sizeof(struct phytium_adma2_64_desc), + &host->dma.adma_addr, GFP_KERNEL); + if (!host->dma.adma_table) + return MCI_REALEASE_MEM; + + host->dma.desc_sz = ADMA2_64_DESC_SZ; + phytium_mci_init_adma_table(host, &host->dma); + } else { + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 4 * 512; + mmc->max_blk_count = mmc->max_req_size / 512; + } + + spin_lock_init(&host->lock); + + phytium_mci_init_hw(host); + ret = devm_request_irq(host->dev, host->irq, phytium_mci_irq, + 
host->irq_flags, "phytium-mci", host); + + if (ret) + return ret; + + ret = mmc_add_host(mmc); + + if (ret) { + dev_err(host->dev, "%s %d: mmc add host!\n", __func__, __LINE__); + return ret; + } + return 0; +} +EXPORT_SYMBOL(phytium_mci_common_probe); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Cheng Quan "); diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h new file mode 100644 index 0000000000000000000000000000000000000000..5423597ecb97b13bc85d0486ee2323debbb66363 --- /dev/null +++ b/drivers/mmc/host/phytium-mci.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_MCI_H +#define __PHYTIUM_MCI_H + +#include +#include +#include +#include +#include +#include +#include + +/*------------------------------------------------------*/ +/* Common Definition */ +/*------------------------------------------------------*/ +#define MAX_BD_NUM 128 +#define SD_BLOCK_SIZE 512 + +#define MCI_BUS_1BITS 0x0 +#define MCI_BUS_4BITS 0x1 +#define MCI_BUS_8BITS (0x1 << 16) + +#define MCI_SD_DRV_VALUE 0 +#define MCI_SD_SAMP_VALUE_MAX 0 +#define MCI_SD_SAMP_VALUE_MIN 0 + +#define MCI_TIMEOUT_CMD_VALUE 0xFFFFFFFF +#define MCI_POWER_ON 1 +#define MCI_POWER_OFF 0 + +#define MCI_PREPARE_FLAG (0x1 << 0) +#define MCI_ASYNC_FLAG (0x1 << 1) +#define MCI_MMAP_FLAG (0x1 << 2) + +#define MCI_CMD_TIMEOUT (HZ/10 * 50) /* 100ms x5 */ +#define MCI_DATA_TIMEOUT (HZ * 10) /* 1000ms x5 */ + +#define MCI_CMD_TYPE_ADTC 0x2 + +#define MCI_F_MIN 400000 +#define MCI_F_MAX 50000000 + +#define MCI_CLK 1200000000 +#define MCI_REALEASE_MEM 0x1 +#define MCI_MAX_FIFO_CNT 0x800 + +/* FIFOTH register defines */ +#define MCI_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | ((t) & 0xFFF)) +/* Card read threshold */ +#define MCI_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x)) +#define MCI_CARD_WR_THR_EN BIT(2) +#define MCI_CARD_RD_THR_EN BIT(0) + +/*----------------------------------------------------------------------*/ +/* Register Offset */ +/*----------------------------------------------------------------------*/ +#define MCI_CNTRL 0x00 /* the controller config reg */ +#define MCI_PWREN 0x04 /* the power enable reg */ +#define MCI_CLKDIV 0x08 /* the clock divider reg */ +#define MCI_CLKENA 0x10 /* the clock enable reg */ +#define MCI_TMOUT 0x14 /* the timeout reg */ +#define MCI_CTYPE 0x18 /* the card type reg */ +#define MCI_BLKSIZ 0x1C /* the block size reg */ +#define MCI_BYTCNT 0x20 /* the byte count reg */ +#define MCI_INT_MASK 0x24 /* the interrupt mask reg */ +#define MCI_CMDARG 0x28 /* the command argument reg */ +#define MCI_CMD 0x2C /* the command reg */ +#define MCI_RESP0 0x30 /* the response reg0 */ +#define MCI_RESP1 0x34 /* the response reg1 */ +#define MCI_RESP2 0x38 /* the response reg2 */ +#define MCI_RESP3 0X3C /* the response reg3 */ +#define MCI_MASKED_INTS 0x40 /* the masked interrupt status reg */ +#define MCI_RAW_INTS 0x44 /* the raw interrupt status reg */ +#define MCI_STATUS 0x48 /* the status reg */ +#define MCI_FIFOTH 0x4C /* the FIFO threshold watermark reg */ +#define MCI_CARD_DETECT 0x50 /* the card detect reg */ +#define MCI_CARD_WRTPRT 0x54 /* the card write protect reg */ +#define MCI_CCLK_RDY 0x58 /* first div is ready? 
1:ready,0:not ready*/ +#define MCI_TRAN_CARD_CNT 0x5C /* the transferred CIU card byte count reg */ +#define MCI_TRAN_FIFO_CNT 0x60 /* the transferred host to FIFO byte count reg */ +#define MCI_DEBNCE 0x64 /* the debounce count reg */ +#define MCI_UID 0x68 /* the user ID reg */ +#define MCI_VID 0x6C /* the controller version ID reg */ +#define MCI_HWCONF 0x70 /* the hardware configuration reg */ +#define MCI_UHS_REG 0x74 /* the UHS-I reg */ +#define MCI_CARD_RESET 0x78 /* the card reset reg */ +#define MCI_BUS_MODE 0x80 /* the bus mode reg */ +#define MCI_DESC_LIST_ADDRL 0x88 /* the descriptor list low base address reg */ +#define MCI_DESC_LIST_ADDRH 0x8C /* the descriptor list high base address reg */ +#define MCI_DMAC_STATUS 0x90 /* the internal DMAC status reg */ +#define MCI_DMAC_INT_ENA 0x94 /* the internal DMAC interrupt enable reg */ +#define MCI_CUR_DESC_ADDRL 0x98 /* the current host descriptor low address reg */ +#define MCI_CUR_DESC_ADDRH 0x9C /* the current host descriptor high address reg */ +#define MCI_CUR_BUF_ADDRL 0xA0 /* the current buffer low address reg */ +#define MCI_CUR_BUF_ADDRH 0xA4 /* the current buffer high address reg */ +#define MCI_CARD_THRCTL 0x100 /* the card threshold control reg */ +#define MCI_UHS_REG_EXT 0x108 /* the UHS register extension */ +#define MCI_EMMC_DDR_REG 0x10C /* the EMMC DDR reg */ +#define MCI_ENABLE_SHIFT 0x110 /* the enable phase shift reg */ +#define MCI_DATA 0x200 /* the data FIFO access */ + +/* Command register defines */ +#define MCI_CMD_START BIT(31) +#define MCI_CMD_USE_HOLD_REG BIT(29) +#define MCI_CMD_VOLT_SWITCH BIT(28) +#define MCI_CMD_CCS_EXP BIT(23) +#define MCI_CMD_CEATA_RD BIT(22) +#define MCI_CMD_UPD_CLK BIT(21) +#define MCI_CMD_INIT BIT(15) +#define MCI_CMD_STOP BIT(14) +#define MCI_CMD_PRV_DAT_WAIT BIT(13) +#define MCI_CMD_SEND_STOP BIT(12) +#define MCI_CMD_STRM_MODE BIT(11) +#define MCI_CMD_DAT_WR BIT(10) +#define MCI_CMD_DAT_EXP BIT(9) +#define MCI_CMD_RESP_CRC BIT(8) +#define MCI_CMD_RESP_LONG BIT(7) +#define MCI_CMD_RESP_EXP BIT(6) +#define MCI_CMD_INDX(n) ((n) & 0x1F) + +/*------------------------------------------------------*/ +/* Register Mask */ +/*------------------------------------------------------*/ +/* MCI_CNTRL mask */ +#define MCI_CNTRL_CONTROLLER_RESET (0x1 << 0) /* RW */ +#define MCI_CNTRL_FIFO_RESET (0x1 << 1) /* RW */ +#define MCI_CNTRL_DMA_RESET (0x1 << 2) /* RW */ +#define MCI_CNTRL_RES (0x1 << 3) /* */ +#define MCI_CNTRL_INT_ENABLE (0x1 << 4) /* RW */ +#define MCI_CNTRL_DMA_ENABLE (0x1 << 5) /* RW */ +#define MCI_CNTRL_READ_WAIT (0x1 << 6) /* RW */ +#define MCI_CNTRL_SEND_IRQ_RESPONSE (0x1 << 7) /* RW */ +#define MCI_CNTRL_ABORT_READ_DATA (0x1 << 8) /* RW */ +#define MCI_CNTRL_ENDIAN (0x1 << 11) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_A (0xF << 16) /* RW */ +//#define MCI_CNTRL_CARD_VOLTAGE_B (0xF << 20) /* RW */ +#define MCI_CNTRL_ENABLE_OD_PULLUP (0x1 << 24) /* RW */ +#define MCI_CNTRL_USE_INTERNAL_DMAC (0x1 << 25) /* RW */ + +/* MCI_PWREN mask */ +#define MCI_PWREN_ENABLE (0x1 << 0) /* RW */ + +/* MCI_CLKENA mask */ +#define MCI_CLKENA_CCLK_ENABLE (0x1 << 0) /* RW */ +#define MCI_CLKENA_CCLK_LOW_POWER (0x1 << 16) /* RW */ +#define MCI_EXT_CLK_ENABLE (0x1 << 1) + +/* MCI_INT_MASK mask */ +#define MCI_INT_MASK_CD (0x1 << 0) /* RW */ +#define MCI_INT_MASK_RE (0x1 << 1) /* RW */ +#define MCI_INT_MASK_CMD (0x1 << 2) /* RW */ +#define MCI_INT_MASK_DTO (0x1 << 3) /* RW */ +#define MCI_INT_MASK_TXDR (0x1 << 4) /* RW */ +#define MCI_INT_MASK_RXDR (0x1 << 5) /* RW */ +#define MCI_INT_MASK_RCRC (0x1 
<< 6) /* RW */ +#define MCI_INT_MASK_DCRC (0x1 << 7) /* RW */ +#define MCI_INT_MASK_RTO (0x1 << 8) /* RW */ +#define MCI_INT_MASK_DRTO (0x1 << 9) /* RW */ +#define MCI_INT_MASK_HTO (0x1 << 10) /* RW */ +#define MCI_INT_MASK_FRUN (0x1 << 11) /* RW */ +#define MCI_INT_MASK_HLE (0x1 << 12) /* RW */ +#define MCI_INT_MASK_SBE_BCI (0x1 << 13) /* RW */ +#define MCI_INT_MASK_ACD (0x1 << 14) /* RW */ +#define MCI_INT_MASK_EBE (0x1 << 15) /* RW */ +#define MCI_INT_MASK_SDIO (0x1 << 16) /* RW */ + +/* MCI_MASKED_INTS mask */ +#define MCI_MASKED_INTS_CD (0x1 << 0) /* RO */ +#define MCI_MASKED_INTS_RE (0x1 << 1) /* RO */ +#define MCI_MASKED_INTS_CMD (0x1 << 2) /* RO */ +#define MCI_MASKED_INTS_DTO (0x1 << 3) /* RO */ +#define MCI_MASKED_INTS_TXDR (0x1 << 4) /* RO */ +#define MCI_MASKED_INTS_RXDR (0x1 << 5) /* RO */ +#define MCI_MASKED_INTS_RCRC (0x1 << 6) /* RO */ +#define MCI_MASKED_INTS_DCRC (0x1 << 7) /* RO */ +#define MCI_MASKED_INTS_RTO (0x1 << 8) /* RO */ +#define MCI_MASKED_INTS_DRTO (0x1 << 9) /* RO */ +#define MCI_MASKED_INTS_HTO (0x1 << 10) /* RO */ +#define MCI_MASKED_INTS_FRUN (0x1 << 11) /* RO */ +#define MCI_MASKED_INTS_HLE (0x1 << 12) /* RO */ +#define MCI_MASKED_INTS_SBE_BCI (0x1 << 13) /* RO */ +#define MCI_MASKED_INTS_ACD (0x1 << 14) /* RO */ +#define MCI_MASKED_INTS_EBE (0x1 << 15) /* RO */ +#define MCI_MASKED_INTS_SDIO (0x1 << 16) /* RO */ + +/* MCI_RAW_INTS mask */ +#define MCI_RAW_INTS_CD (0x1 << 0) /* W1C */ +#define MCI_RAW_INTS_RE (0x1 << 1) /* W1C */ +#define MCI_RAW_INTS_CMD (0x1 << 2) /* W1C */ +#define MCI_RAW_INTS_DTO (0x1 << 3) /* W1C */ +#define MCI_RAW_INTS_TXDR (0x1 << 4) /* W1C */ +#define MCI_RAW_INTS_RXDR (0x1 << 5) /* W1C */ +#define MCI_RAW_INTS_RCRC (0x1 << 6) /* W1C */ +#define MCI_RAW_INTS_DCRC (0x1 << 7) /* W1C */ +#define MCI_RAW_INTS_RTO (0x1 << 8) /* W1C */ +#define MCI_RAW_INTS_DRTO (0x1 << 9) /* W1C */ +#define MCI_RAW_INTS_HTO (0x1 << 10) /* W1C */ +#define MCI_RAW_INTS_FRUN (0x1 << 11) /* W1C */ +#define MCI_RAW_INTS_HLE (0x1 << 12) /* W1C */ +#define MCI_RAW_INTS_SBE_BCI (0x1 << 13) /* W1C */ +#define MCI_RAW_INTS_ACD (0x1 << 14) /* W1C */ +#define MCI_RAW_INTS_EBE (0x1 << 15) /* W1C */ +#define MCI_RAW_INTS_SDIO (0x1 << 16) /* W1C */ + +/* MCI_STATUS mask */ +#define MCI_STATUS_FIFO_RX (0x1 << 0) /* RO */ +#define MCI_STATUS_FIFO_TX (0x1 << 1) /* RO */ +#define MCI_STATUS_FIFO_EMPTY (0x1 << 2) /* RO */ +#define MCI_STATUS_FIFO_FULL (0x1 << 3) /* RO */ +#define MCI_STATUS_CARD_STATUS (0x1 << 8) /* RO */ +#define MCI_STATUS_CARD_BUSY (0x1 << 9) /* RO */ +#define MCI_STATUS_DATA_BUSY (0x1 << 10) /* RO */ +#define MCI_STATUS_RESPOSE_INDEX_OFFSET (11) +#define MCI_STATUS_RESPOSE_INDEX_MASK (0x3f << MCI_STATUS_RESPOSE_INDEX_OFFSET) /* RO */ +#define MCI_STATUS_RESPOSE_INDEX(reg) (((reg) & MCI_STATUS_RESPOSE_INDEX_MASK) >> MCI_STATUS_RESPOSE_INDEX_OFFSET) +#define MCI_STATUS_DMA_ACK (0x1 << 31) /* RO */ +#define MCI_STATUS_DMA_REQ (0x1 << 32) /* RO */ + +/* MCI_UHS_REG mask */ +#define MCI_UHS_REG_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_DDR (0x1 << 16) /* RW */ + +/* MCI_CARD_RESET mask */ +#define MCI_CARD_RESET_ENABLE (0x1 << 0) /* RW */ + +/* MCI_BUS_MODE mask */ +#define MCI_BUS_MODE_SWR (0x1 << 0) /* RW */ +#define MCI_BUS_MODE_FB (0x1 << 1) /* RW */ +#define MCI_BUS_MODE_DE (0x1 << 7) /* RW */ + +/* MCI_DMAC_STATUS mask */ +#define MCI_DMAC_STATUS_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_STATUS_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_STATUS_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_STATUS_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_STATUS_NIS (0x1 
<< 8) /* RW */ +#define MCI_DMAC_STATUS_AIS (0x1 << 9) /* RW */ + +/* MCI_DMAC_INT_ENA mask */ +#define MCI_DMAC_INT_ENA_TI (0x1 << 0) /* RW */ +#define MCI_DMAC_INT_ENA_RI (0x1 << 1) /* RW */ +#define MCI_DMAC_INT_ENA_FBE (0x1 << 2) /* RW */ +#define MCI_DMAC_INT_ENA_DU (0x1 << 4) /* RW */ +#define MCI_DMAC_INT_ENA_CES (0x1 << 5) /* RW */ +#define MCI_DMAC_INT_ENA_NIS (0x1 << 8) /* RW */ +#define MCI_DMAC_INT_ENA_AIS (0x1 << 9) /* RW */ + +/* MCI_CARD_THRCTL mask */ +#define MCI_CARD_THRCTL_CARDRD (0x1 << 0) /* RW */ +#define MCI_CARD_THRCTL_BUSY_CLR (0x1 << 1) /* RW */ +#define MCI_CARD_THRCTL_CARDWR (0x1 << 2) /* RW */ + +/* MCI_UHS_REG_EXT mask */ +#define MCI_UHS_REG_EXT_MMC_VOLT (0x1 << 0) /* RW */ +#define MCI_UHS_REG_EXT_CLK_ENA (0x1 << 1) /* RW */ + +/* MCI_EMMC_DDR_REG mask */ +#define MCI_EMMC_DDR_CYCLE (0x1 << 0) /* RW */ + +/*--------------------------------------*/ +/* Structure Type */ +/*--------------------------------------*/ +/* Maximum segments assuming a 512KiB maximum request */ +/* size and a minimum 4KiB page size. */ +#define MCI_MAX_SEGS 128 +/* ADMA2 64-bit DMA descriptor size */ +#define ADMA2_64_DESC_SZ 32 + +/* mmc request timeout 5000ms */ +#define MMC_REQ_TIMEOUT_MS 5000 + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +/* ADMA2 64-bit descriptor. */ +struct phytium_adma2_64_desc { + u32 attribute; +#define IDMAC_DES0_DIC BIT(1) +#define IDMAC_DES0_LD BIT(2) +#define IDMAC_DES0_FD BIT(3) +#define IDMAC_DES0_CH BIT(4) +#define IDMAC_DES0_ER BIT(5) +#define IDMAC_DES0_CES BIT(30) +#define IDMAC_DES0_OWN BIT(31) + u32 NON1; + u32 len; + u32 NON2; + u32 addr_lo; /* Lower 32-bits of Buffer Address Pointer 1 */ + u32 addr_hi; /* Upper 32-bits of Buffer Address Pointer 1 */ + u32 desc_lo; /* Lower 32-bits of Next Descriptor Address */ + u32 desc_hi; /* Upper 32-bits of Next Descriptor Address */ +} __packed __aligned(4); + +struct phytium_mci_dma { + struct scatterlist *sg; /* I/O scatter list */ + /* ADMA descriptor table, pointer to adma_table array */ + struct phytium_adma2_64_desc *adma_table; + /* Mapped ADMA descr. 
table, the physical address of adma_table array */ + dma_addr_t adma_addr; + unsigned int desc_sz; /* ADMA descriptor size */ +}; + +enum adtc_t { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +struct phytium_mci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + u32 caps2; + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + void __iomem *base; /* host base address */ + void *adma_table1; + dma_addr_t adma_addr1; + struct phytium_mci_dma dma_rx; /* dma channel */ + struct phytium_mci_dma dma_tx; /* dma channel */ + struct phytium_mci_dma dma; /* dma channel */ + u64 dma_mask; + bool vqmmc_enabled; + u32 *sg_virt_addr; + enum adtc_t adtc_type; /* 0: common adtc cmd; 1: block r/w adtc cmd */ + struct timer_list hotplug_timer; + struct timer_list timeout_timer; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + u32 current_rca; /* the current RCA value */ + u32 current_ios_clk; + u32 is_use_dma; + u32 is_device_x100; + struct clk *src_clk; /* phytium_mci source clock */ + unsigned long clk_rate; + unsigned long clk_div; + unsigned long irq_flags; + unsigned long flags; +#define MCI_CARD_NEED_INIT 1 + +}; + +int phytium_mci_common_probe(struct phytium_mci_host *host); +void phytium_mci_deinit_hw(struct phytium_mci_host *host); +int phytium_mci_runtime_suspend(struct device *dev); +int phytium_mci_runtime_resume(struct device *dev); +int phytium_mci_resume(struct device *dev); +int phytium_mci_suspend(struct device *dev); + +#endif /* __PHYTIUM_MCI_H */ diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 5fc9a1bde4ac2e6d6e8475ab92c81342395d03d7..7ad102b71fc653e8d7932f6665f0842b38a88284 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -285,6 +285,25 @@ config MTD_NAND_ATMEL Enables support for NAND Flash / Smart Media Card interface on Atmel AT91 processors. +config MTD_NAND_PHYTIUM + tristate + +config MTD_NAND_PHYTIUM_PCI + tristate "Support Phytium NAND controller as a PCI device" + select MTD_NAND_PHYTIUM + depends on PCI + help + Enable the driver for the NAND flash controller of the Phytium Px210 chipset, + using the Phytium NAND controller core. + +config MTD_NAND_PHYTIUM_PLAT + tristate "Support Phytium NAND controller as a platform device" + select MTD_NAND_PHYTIUM + depends on ARCH_PHYTIUM + help + Enable the driver for the NAND flash controller of the Phytium CPU chipset, + using the Phytium NAND controller core. 
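+# Example (illustrative only): a .config fragment that builds both
+# front-ends as modules; the shared MTD_NAND_PHYTIUM core is pulled in
+# automatically through the 'select' statements above:
+#
+#   CONFIG_MTD_NAND_PHYTIUM_PCI=m
+#   CONFIG_MTD_NAND_PHYTIUM_PLAT=m
+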
+ config MTD_NAND_MARVELL tristate "NAND controller support on Marvell boards" depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index d5a5f9832b8879e4cbf16f5dd7f902edcf52c239..7a0f420d24120ab2d4053954c6e1fe9267d8b2a4 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -58,6 +58,10 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o +obj-$(CONFIG_MTD_NAND_PHYTIUM) += phytium_nand.o +obj-$(CONFIG_MTD_NAND_PHYTIUM_PCI) += phytium_nand_pci.o +obj-$(CONFIG_MTD_NAND_PHYTIUM_PLAT) += phytium_nand_plat.o + nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_amd.o nand-objs += nand_hynix.o diff --git a/drivers/mtd/nand/raw/phytium_nand.c b/drivers/mtd/nand/raw/phytium_nand.c new file mode 100644 index 0000000000000000000000000000000000000000..f94ccd41221bd83136d8af1e68542d8ecd85be2b --- /dev/null +++ b/drivers/mtd/nand/raw/phytium_nand.c @@ -0,0 +1,2171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core driver for Phytium NAND flash controller + * + * Copyright (c) 2020-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phytium_nand.h" + +u16 timing_asy_mode0[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */ + 0x03, 0x03, 0x28, 0x28, 0x03, 0x03, 0x06, 0x06, 0x28, 0x70, 0x30, 0x50}; +u16 timing_asy_mode1[TIMING_ASY_NUM] = { /* x100 pass, sample: 1 */ + 0x03, 0x03, 0x14, 0x14, 0x03, 0x03, 0x06, 0x06, 0x14, 0x70, 0x30, 0x28}; +u16 timing_asy_mode2[TIMING_ASY_NUM] = { /* x100 pass, sample: 7/8 (unlic) */ + 0x03, 0x03, 0x0D, 0x0D, 0x03, 0x03, 0x06, 0x06, 0x0D, 0x70, 0x20, 0x1A}; +u16 timing_asy_mode3[TIMING_ASY_NUM] = { /* x100 pass, sample: 4-7 */ + 0x03, 0x03, 0x0A, 0x0A, 0x03, 0x03, 0x06, 0x06, 0x0A, 0x70, 0x20, 0x14}; +u16 timing_asy_mode4[TIMING_ASY_NUM] = { /* x100 1.8v pass */ + 0x03, 0x03, 0x08, 0x08, 0x03, 0x03, 0x06, 0x06, 0x08, 0x70, 0x15, 0x10}; +u16 timing_asy_mode5[TIMING_ASY_NUM] = { /* x100 1.8v pass */ + 0x03, 0x03, 0x07, 0x07, 0x03, 0x03, 0x06, 0x06, 0x07, 0x20, 0x15, 0x0E}; +u16 timing_syn_mode0[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x20, 0x41, 0x05, 0x20, 0x10, 0x19, 0x62, 0x40, 0x38, 0x20, 0x00, 0x09, + 0x50, 0x20}; +u16 timing_syn_mode1[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x18, 0x32, 0x06, 0x18, 0x0C, 0x10, 0x76, 0x40, 0x2A, 0x1E, 0x00, 0x12, + 0x24, 0x18}; +u16 timing_syn_mode2[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x10, 0x0A, 0x04, 0x10, 0x08, 0x0A, 0x6E, 0x50, 0x1D, 0x10, 0x00, 0x0C, + 0x18, 0x10}; +u16 timing_syn_mode3[TIMING_SYN_NUM] = { /* x100 1.8v pass */ + 0x0C, 0x1A, 0x02, 0x0C, 0x06, 0x08, 0x78, 0x7C, 0x15, 0x0C, 0x00, 0x08, + 0x12, 0x0C}; +u16 timing_syn_mode4[TIMING_SYN_NUM] = { /* x100 1.8v failed */ + 0x08, 0x17, 0x05, 0x08, 0x04, 0x01, 0x73, 0x40, 0x0C, 0x08, 0x00, 0x06, + 0x0C, 0x10}; +u16 timing_tog_ddr_mode0[TIMING_TOG_NUM] = { /* 600MHz clk */ + 0x14, 0x0a, 0x08, 0x08, 0xc8, 0xc8, 0x08, 0x08, 0x20, 0x0a, 0x14, 0x08}; + +static u32 nfc_ecc_errover; +static u32 nfc_ecc_err; +static u32 nfc_irq_st; +static u32 nfc_irq_en; +static u32 nfc_irq_complete; + +static DECLARE_WAIT_QUEUE_HEAD(wait_done); + +/* + * Internal helper to conditionally apply a delay (from the above structure, + * most of the time). 
+ */ +static void cond_delay(unsigned int ns) +{ + if (!ns) + return; + + if (ns < 10000) + ndelay(ns); + else + udelay(DIV_ROUND_UP(ns, 1000)); +} + +static inline struct phytium_nfc *to_phytium_nfc(struct nand_controller *ctrl) +{ + return container_of(ctrl, struct phytium_nfc, controller); +} + +static inline struct phytium_nand_chip *to_phytium_nand(struct nand_chip *chip) +{ + return container_of(chip, struct phytium_nand_chip, chip); +} + +static u32 phytium_read(struct phytium_nfc *nfc, u32 reg) +{ + return readl_relaxed(nfc->regs + reg); +} + +static void phytium_write(struct phytium_nfc *nfc, u32 reg, u32 value) +{ + return writel_relaxed(value, nfc->regs + reg); +} + +static inline int phytium_wait_busy(struct phytium_nfc *nfc) +{ + u32 status; + + if (nfc_ecc_errover) { + nfc_ecc_errover = 0; + return 0; + } + + return readl_relaxed_poll_timeout(nfc->regs + NDSR, status, + !(status & NDSR_BUSY), 10, 10000); +} + +static void phytium_nfc_disable_int(struct phytium_nfc *nfc, u32 int_mask) +{ + u32 reg; + + reg = phytium_read(nfc, NDIR_MASK); + phytium_write(nfc, NDIR_MASK, reg | int_mask); +} + +static void phytium_nfc_enable_int(struct phytium_nfc *nfc, u32 int_mask) +{ + u32 reg; + + reg = phytium_read(nfc, NDIR_MASK); + phytium_write(nfc, NDIR_MASK, reg & (~int_mask)); +} + +static void phytium_nfc_clear_int(struct phytium_nfc *nfc, u32 int_mask) +{ + phytium_write(nfc, NDIR_MASK, int_mask); +} + +static int phytium_nfc_cmd_correct(struct phytium_nfc_op *nfc_op) +{ + if (!nfc_op) + return -EINVAL; + + if (nfc_op->cmd_len == 0x01) { + nfc_op->cmd[1] = nfc_op->cmd[0]; + nfc_op->cmd[0] = 0; + } + + return 0; +} + +static int phytium_nfc_addr_correct(struct phytium_nfc_op *nfc_op) +{ + u32 len; + int i, j; + + if (!nfc_op) + return -EINVAL; + + len = nfc_op->addr_len > PHYTIUM_NFC_ADDR_MAX_LEN ? 
+ PHYTIUM_NFC_ADDR_MAX_LEN : nfc_op->addr_len; + + if (len == PHYTIUM_NFC_ADDR_MAX_LEN) + return 0; + + for (i = len-1, j = PHYTIUM_NFC_ADDR_MAX_LEN - 1; i >= 0; i--, j--) { + nfc_op->addr[j] = nfc_op->addr[i]; + nfc_op->addr[i] = 0; + } + + return 0; +} + +static void phytium_nfc_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, + struct phytium_nfc_op *nfc_op) +{ + struct nand_op_instr *instr = NULL; + bool first_cmd = true; + u32 op_id; + int i; + + /* Reset the input structure as most of its fields will be OR'ed */ + memset(nfc_op, 0, sizeof(struct phytium_nfc_op)); + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + unsigned int offset, naddrs; + const u8 *addrs; + int len; + + instr = (struct nand_op_instr *)&subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (first_cmd) { + nfc_op->cmd[0] = instr->ctx.cmd.opcode; + } else { + nfc_op->cmd[1] = instr->ctx.cmd.opcode; + nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1; + } + + nfc_op->cle_ale_delay_ns = instr->delay_ns; + first_cmd = false; + nfc_op->cmd_len++; + + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + + nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = naddrs; + + for (i = 0; i < min_t(u32, PHYTIUM_NFC_ADDR_MAX_LEN, naddrs); i++) + nfc_op->addr[i] = addrs[i]; + + nfc_op->cle_ale_delay_ns = instr->delay_ns; + + nfc_op->addr_len = naddrs; + break; + + case NAND_OP_DATA_IN_INSTR: + nfc_op->data_instr = instr; + nfc_op->data_instr_idx = op_id; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + len = nand_subop_get_data_len(subop, op_id); + nfc_op->page_cnt = len; + nfc_op->data_delay_ns = instr->delay_ns; + + break; + + case NAND_OP_DATA_OUT_INSTR: + nfc_op->data_instr = instr; + nfc_op->data_instr_idx = op_id; + nfc_op->cmd_ctrl.nfc_ctrl.dc = 1; + len = nand_subop_get_data_len(subop, op_id); + nfc_op->page_cnt = len; + nfc_op->data_delay_ns = instr->delay_ns; + break; + + case NAND_OP_WAITRDY_INSTR: + nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + nfc_op->rdy_delay_ns = instr->delay_ns; + break; + } + } +} + +int phytium_nfc_prepare_cmd(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + enum dma_data_direction direction) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int i; + + phytium_nfc_cmd_correct(nfc_op); + phytium_nfc_addr_correct(nfc_op); + + nfc_op->cmd_ctrl.nfc_ctrl.csel = 0x0F ^ (0x01 << phytium_nand->selected_die); + + for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++) + nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF; + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_prepare_cmd); + +#ifdef DEBUG +static int phytium_nfc_cmd_dump(struct phytium_nfc *nfc, + struct phytium_nfc_op *nfc_op, u8 *buf) +{ + u8 *p; + u8 str[1024] = {0}; + int i; + + sprintf(str, "Phytium NFC cmd dump:\n"); + sprintf(str, "%s cmd0:%x, cmd1:%x, ctrl:%x, page_cnt:%d\n", + str, nfc_op->cmd[0], nfc_op->cmd[1], nfc_op->cmd_ctrl.ctrl, nfc_op->page_cnt); + + p = &nfc_op->addr[0]; + sprintf(str, "%s addr:%02x %02x %02x %02x %02x\n", + str, p[0], p[1], p[2], p[3], p[4]); + + p = &nfc_op->mem_addr_first[0]; + sprintf(str, "%s mem_addr_first:%02x %02x %02x %02x %02x\n", + str, p[0], p[1], p[2], p[3], p[4]); + + for (i = 0; i < PHYTIUM_NFC_DSP_SIZE; i++) + sprintf(str, "%s %02x", str, buf[i]); + + dev_dbg(nfc->dev, "%s\n", str); + + return 0; +} + +int phytium_nfc_data_dump(struct 
phytium_nfc *nfc, u8 *buf, u32 len) +{ + u8 str[1024] = {0}; + int i; + + len = len > 512 ? 512 : len; + + sprintf(str, "Phytium NFC data dump: %d\n", len); + for (i = 0; i < len; i++) { + if (i && (i%128 == 0)) { + dev_info(nfc->dev, "next:\n%s\n", str); + memset(str, 0, 1024); + } + + if (i && (i%16 == 0)) + sprintf(str, "%s\n", str); + sprintf(str, "%s %02x", str, buf[i]); + } + + dev_dbg(nfc->dev, "%s\n", str); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_data_dump); +#else +#define phytium_nfc_cmd_dump(...) +#define phytium_nfc_data_dump(...) +#endif + +int phytium_nfc_send_cmd(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value = 0; + + memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); + memcpy((u8 *)nfc->dsp_addr, (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); + + phytium_nfc_cmd_dump(nfc, nfc_op, (u8 *)nfc->dsp_addr); + + if (phytium_wait_busy(nfc) != 0) { + dev_err(nfc->dev, "NFC was always busy\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + return 0; + } + + spin_lock(&nfc->spinlock); + value = nfc->dsp_phy_addr & 0xFFFFFFFF; + phytium_write(nfc, NDAR0, value); + + /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ + value = phytium_read(nfc, NDAR1); + value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); + phytium_write(nfc, NDAR1, value); + + phytium_nfc_enable_int(nfc, NDIR_CMD_FINISH_MASK); + + value |= NDAR1_DMA_EN; + phytium_write(nfc, NDAR1, value); + spin_unlock(&nfc->spinlock); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_send_cmd); + +int phytium_nfc_prepare_cmd2(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + enum dma_data_direction direction, + u32 cmd_num) +{ + struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip); + int i; + + for (i = 0; i < cmd_num; i++) { + phytium_nfc_cmd_correct(nfc_op); + phytium_nfc_addr_correct(nfc_op); + nfc_op->cmd_ctrl.nfc_ctrl.csel = 0x0F ^ (0x01 << phytium_nand->selected_die); + nfc_op++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_prepare_cmd2); + +int phytium_nfc_send_cmd2(struct nand_chip *chip, + struct phytium_nfc_op *nfc_op, + u32 cmd_num) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + u32 value = 0; + int i; + + memset((u8 *)nfc->dsp_addr, 0, PAGE_SIZE); + + for (i = 0; i < cmd_num; i++) { + memcpy((u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE, + (u8 *)nfc_op, PHYTIUM_NFC_DSP_SIZE); + phytium_nfc_cmd_dump(nfc, nfc_op, (u8 *)nfc->dsp_addr + i*PHYTIUM_NFC_DSP_SIZE); + nfc_op++; + } + + if (phytium_wait_busy(nfc) != 0) { + dev_err(nfc->dev, "NFC was always busy\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + return 0; + } + + spin_lock(&nfc->spinlock); + value = nfc->dsp_phy_addr & 0xFFFFFFFF; + phytium_write(nfc, NDAR0, value); + + /* Don't modify NDAR1_DMA_RLEN & NDAR1_DMA_WLEN */ + value = phytium_read(nfc, NDAR1); + value |= NDAR1_H8((nfc->dsp_phy_addr >> 32) & 0xFF); + phytium_write(nfc, NDAR1, value); + + phytium_nfc_enable_int(nfc, NDIR_DMA_FINISH_MASK | NDIR_ECC_ERR_MASK); + + value |= NDAR1_DMA_EN; + phytium_write(nfc, NDAR1, value); + spin_unlock(&nfc->spinlock); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_send_cmd2); + +int phytium_nfc_wait_op(struct nand_chip *chip, + u32 timeout_ms) +{ + struct phytium_nfc *nfc = to_phytium_nfc(chip->controller); + int ret; + + /* Timeout is expressed in ms */ + if (!timeout_ms) + 
timeout_ms = IRQ_TIMEOUT; + else if (timeout_ms > 1000) + timeout_ms = 1000; + else if (timeout_ms < 100) + timeout_ms = 100; + + ret = wait_event_interruptible_timeout(wait_done, nfc_irq_complete, + msecs_to_jiffies(timeout_ms)); + nfc_irq_complete = false; + + if (!ret) { + dev_err(nfc->dev, "Timeout waiting for RB signal\n"); + dev_err(nfc->dev, "NFC state: %x\n", phytium_read(nfc, NDSR)); + dev_err(nfc->dev, "NFC irq state: %x, irq en:%x\n", + phytium_read(nfc, NDIR), phytium_read(nfc, NDIR_MASK)); + dev_err(nfc->dev, "NFC debug: %x\n", phytium_read(nfc, ND_DEBUG)); + + phytium_nfc_clear_int(nfc, NDIR_ALL_INT(nfc->caps->int_mask_bits)); + return -ETIMEDOUT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_nfc_wait_op); + +static int phytium_nfc_xfer_data_pio(struct phytium_nfc *nfc, + const struct nand_subop *subop, + struct phytium_nfc_op *nfc_op) +{ + const struct nand_op_instr *instr = nfc_op->data_instr; + unsigned int op_id = nfc_op->data_instr_idx; + unsigned int len = nand_subop_get_data_len(subop, op_id); + unsigned int offset = nand_subop_get_data_start_off(subop, op_id); + bool reading = (instr->type == NAND_OP_DATA_IN_INSTR); + + if (reading) { + u8 *in = instr->ctx.data.buf.in + offset; + + memcpy(in, nfc->dma_buf, len); + + nfc->dma_offset = 0; + } else { + const u8 *out = instr->ctx.data.buf.out + offset; + + memcpy(nfc->dma_buf, out, len); + } + + return 0; +} + +static int memcpy_to_reg16(struct phytium_nfc *nfc, u32 reg, u16 *buf, size_t len) +{ + int i; + u32 val = 0; + + if (!nfc || !buf || (len >= 16)) + return -EINVAL; + + for (i = 0; i < len; i++) { + val = (val << 16) + buf[i]; + if (i % 2) { + phytium_write(nfc, reg, val); + val = 0; + reg += 4; + } + } + + return 0; +} + +int phytium_nfc_default_data_interface(struct phytium_nfc *nfc) +{ + int value; + + value = phytium_read(nfc, NDCR0); + value &= (~NDCR0_IN_MODE(3)); + value |= NDCR0_IN_MODE(nfc->inter_mode); + phytium_write(nfc, NDCR0, value); + + value = phytium_read(nfc, NDCR1); + value &= (~NDCR1_SAMPL_PHASE(0xFFFF)); + + switch (nfc->inter_mode) { + case ASYN_SDR: + if (nfc->timing_mode == ASY_MODE4) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode4, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(4)); + } else if (nfc->timing_mode == ASY_MODE3) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode3, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(5)); + } else if (nfc->timing_mode == ASY_MODE2) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode2, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(3)); + } else if (nfc->timing_mode == ASY_MODE1) { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode1, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(2)); + } else { + memcpy_to_reg16(nfc, NDTR0, timing_asy_mode0, TIMING_ASY_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(1)); + } + phytium_write(nfc, ND_INTERVAL_TIME, 0x01); + break; + case ONFI_DDR: + if (nfc->timing_mode == SYN_MODE4) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode4, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x0D)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x30); + } else if (nfc->timing_mode == SYN_MODE3) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode3, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x05)); + phytium_write(nfc, ND_INTERVAL_TIME, 0x18); + } else if (nfc->timing_mode == SYN_MODE2) { + memcpy_to_reg16(nfc, NDTR6, timing_syn_mode2, TIMING_SYN_NUM); + phytium_write(nfc, NDCR1, value | 
+			phytium_write(nfc, ND_INTERVAL_TIME, 0x20);
+		} else if (nfc->timing_mode == SYN_MODE1) {
+			memcpy_to_reg16(nfc, NDTR6, timing_syn_mode1, TIMING_SYN_NUM);
+			phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x12));
+			phytium_write(nfc, ND_INTERVAL_TIME, 0x40);
+		} else {
+			memcpy_to_reg16(nfc, NDTR6, timing_syn_mode0, TIMING_SYN_NUM);
+			phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(0x12));
+			phytium_write(nfc, ND_INTERVAL_TIME, 0x40);
+		}
+		break;
+	case TOG_ASYN_DDR:
+		phytium_write(nfc, NDCR1, value | NDCR1_SAMPL_PHASE(8));
+		phytium_write(nfc, ND_INTERVAL_TIME, 0xC8);
+		memcpy_to_reg16(nfc, NDTR13, timing_tog_ddr_mode0, TIMING_TOG_NUM);
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytium_nfc_default_data_interface);
+
+static int phytium_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+					  const struct nand_subop *subop)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	int ret = 0;
+
+	phytium_nfc_parse_instructions(chip, subop, &nfc_op);
+
+	dev_dbg(nfc->dev, "Phytium nand command 0x%02x 0x%02x.\n",
+		nfc_op.cmd[0], nfc_op.cmd[1]);
+
+	switch (nfc_op.cmd[0]) {
+	case NAND_CMD_PARAM:
+		memset(nfc->dma_buf, 0, PAGE_SIZE);
+		direction = DMA_FROM_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_PARAM;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		if (nfc->inter_pro == NAND_ONFI)
+			nfc_op.page_cnt = 3 * sizeof(struct nand_onfi_params);
+		else if (nfc->inter_pro == NAND_JEDEC)
+			nfc_op.page_cnt = 3 * sizeof(struct nand_jedec_params);
+		if (nfc_op.page_cnt)
+			nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+		nfc->dma_offset = 0;
+		break;
+	case NAND_CMD_SET_FEATURES:
+		direction = DMA_TO_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_SET_FTR;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+		if (nfc->inter_mode != ASYN_SDR) {
+			dev_err(nfc->dev, "SET_FEATURES command not supported!\n");
+			return 0;
+		}
+		break;
+	case NAND_CMD_GET_FEATURES:
+		direction = DMA_FROM_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_GET_FTR;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+		break;
+	case NAND_CMD_READ0:
+		if (nfc_op.cmd[1] == NAND_CMD_READSTART) { /* large page */
+			direction = DMA_FROM_DEVICE;
+			nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ;
+			nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		} else if (nfc_op.cmd[1] == NAND_CMD_SEQIN) { /* program page begin */
+			nfc_op.cmd[0] = NAND_CMD_SEQIN;
+			nfc_op.cmd[1] = NAND_CMD_PAGEPROG;
+			direction = DMA_TO_DEVICE;
+			nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO;
+			nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		} else { /* small page */
+			direction = DMA_FROM_DEVICE;
+			nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ;
+			nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		}
+		break;
+	case NAND_CMD_RNDOUT: /* change read column */
+		direction = DMA_FROM_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL;
+		nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+		break;
+	case NAND_CMD_READSTART: /* large page */
+		direction = DMA_FROM_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		break;
+	case NAND_CMD_RNDOUTSTART: /* change read column */
+		direction = DMA_FROM_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		nfc->dma_offset = nfc_op.addr[1];
+		nfc->dma_offset = (nfc->dma_offset << 8) + nfc_op.addr[0];
+		break;
+	case NAND_CMD_SEQIN: /* program begin */
+		if (nfc_op.cmd[0] == NAND_CMD_READ0) {
+			nfc_op.cmd[0] = NAND_CMD_SEQIN;
+			nfc_op.cmd[1] = NAND_CMD_PAGEPROG;
+		}
+		direction = DMA_TO_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		break;
+	case NAND_CMD_RNDIN: /* change write column */
+		direction = DMA_TO_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_WR_COL;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		break;
+	case NAND_CMD_PAGEPROG: /* program end */
+		nfc_op.cmd[0] = NAND_CMD_RNDIN;
+		nfc_op.cmd[1] = NAND_CMD_PAGEPROG;
+		direction = DMA_TO_DEVICE;
+		nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO;
+		nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+		break;
+	default:
+		dev_err(nfc->dev, "Unsupported command 0x%02x.\n", nfc_op.cmd[0]);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((nfc_op.data_instr) && (direction == DMA_TO_DEVICE))
+		phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op);
+
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+	if (ret)
+		goto out;
+
+	cond_delay(nfc_op.rdy_delay_ns);
+
+	if ((nfc_op.data_instr) && (direction == DMA_FROM_DEVICE))
+		phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op);
+
+out:
+	return ret;
+}
+
+static int phytium_nfc_read_id_type_exec(struct nand_chip *chip,
+					 const struct nand_subop *subop)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	u16 read_len = 0;
+	int ret;
+	u8 *buf = nfc->dma_buf;
+
+	memset(nfc->dma_buf, 0, PAGE_SIZE);
+	direction = DMA_FROM_DEVICE;
+
+	phytium_nfc_parse_instructions(chip, subop, &nfc_op);
+	read_len = nfc_op.page_cnt;
+	nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len;
+
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_ID;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 0;
+
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+	ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+	if (ret)
+		return ret;
+
+	cond_delay(nfc_op.rdy_delay_ns);
+
+	if (!strncmp(nfc->dma_buf, "ONFI", 4)) {
+		nfc->inter_pro = NAND_ONFI;
+	} else if (!strncmp(nfc->dma_buf, "JEDEC", 5)) {
+		nfc->inter_pro = NAND_JEDEC;
+		if (buf[5] == 1)
+			nfc->inter_mode = ASYN_SDR;
+		else if (buf[5] == 2)
+			nfc->inter_mode = TOG_ASYN_DDR;
+		else if (buf[5] == 4)
+			nfc->inter_mode = ASYN_SDR;
+	} else {
+		nfc->inter_pro = NAND_OTHER;
+	}
+
+	dev_info(nfc->dev, "Nand protocol: %d, interface mode: %d\n",
+		 nfc->inter_pro, nfc->inter_mode);
+
+	phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op);
+
+	return 0;
+}
+
+static int phytium_nfc_read_status_exec(struct nand_chip *chip,
+					const struct nand_subop *subop)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	u16 read_len = 0;
+	u32 timeout, count = 0;
+	int ret = 0;
+
+	direction = DMA_FROM_DEVICE;
+
+	phytium_nfc_parse_instructions(chip, subop, &nfc_op);
+	read_len = nfc_op.page_cnt;
+	nfc_op.page_cnt = (read_len & 0x03) ? ((read_len & 0xFFFC) + 4) : read_len;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ_STATUS;
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+
+read_status_retry:
+	count++;
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+	timeout = nfc_op.rdy_timeout_ms ? nfc_op.rdy_timeout_ms : 10;
+	ret = phytium_nfc_wait_op(chip, timeout);
+	if (ret)
+		goto out;
+
+	phytium_nfc_xfer_data_pio(nfc, subop, &nfc_op);
+
+	/* 0xE0: WP_n, RDY and ARDY all set in the status byte */
+	if (*(u8 *)(nfc->dma_buf) != 0xE0) {
+		dev_info(nfc->dev, "Retrying status read (%x)\n", *(u8 *)(nfc->dma_buf));
+
+		if (count < 5)
+			goto read_status_retry;
+	}
+
+out:
+	return ret;
+}
+
+static int phytium_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+					   const struct nand_subop *subop)
+{
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+
+	phytium_nfc_parse_instructions(chip, subop, &nfc_op);
+
+	direction = DMA_NONE;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_RESET;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+}
+
+static int phytium_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+					   const struct nand_subop *subop)
+{
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+
+	phytium_nfc_parse_instructions(chip, subop, &nfc_op);
+	direction = DMA_NONE;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_ERASE;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	return phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+}
+
+static int phytium_nfc_data_in_type_exec(struct nand_chip *chip,
+					 const struct nand_subop *subop)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nfc_op nfc_op;
+	struct nand_op_instr *instr;
+	unsigned int op_id;
+	unsigned int len;
+	unsigned int offset;
+	u8 *in = NULL;
+
+	phytium_nfc_parse_instructions(chip, subop, &nfc_op);
+	if (nfc_op.data_instr->type != NAND_OP_DATA_IN_INSTR) {
+		dev_err(nfc->dev, "Phytium nfc instrs parser failed!\n");
+		return -EINVAL;
+	}
+
+	instr = nfc_op.data_instr;
+	op_id = nfc_op.data_instr_idx;
+	len = nand_subop_get_data_len(subop, op_id);
+	offset = nand_subop_get_data_start_off(subop, op_id);
+	in = instr->ctx.data.buf.in + offset;
+
+	phytium_nfc_cmd_dump(nfc, &nfc_op, (u8 *)nfc->dsp_addr);
+
+	memcpy(in, nfc->dma_buf + nfc->dma_offset, len);
+	nfc->dma_offset += len;
+	phytium_nfc_data_dump(nfc, in, len);
+
+	return 0;
+}
+
+static const struct nand_op_parser phytium_nfc_op_parser = NAND_OP_PARSER(
+	/* Naked commands not supported, use a function for each pattern */
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_read_id_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_erase_cmd_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_read_status_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_reset_cmd_type_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_naked_waitrdy_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_naked_waitrdy_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_naked_waitrdy_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
+		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 8),
+		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_naked_waitrdy_exec,
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_ADDR_ELEM(false, PHYTIUM_NFC_ADDR_MAX_LEN),
+		NAND_OP_PARSER_PAT_CMD_ELEM(false),
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+	NAND_OP_PARSER_PATTERN(
+		phytium_nfc_data_in_type_exec,
+		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+	);
+
+static int phytium_nfc_exec_op(struct nand_chip *chip,
+			       const struct nand_operation *op,
+			       bool check_only)
+{
+	return nand_op_parser_exec_op(chip, &phytium_nfc_op_parser,
+				      op, check_only);
+}
+
+static int phytium_nfc_reset(struct phytium_nfc *nfc)
+{
+	u32 value;
+
+	phytium_write(nfc, NDIR_MASK, NDIR_ALL_INT(nfc->caps->int_mask_bits));
+	phytium_write(nfc, NDSR, NDIR_ALL_INT(nfc->caps->int_mask_bits));
+
+	phytium_write(nfc, ND_ERR_CLR, 0x0F);
+	phytium_write(nfc, NDFIFO_CLR, 1);
+
+	value = phytium_read(nfc, NDCR0);
+	phytium_write(nfc, NDCR0, value & ~(NDCR0_ECC_EN | NDCR0_SPARE_EN));
+
+	return 0;
+}
+
+static void phytium_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip);
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+
+	dev_dbg(nfc->dev, "Phytium nand selected chip %d\n", die_nr);
+
+	if (chip == nfc->selected_chip && die_nr == phytium_nand->selected_die)
+		return;
+
+	if (die_nr < 0 || die_nr >= phytium_nand->nsels) {
+		nfc->selected_chip = NULL;
+		phytium_nand->selected_die = -1;
+		return;
+	}
+
+	phytium_nfc_reset(nfc);
+
+	nfc->selected_chip = chip;
+	phytium_nand->selected_die = die_nr;
+}
+
+static int phytium_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = chip->ecc.total;
+	oobregion->offset = mtd->oobsize - oobregion->length;
+
+	return 0;
+}
+
+static int phytium_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	/*
+	 * Bootrom looks in bytes 0 & 5 for bad blocks for the
+	 * 4KB page / 4bit BCH combination.
+	 */
+	if (mtd->writesize >= SZ_4K)
+		oobregion->offset = 6;
+	else
+		oobregion->offset = 2;
+
+	oobregion->length = mtd->oobsize - chip->ecc.total - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops phytium_nand_ooblayout_ops = {
+	.ecc = phytium_nand_ooblayout_ecc,
+	.free = phytium_nand_ooblayout_free,
+};
+
+static void phytium_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	u32 ndcr0 = phytium_read(nfc, NDCR0);
+
+	if (!(ndcr0 & NDCR0_ECC_EN))
+		phytium_write(nfc, NDCR0, ndcr0 | NDCR0_ECC_EN);
+}
+
+static void phytium_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	u32 ndcr0 = phytium_read(nfc, NDCR0);
+
+	if (ndcr0 & NDCR0_ECC_EN)
+		phytium_write(nfc, NDCR0, ndcr0 & ~NDCR0_ECC_EN);
+}
+
+irqreturn_t phytium_nfc_isr(int irq, void *dev_id)
+{
+	struct phytium_nfc *nfc = dev_id;
+	u32 st = phytium_read(nfc, NDIR);
+	u32 ien = (~phytium_read(nfc, NDIR_MASK)) & NDIR_ALL_INT(nfc->caps->int_mask_bits);
+
+	if (!(st & ien))
+		return IRQ_NONE;
+
+	nfc_irq_st = st;
+	nfc_irq_en = ien;
+	phytium_nfc_disable_int(nfc, st & NDIR_ALL_INT(nfc->caps->int_mask_bits));
+	phytium_write(nfc, 0xFD0, 0);
+
+	if (st & (NDIR_CMD_FINISH | NDIR_DMA_FINISH)) {
+		if (st & NDIR_ECC_ERR)
+			nfc_ecc_err = 1;
+		phytium_write(nfc, NDIR, st);
+		nfc_irq_complete = 1;
+	} else if (st & (NDIR_FIFO_TIMEOUT | NDIR_PGFINISH)) {
+		phytium_write(nfc, NDIR, st);
+		phytium_nfc_enable_int(nfc, (~st) & (NDIR_DMA_FINISH_MASK |
+						     NDIR_PGFINISH_MASK |
+						     NDIR_FIFO_TIMEOUT_MASK |
+						     NDIR_CMD_FINISH_MASK));
+		nfc_irq_complete = 0;
+	} else if (st & NDIR_ECC_ERR) {
+		phytium_write(nfc, ND_ERR_CLR, 0x08);
+		phytium_write(nfc, NDIR, st);
+		phytium_write(nfc, NDFIFO_CLR, 0x01);
+		nfc_irq_complete = 1;
+		nfc_ecc_errover = 1;
+	} else {
+		phytium_write(nfc, NDIR, st);
+		nfc_irq_complete = 1;
+	}
+
+	wake_up(&wait_done);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(phytium_nfc_isr);
+
+static int phytium_nfc_hw_ecc_correct(struct nand_chip *chip,
+				      char *buf, int len)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	u32 i, j, value, tmp;
+	u32 ecc_err_reg_nums;
+	int stat = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (nfc->caps->hw_ver == 1)
+		ecc_err_reg_nums = 2;
+	else
+		ecc_err_reg_nums = 4;
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		for (j = 0; j < ecc_err_reg_nums; j++) {
+			value = phytium_read(nfc, 0xB8 + 4 * (ecc_err_reg_nums * i + j));
+			dev_dbg(nfc->dev, "ECC_FLAG: offset:%x value:0x%08x\n",
+				0xB8 + 4 * (ecc_err_reg_nums * i + j), value);
+
+			/* Each 16-bit half holds a 1-based bit index to flip */
+			tmp = value & 0xFFFF;
+			if (tmp && (tmp <= 4096)) {
+				tmp--;
+				stat++;
+				buf[chip->ecc.size * i + (tmp >> 3)] ^= (0x01 << tmp % 8);
+				dev_dbg(nfc->dev, "ECC_CORRECT %x %02x\n",
+					chip->ecc.size * i + (tmp >> 3),
+					buf[chip->ecc.size * i + (tmp >> 3)]);
+			} else if (tmp > 4096) {
+				dev_dbg(nfc->dev, "ECC_CORRECT offset > 4096!\n");
+			}
+
+			tmp = (value >> 16) & 0xFFFF;
+			if (tmp && (tmp <= 4096)) {
+				tmp--;
+				stat++;
+				buf[chip->ecc.size * i + (tmp >> 3)] ^= (0x01 << tmp % 8);
+				dev_dbg(nfc->dev, "ECC_CORRECT %x %02x\n",
+					chip->ecc.size * i + (tmp >> 3),
+					buf[chip->ecc.size * i + (tmp >> 3)]);
+			} else if (tmp > 4096) {
+				dev_dbg(nfc->dev, "ECC_CORRECT offset > 4096!\n");
+			}
+		}
+	}
+
+	return stat;
+}
+
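+/*
+ * The helpers below hand-build phytium_nfc_op descriptors (READ0/READSTART
+ * or SEQIN/PAGEPROG pairs, five address cycles, a page count) and push them
+ * through the controller DMA buffer. The _hwecc variants chain a second
+ * descriptor for the ECC area so the engine can correct data in place.
+ */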
+static int phytium_nand_page_read(struct mtd_info *mtd, struct nand_chip *chip,
+				  u8 *buf, u8 *oob_buf, int oob_len, int page,
+				  bool read)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nand_chip *phytium_nand = NULL;
+	const struct nand_sdr_timings *sdr = NULL;
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	int ret = 0;
+
+	memset(&nfc_op, 0, sizeof(nfc_op));
+	phytium_nand = to_phytium_nand(chip);
+	sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface);
+
+	memset(nfc->dma_buf, 0x0, mtd->writesize + mtd->oobsize);
+	direction = DMA_FROM_DEVICE;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ;
+	nfc_op.cmd[0] = NAND_CMD_READ0;
+	nfc_op.cmd[1] = NAND_CMD_READSTART;
+	nfc_op.cmd_len = 2;
+	nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max);
+	nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max);
+	nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min);
+
+	nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1;
+	nfc_op.addr[2] = page;
+	nfc_op.addr[3] = page >> 8;
+	nfc_op.addr[4] = page >> 16;
+	nfc_op.addr_len = 5;
+	nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+
+	nfc_op.page_cnt = mtd->writesize;
+	nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+	nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0;
+
+	/* For data read/program */
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+	if (ret)
+		return ret;
+
+	if ((direction == DMA_FROM_DEVICE) && buf)
+		memcpy(buf, nfc->dma_buf, mtd->writesize);
+
+	return ret;
+}
+
+static int phytium_nand_oob_read(struct mtd_info *mtd, struct nand_chip *chip,
+				 u8 *buf, u8 *oob_buf, int oob_len, int page,
+				 bool read)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nand_chip *phytium_nand = NULL;
+	const struct nand_sdr_timings *sdr = NULL;
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	int ret = 0;
+
+	memset(&nfc_op, 0, sizeof(nfc_op));
+	phytium_nand = to_phytium_nand(chip);
+	sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface);
+
+	memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize);
+	direction = DMA_FROM_DEVICE;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ;
+	nfc_op.cmd[0] = NAND_CMD_READ0;
+	nfc_op.cmd[1] = NAND_CMD_READSTART;
+	nfc_op.cmd_len = 2;
+	nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max);
+	nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max);
+	nfc_op.rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min);
+
+	nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1;
+	nfc_op.addr[2] = page;
+	nfc_op.addr[3] = page >> 8;
+	nfc_op.addr[4] = page >> 16;
+	nfc_op.addr_len = 5;
+	nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+
+	nfc_op.page_cnt = oob_len;
+	nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+	nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0;
+	nfc_op.addr[0] = mtd->writesize & 0xFF;
+	nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF;
+
+	/* For data read/program */
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+	if (ret)
+		return ret;
+
+	cond_delay(nfc_op.rdy_delay_ns);
+
+	if (direction == DMA_FROM_DEVICE)
+		memcpy(oob_buf, nfc->dma_buf, oob_len);
+
+	return ret;
+}
+
+static int phytium_nand_get_ecc_total(struct mtd_info *mtd,
+				      struct nand_ecc_ctrl *ecc)
+{
+	int ecc_total = 0;
+
+	switch (mtd->writesize) {
+	case 0x200:
+		if (ecc->strength == 8)
+			ecc_total = 0x0D;
+		else if (ecc->strength == 4)
+			ecc_total = 7;
+		else if (ecc->strength == 2)
+			ecc_total = 4;
+		else
+			ecc_total = 0;
+		break;
+	case 0x800:
+		if (ecc->strength == 8)
+			ecc_total = 0x34;
+		else if (ecc->strength == 4)
+			ecc_total = 0x1a;
+		else if (ecc->strength == 2)
+			ecc_total = 0xd;
+		else
+			ecc_total = 0;
+		break;
+	case 0x1000:
+		if (ecc->strength == 8)
+			ecc_total = 0x68;
+		else if (ecc->strength == 4)
+			ecc_total = 0x34;
+		else if (ecc->strength == 2)
+			ecc_total = 0x1a;
+		else
+			ecc_total = 0;
+		break;
+	case 0x2000:
+		if (ecc->strength == 8)
+			ecc_total = 0xD0;
+		else if (ecc->strength == 4)
+			ecc_total = 0x68;
+		else if (ecc->strength == 2)
+			ecc_total = 0x34;
+		else
+			ecc_total = 0;
+		break;
+	case 0x4000:
+		if (ecc->strength == 8)
+			ecc_total = 0x1A0;
+		else if (ecc->strength == 4)
+			ecc_total = 0xD0;
+		else if (ecc->strength == 2)
+			ecc_total = 0x68;
+		else
+			ecc_total = 0;
+		break;
+	default:
+		ecc_total = 0;
+		break;
+	}
+
+	return ecc_total;
+}
+
+static int phytium_nand_page_read_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+					u8 *buf, u8 *oob_buf, int oob_len, int page,
+					bool read)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nand_chip *phytium_nand = NULL;
+	const struct nand_sdr_timings *sdr = NULL;
+	struct phytium_nfc_op *nfc_op = NULL;
+	enum dma_data_direction direction;
+	u32 ecc_offset;
+	int max_bitflips = 0;
+	u32 nfc_state = 0;
+	int ret = 0;
+	int i;
+
+	phytium_nand = to_phytium_nand(chip);
+	sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface);
+
+	ecc_offset = phytium_nand->ecc.offset;
+	memset(nfc->dma_buf, 0x00, mtd->writesize + mtd->oobsize);
+	nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL);
+	if (!nfc_op)
+		return -ENOMEM;
+
+	nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max);
+	nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max);
+	nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min);
+
+	direction = DMA_FROM_DEVICE;
+	nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_READ;
+	nfc_op->cmd[0] = NAND_CMD_READ0;
+	nfc_op->cmd[1] = NAND_CMD_READSTART;
+	nfc_op->cmd_len = 2;
+	nfc_op->addr_len = 5;
+	nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1;
+	nfc_op->addr[2] = page;
+	nfc_op->addr[3] = page >> 8;
+	nfc_op->addr[4] = page >> 16;
+	nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05;
+	nfc_op->cmd_ctrl.nfc_ctrl.dc = 1;
+	nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1;
+	nfc_op->page_cnt = mtd->writesize;
+	nfc_op->cmd_ctrl.nfc_ctrl.nc = 1;
+	for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++)
+		nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF;
+
+	nfc_op++;
+	memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op));
+	nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_READ_COL;
+	nfc_op->cmd[0] = NAND_CMD_RNDOUT;
+	nfc_op->cmd[1] = NAND_CMD_RNDOUTSTART;
+	memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN);
+	nfc_op->addr_len = 2;
+	nfc_op->addr[0] = mtd->writesize + phytium_nand->ecc.offset;
+	nfc_op->addr[1] = (mtd->writesize + phytium_nand->ecc.offset) >> 8;
+	nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02;
+	nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc);
+	nfc_op->cmd_ctrl.nfc_ctrl.nc = 0;
+	nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0;
+	nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1;
+	for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++)
+		nfc_op->mem_addr_first[i] =
+			((nfc->dma_phy_addr + mtd->writesize) >> (8 * i)) & 0xFF;
+
+	nfc_op--;
+	phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2);
+	phytium_nfc_send_cmd2(chip, nfc_op, 2);
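+	/*
+	 * cle_ale_delay_ns carries tWB_max: let the chip pull R/B# busy
+	 * before waiting for the completion interrupt.
+	 */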
+	cond_delay(nfc_op->cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms);
+	if (ret) {
+		kfree(nfc_op);
+		return ret;
+	}
+
+	cond_delay(nfc_op->rdy_delay_ns);
+
+	if ((direction == DMA_FROM_DEVICE) && buf) {
+		nfc_state = phytium_read(nfc, NDSR);
+		if ((nfc_state & NDSR_ECC_ERROVER) || (nfc_ecc_errover == 1)) {
+			for (i = 0; i < mtd->writesize/16; i++) {
+				if (*(u8 *)(nfc->dma_buf + i) != 0xFF) {
+					dev_info(nfc->dev, "NFC: NDSR_ECC_ERROVER %x\n", page);
+					mtd->ecc_stats.failed++;
+					mtd->ecc_stats.corrected += max_bitflips;
+					break;
+				}
+			}
+		} else if (nfc_state & NDSR_ECC_ERR) {
+			max_bitflips = phytium_nfc_hw_ecc_correct(chip,
+					nfc->dma_buf, mtd->writesize);
+			mtd->ecc_stats.corrected += max_bitflips;
+			dev_info(nfc->dev, "NFC: NDSR_ECC_ERR page:%x, bit:%d\n",
+				 page, max_bitflips);
+		}
+
+		memcpy(buf, nfc->dma_buf, mtd->writesize);
+	}
+
+	kfree(nfc_op);
+	return max_bitflips;
+}
+
+static int phytium_nand_page_write(struct mtd_info *mtd, struct nand_chip *chip,
+				   const u8 *buf, u8 *oob_buf, int oob_len, int page,
+				   bool read)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nand_chip *phytium_nand = NULL;
+	const struct nand_sdr_timings *sdr = NULL;
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	int ret = 0;
+
+	memset(&nfc_op, 0, sizeof(nfc_op));
+	phytium_nand = to_phytium_nand(chip);
+	sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface);
+
+	memcpy(nfc->dma_buf, buf, mtd->writesize);
+	direction = DMA_TO_DEVICE;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO;
+	nfc_op.cmd[0] = NAND_CMD_SEQIN;
+	nfc_op.cmd[1] = NAND_CMD_PAGEPROG;
+	nfc_op.cmd_len = 2;
+	nfc_op.addr_len = 5;
+	nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max);
+	nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max);
+	nfc_op.rdy_delay_ns = 0;
+
+	nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1;
+	nfc_op.addr[2] = page;
+	nfc_op.addr[3] = page >> 8;
+	nfc_op.addr[4] = page >> 16;
+	nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05;
+	nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+	nfc_op.page_cnt = mtd->writesize;
+
+	/* For data read/program */
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+	if (ret)
+		goto out;
+
+	cond_delay(nfc_op.rdy_delay_ns);
+out:
+	return ret;
+}
+
+static int phytium_nand_oob_write(struct mtd_info *mtd, struct nand_chip *chip,
+				  u8 *buf, u8 *oob_buf, int oob_len, int page,
+				  bool read)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nand_chip *phytium_nand = NULL;
+	const struct nand_sdr_timings *sdr = NULL;
+	struct phytium_nfc_op nfc_op;
+	enum dma_data_direction direction;
+	int ret = 0;
+
+	memset(&nfc_op, 0, sizeof(nfc_op));
+	phytium_nand = to_phytium_nand(chip);
+	sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface);
+
+	direction = DMA_TO_DEVICE;
+	nfc_op.cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO;
+	nfc_op.cmd[0] = NAND_CMD_SEQIN;
+	nfc_op.cmd[1] = NAND_CMD_PAGEPROG;
+	nfc_op.cmd_len = 2;
+	nfc_op.addr_len = 5;
+	nfc_op.cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max);
+	nfc_op.rdy_timeout_ms = PSEC_TO_MSEC(sdr->tPROG_max);
+	nfc_op.rdy_delay_ns = 0;
+
+	nfc_op.cmd_ctrl.nfc_ctrl.dbc = 1;
+	nfc_op.addr[2] = page;
+	nfc_op.addr[3] = page >> 8;
+	nfc_op.addr[4] = page >> 16;
+	nfc_op.cmd_ctrl.nfc_ctrl.addr_cyc = 0x05;
+	nfc_op.cmd_ctrl.nfc_ctrl.dc = 1;
+	nfc_op.cmd_ctrl.nfc_ctrl.auto_rs = 1;
+
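+	/* Program oob_len bytes starting at the OOB column, ECC bypassed */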
+	nfc_op.page_cnt = oob_len;
+	nfc_op.cmd_ctrl.nfc_ctrl.ecc_en = 0;
+	nfc_op.addr[0] = mtd->writesize & 0xFF;
+	nfc_op.addr[1] = (mtd->writesize >> 8) & 0xFF;
+	memcpy(nfc->dma_buf, oob_buf, mtd->oobsize);
+
+	/* For data read/program */
+	phytium_nfc_prepare_cmd(chip, &nfc_op, direction);
+	phytium_nfc_send_cmd(chip, &nfc_op);
+	cond_delay(nfc_op.cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+	if (ret)
+		goto out;
+
+	cond_delay(nfc_op.rdy_delay_ns);
+out:
+	return ret;
+}
+
+static int phytium_nand_page_write_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+					 const u8 *buf, u8 *oob_buf, int oob_len, int page,
+					 bool read)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	struct phytium_nand_chip *phytium_nand = NULL;
+	const struct nand_sdr_timings *sdr = NULL;
+	struct phytium_nfc_op *nfc_op;
+	enum dma_data_direction direction;
+	u32 ecc_offset;
+	int ret = 0;
+	int i;
+
+	phytium_nand = to_phytium_nand(chip);
+	sdr = nand_get_sdr_timings(&phytium_nand->chip.data_interface);
+	ecc_offset = phytium_nand->ecc.offset;
+
+	nfc_op = kzalloc(2 * sizeof(struct phytium_nfc_op), GFP_KERNEL);
+	if (!nfc_op)
+		return -ENOMEM;
+
+	nfc_op->cle_ale_delay_ns = PSEC_TO_NSEC(sdr->tWB_max);
+	nfc_op->rdy_timeout_ms = PSEC_TO_MSEC(sdr->tR_max);
+	nfc_op->rdy_delay_ns = PSEC_TO_NSEC(sdr->tRR_min);
+
+	direction = DMA_TO_DEVICE;
+	nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_CH_ROW_ADDR;
+	nfc_op->cmd[0] = NAND_CMD_SEQIN;
+	nfc_op->cmd_len = 1;
+	nfc_op->addr_len = 5;
+	nfc_op->cmd_ctrl.nfc_ctrl.dbc = 0;
+	nfc_op->addr[2] = page;
+	nfc_op->addr[3] = page >> 8;
+	nfc_op->addr[4] = page >> 16;
+	nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x05;
+	nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 0;
+	nfc_op->cmd_ctrl.nfc_ctrl.nc = 1;
+	for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++)
+		nfc_op->mem_addr_first[i] = (nfc->dma_phy_addr >> (8 * i)) & 0xFF;
+
+	/* The first dsp must have data to transfer */
+	memcpy(nfc->dma_buf, buf, mtd->writesize);
+	nfc_op->page_cnt = mtd->writesize;
+	nfc_op->cmd_ctrl.nfc_ctrl.dc = 1;
+
+	nfc_op++;
+	memcpy(nfc_op, nfc_op - 1, sizeof(struct phytium_nfc_op));
+	nfc_op->cmd_ctrl.nfc_ctrl.cmd_type = TYPE_PAGE_PRO;
+	nfc_op->cmd_ctrl.nfc_ctrl.dbc = 1;
+	nfc_op->cmd_ctrl.nfc_ctrl.auto_rs = 1;
+	nfc_op->cmd[0] = NAND_CMD_RNDIN;
+	nfc_op->cmd[1] = NAND_CMD_PAGEPROG;
+	memset(&nfc_op->addr, 0, PHYTIUM_NFC_ADDR_MAX_LEN);
+	nfc_op->addr_len = 2;
+	nfc_op->cmd_len = 2;
+	nfc_op->addr[0] = mtd->writesize + ecc_offset;
+	nfc_op->addr[1] = (mtd->writesize + ecc_offset) >> 8;
+	nfc_op->cmd_ctrl.nfc_ctrl.addr_cyc = 0x02;
+	nfc_op->page_cnt = phytium_nand_get_ecc_total(mtd, &chip->ecc);
+	nfc_op->cmd_ctrl.nfc_ctrl.nc = 0;
+	nfc_op->cmd_ctrl.nfc_ctrl.dc = 1;
+	nfc_op->cmd_ctrl.nfc_ctrl.ecc_en = 1;
+	for (i = 0; i < PHYTIUM_NFC_ADDR_MAX_LEN; i++)
+		nfc_op->mem_addr_first[i] =
+			((nfc->dma_phy_addr + mtd->writesize + ecc_offset) >> (8 * i)) & 0xFF;
+
+	/*
+	 * With ECC enabled the controller needs the ECC area offset within
+	 * the OOB; no user OOB data is written by this descriptor.
+	 */
+	nfc_op--;
+	phytium_nfc_prepare_cmd2(chip, nfc_op, direction, 2);
+	phytium_nfc_send_cmd2(chip, nfc_op, 2);
+	cond_delay(nfc_op->cle_ale_delay_ns);
+
+	ret = phytium_nfc_wait_op(chip, nfc_op->rdy_timeout_ms);
+	if (ret)
+		goto out;
+
+	cond_delay(nfc_op->rdy_delay_ns);
+out:
+	kfree(nfc_op);
+	return ret;
+}
+
+static int phytium_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
+						struct nand_chip *chip,
+						u8 *buf, int oob_required,
+						int page)
+{
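+	/* Raw variant: data and OOB are transferred with ECC correction off */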
+	u32 oob_len = oob_required ? mtd->oobsize : 0;
+	int ret;
+
+	ret = phytium_nand_page_read(mtd, chip, buf, NULL, 0, page, true);
+	if (oob_required)
+		ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi,
+					    oob_len, page, true);
+
+	phytium_nfc_data_dump(to_phytium_nfc(chip->controller), buf, mtd->writesize);
+
+	return ret;
+}
+
+static int phytium_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
+					       struct nand_chip *chip, int page)
+{
+	int ret;
+
+	/* Invalidate page cache */
+	chip->pagebuf = -1;
+	memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+	ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi,
+				    mtd->oobsize, page, true);
+
+	phytium_nfc_data_dump(to_phytium_nfc(chip->controller), chip->oob_poi, mtd->oobsize);
+
+	return ret;
+}
+
+static int phytium_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
+					    struct nand_chip *chip,
+					    u8 *buf, int oob_required,
+					    int page)
+{
+	int ret;
+	u32 oob_len = oob_required ? mtd->oobsize : 0;
+	struct phytium_nand_chip *phytium_nand = NULL;
+
+	phytium_nand = to_phytium_nand(chip);
+
+	phytium_nfc_enable_hw_ecc(chip);
+	ret = phytium_nand_page_read_hwecc(mtd, chip, buf, NULL,
+					   0, page, true);
+	phytium_nfc_disable_hw_ecc(chip);
+
+	if (oob_required) {
+		oob_len = mtd->oobsize;
+		ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi,
+					    oob_len, page, true);
+	}
+
+	phytium_nfc_data_dump(to_phytium_nfc(chip->controller), buf, mtd->writesize);
+
+	return ret;
+}
+
+static int phytium_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
+					   struct nand_chip *chip,
+					   int page)
+{
+	u32 oob_len = mtd->oobsize;
+	int ret;
+
+	ret = phytium_nand_oob_read(mtd, chip, NULL, chip->oob_poi,
+				    oob_len, page, true);
+
+	phytium_nfc_data_dump(to_phytium_nfc(chip->controller), chip->oob_poi, oob_len);
+
+	return ret;
+}
+
+static int phytium_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
+						 struct nand_chip *chip,
+						 const u8 *buf,
+						 int oob_required, int page)
+{
+	void *oob_buf = oob_required ? chip->oob_poi : NULL;
+
+	if (oob_required)
+		phytium_nand_oob_write(mtd, chip, NULL, oob_buf,
+				       mtd->oobsize, page, false);
+
+	return phytium_nand_page_write(mtd, chip, buf, NULL,
+				       0, page, false);
+}
+
+static int phytium_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+					     struct nand_chip *chip,
+					     const u8 *buf,
+					     int oob_required, int page)
+{
+	int ret;
+	void *oob_buf = oob_required ? chip->oob_poi : NULL;
+	u32 oob_len;
+
+	if (oob_required) {
+		oob_len = mtd->oobsize;
+		phytium_nand_oob_write(mtd, chip, NULL, oob_buf,
+				       oob_len, page, false);
+	}
+
+	phytium_nfc_enable_hw_ecc(chip);
+	ret = phytium_nand_page_write_hwecc(mtd, chip, buf, NULL,
+					    0, page, false);
+	phytium_nfc_disable_hw_ecc(chip);
+
+	return ret;
+}
+
+static int phytium_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
+						struct nand_chip *chip, int page)
+{
+	return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi,
+				      mtd->oobsize, page, false);
+}
+
+static int phytium_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
+					    struct nand_chip *chip, int page)
+{
+	struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip);
+	u32 oob_len = mtd->oobsize - phytium_nand->ecc.length;
+
+	return phytium_nand_oob_write(mtd, chip, NULL, chip->oob_poi,
+				      oob_len, page, false);
+}
+
+static int phytium_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+					 struct nand_ecc_ctrl *ecc)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)
+		return -ENOTSUPP;
+
+	chip->ecc.algo = NAND_ECC_BCH;
+	ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw;
+	ecc->read_page = phytium_nfc_hw_ecc_bch_read_page;
+	ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob_raw;
+	ecc->read_oob = phytium_nfc_hw_ecc_bch_read_oob;
+	ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw;
+	ecc->write_page = phytium_nfc_hw_ecc_bch_write_page;
+	ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw;
+	ecc->write_oob = phytium_nfc_hw_ecc_bch_write_oob;
+
+	return 0;
+}
+
+static int phytium_nand_ecc_init(struct mtd_info *mtd,
+				 struct nand_ecc_ctrl *ecc)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret = 0;
+
+	if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
+		if (chip->ecc_step_ds && chip->ecc_strength_ds) {
+			ecc->size = chip->ecc_step_ds;
+			ecc->strength = chip->ecc_strength_ds;
+		} else {
+			ecc->size = 512;
+			ecc->strength = 1;
+		}
+	}
+
+	mtd_set_ooblayout(mtd, &phytium_nand_ooblayout_ops);
+
+	switch (ecc->mode) {
+	case NAND_ECC_HW:
+		ret = phytium_nand_hw_ecc_ctrl_init(mtd, ecc);
+		break;
+	case NAND_ECC_NONE:
+		ecc->read_page_raw = phytium_nfc_hw_ecc_bch_read_page_raw;
+		ecc->read_oob_raw = phytium_nfc_hw_ecc_bch_read_oob_raw;
+		ecc->write_page_raw = phytium_nfc_hw_ecc_bch_write_page_raw;
+		ecc->write_oob_raw = phytium_nfc_hw_ecc_bch_write_oob_raw;
+		ecc->read_page = ecc->read_page_raw;
+		ecc->read_oob = ecc->read_oob_raw;
+		ecc->write_page = ecc->write_page_raw;
+		ecc->write_oob = ecc->write_oob_raw;
+		break;
+	case NAND_ECC_SOFT:
+	case NAND_ECC_ON_DIE:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static u8 bbt_pattern[] = {'P', 'H', 'Y', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'Y', 'H', 'P' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+		   NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs = 8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8, /* Last 8 blocks in each chip */
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+		   NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs = 8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8, /* Last 8 blocks in each chip */
+	.pattern = bbt_mirror_pattern
+};
+
+static int phytium_nand_attach_chip(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct phytium_nand_chip *phytium_nand = to_phytium_nand(chip);
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	u32 value;
+	int ret = 0;
+
+	if (nfc->caps->flash_bbt)
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+		/*
+		 * We'll use a bad block table stored in-flash and don't
+		 * allow writing the bad block marker to the flash.
+		 */
+		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+		chip->bbt_td = &bbt_main_descr;
+		chip->bbt_md = &bbt_mirror_descr;
+	}
+
+	if (chip->options & NAND_BUSWIDTH_16)
+		phytium_nand->ndcr |= NDCR0_WIDTH;
+
+	/*
+	 * On small page NANDs, only one cycle is needed to pass the
+	 * column address.
+	 */
+	if (mtd->writesize <= 512)
+		phytium_nand->addr_cyc = 1;
+	else
+		phytium_nand->addr_cyc = 2;
+
+	/*
+	 * Now add the number of cycles needed to pass the row
+	 * address.
+	 *
+	 * Addressing a chip using CS 2 or 3 would also need the third row
+	 * cycle, but due to inconsistency in the documentation and lack of
+	 * hardware to test this situation, this case is not supported.
+	 */
+	if (chip->options & NAND_ROW_ADDR_3)
+		phytium_nand->addr_cyc += 3;
+	else
+		phytium_nand->addr_cyc += 2;
+
+	if (nfc->caps) {
+		if (chip->ecc.mode == NAND_ECC_HW) {
+			chip->ecc.size = nfc->caps->ecc_step_size ?
+					 nfc->caps->ecc_step_size :
+					 chip->ecc.size;
+			chip->ecc.strength = nfc->caps->ecc_strength ?
+					     nfc->caps->ecc_strength :
+					     chip->ecc.strength;
+			chip->ecc.bytes = 7;
+		} else {
+			chip->ecc.size = 512;
+			chip->ecc.strength = 1;
+			chip->ecc.bytes = 0;
+		}
+		chip->ecc.mode = NAND_ECC_HW;
+	}
+
+	if (nfc->caps->hw_ver == 1) {
+		if (chip->ecc.strength == 0x04)
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(4);
+		else if (chip->ecc.strength == 0x02)
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(2);
+		else
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(0);
+	} else {
+		if (chip->ecc.strength == 0x08)
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(7);
+		else if (chip->ecc.strength == 0x04)
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(3);
+		else if (chip->ecc.strength == 0x02)
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(1);
+		else
+			phytium_nand->ndcr |= NDCR0_ECC_STREN(0);
+	}
+
+	value = phytium_read(nfc, NDCR0);
+	value &= ~NDCR0_EN;
+	phytium_write(nfc, NDCR0, value);
+
+	value &= ~NDCR0_ECC_STREN(7);
+	value |= phytium_nand->ndcr;
+	phytium_write(nfc, NDCR0, value | NDCR0_EN);
+
+	ret = phytium_nand_ecc_init(mtd, &chip->ecc);
+	if (ret) {
+		dev_err(nfc->dev, "ECC init failed: %d\n", ret);
+		goto out;
+	}
+
+	/*
+	 * Subpage write not available with hardware ECC, prohibit also
+	 * subpage read as in userspace subpage access would still be
+	 * allowed and subpage write, if used, would lead to numerous
+	 * uncorrectable ECC errors.
+	 */
+	if (chip->ecc.mode == NAND_ECC_HW)
+		chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+	/*
+	 * We keep the MTD name unchanged to avoid breaking platforms
+	 * where the MTD cmdline parser is used and the bootloader
+	 * has not been updated to use the new naming scheme.
+	 */
+	if (nfc->caps->legacy_of_bindings)
+		mtd->name = "phytium_nand-0";
+
+out:
+	return ret;
+}
+
+static const struct nand_controller_ops phytium_nand_controller_ops = {
+	.attach_chip = phytium_nand_attach_chip,
+};
+
+static void phytium_nand_chips_cleanup(struct phytium_nfc *nfc)
+{
+	struct phytium_nand_chip *entry, *temp;
+
+	list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+		nand_release(&entry->chip);
+		list_del(&entry->node);
+	}
+}
+
+static int phytium_nfc_init_dma(struct phytium_nfc *nfc)
+{
+	int ret;
+
+	ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	nfc->dsp_addr = dma_alloc_coherent(nfc->dev, PAGE_SIZE,
+					   &nfc->dsp_phy_addr, GFP_KERNEL | GFP_DMA);
+	if (!nfc->dsp_addr)
+		return -ENOMEM;
+
+	nfc->dma_buf = dma_alloc_coherent(nfc->dev, MAX_CHUNK_SIZE,
+					  &nfc->dma_phy_addr, GFP_KERNEL | GFP_DMA);
+	if (!nfc->dma_buf)
+		return -ENOMEM;
+
+	dev_info(nfc->dev, "NFC address dsp_phy_addr:%llx, dma_phy_addr:%llx\n",
+		 nfc->dsp_phy_addr, nfc->dma_phy_addr);
+
+	return 0;
+}
+
+int phytium_nfc_init(struct phytium_nfc *nfc)
+{
+	u32 value;
+	int ret;
+
+	nfc->inter_mode = ASYN_SDR;
+	nfc->timing_mode = ASY_MODE0;
+
+	value = phytium_read(nfc, NDCR1);
+	value &= (~NDCR1_SAMPL_PHASE(0xFFFF));
+	value &= ~NDCR1_ECC_DATA_FIRST_EN;
+	value |= NDCR1_SAMPL_PHASE(1);
+	value |= NDCR1_ECC_BYPASS;
+	phytium_write(nfc, NDCR1, value);
+	phytium_write(nfc, ND_INTERVAL_TIME, 1);
+	phytium_write(nfc, NDFIFO_LEVEL0, 4);
+	phytium_write(nfc, NDFIFO_LEVEL1, 4);
+	phytium_write(nfc, NDFIFO_CLR, 1);
+	phytium_write(nfc, ND_ERR_CLR, 1);
+
+	/* Configure the DMA */
+	ret = phytium_nfc_init_dma(nfc);
+	if (ret)
+		return ret;
+
+	phytium_nfc_reset(nfc);
+
+	value = phytium_read(nfc, NDCR0);
+	value &= (~NDCR0_IN_MODE(3));
+	value |= NDCR0_IN_MODE(nfc->inter_mode);
+	value |= NDCR0_EN;
+
+	phytium_write(nfc, NDCR0, value);
+
+	nfc_ecc_errover = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytium_nfc_init);
+
+static int phytium_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+					    const struct nand_data_interface
+					    *conf)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	unsigned int period_ns = 2;
+	const struct nand_sdr_timings *sdr;
+	struct phytium_nfc_timings nfc_tmg;
+	int read_delay;
+
+	sdr = nand_get_sdr_timings(conf);
+	if (IS_ERR(sdr))
+		return PTR_ERR(sdr);
+
+	nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+	nfc_tmg.tRH = nfc_tmg.tRP;
+	nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+	nfc_tmg.tWH = nfc_tmg.tWP;
+	nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+	nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+	nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+	dev_dbg(nfc->dev, "[nfc_tmg]tRP: %d, tRH:%d, tWP:%d tWH:%d\n",
+		nfc_tmg.tRP, nfc_tmg.tRH, nfc_tmg.tWP, nfc_tmg.tWH);
+	dev_dbg(nfc->dev, "[nfc_tmg]tCS: %d, tCH:%d, tADL:%d\n",
+		nfc_tmg.tCS, nfc_tmg.tCH, nfc_tmg.tADL);
+
+	read_delay = sdr->tRC_min >= 30000 ?
+		     MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+	nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+	nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+				 period_ns) - 2;
+	nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+				 period_ns);
+	dev_dbg(nfc->dev, "[nfc_tmg]tAR: %d, tWHR:%d, tRHW:%d\n",
+		nfc_tmg.tAR, nfc_tmg.tWHR, nfc_tmg.tRHW);
+
+	nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+
+	if (chipnr < 0)
+		return 0;
+
+	if (nfc_tmg.tWP > 0x10)
+		nfc->timing_mode = ASY_MODE1;
+	else if (nfc_tmg.tWP < 0x0D)
+		nfc->timing_mode = ASY_MODE3;
+
+	if (nfc->inter_mode == ONFI_DDR)
+		nfc->timing_mode = SYN_MODE3;
+
+	phytium_nfc_default_data_interface(nfc);
+
+	return 0;
+}
+
+static int phytium_nand_chip_init(struct phytium_nfc *nfc)
+{
+	struct device *dev = nfc->dev;
+	struct phytium_nand_chip *phytium_nand;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	int ret;
+
+	/* Alloc the nand chip structure */
+	phytium_nand = devm_kzalloc(dev, sizeof(*phytium_nand), GFP_KERNEL);
+	if (!phytium_nand)
+		return -ENOMEM;
+
+	phytium_nand->nsels = 1;
+	phytium_nand->selected_die = -1;
+
+	chip = &phytium_nand->chip;
+	chip->controller = &nfc->controller;
+	chip->exec_op = phytium_nfc_exec_op;
+	chip->select_chip = phytium_nfc_select_chip;
+	chip->setup_data_interface = phytium_nfc_setup_data_interface;
+	phytium_nfc_default_data_interface(nfc);
+
+	mtd = nand_to_mtd(chip);
+	mtd->dev.parent = dev;
+	mtd->owner = THIS_MODULE;
+
+	/*
+	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+	 * in the DT node, this entry will be overwritten in nand_scan_ident().
+	 */
+	chip->ecc.mode = NAND_ECC_HW;
+
+	chip->options |= NAND_BUSWIDTH_AUTO;
+	chip->options |= NAND_SKIP_BBTSCAN;
+	chip->bbt_options |= NAND_BBT_NO_OOB;
+
+	ret = nand_scan(chip, phytium_nand->nsels);
+	if (ret) {
+		dev_err(dev, "could not scan the nand chip\n");
+		return ret;
+	}
+
+	if (nfc->caps->parts) {
+		ret = mtd_device_register(mtd, nfc->caps->parts, nfc->caps->nr_parts);
+	} else if (dev->of_node) {
+		nand_set_flash_node(chip, dev->of_node);
+		ret = mtd_device_register(mtd, NULL, 0);
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "failed to register mtd device: %d\n", ret);
+		nand_release(chip);
+		return ret;
+	}
+
+	phytium_nand->ecc.length = phytium_nand_get_ecc_total(mtd, &chip->ecc);
+	phytium_nand->ecc.offset = mtd->oobsize - phytium_nand->ecc.length;
+	chip->ecc.total = phytium_nand_get_ecc_total(mtd, &chip->ecc);
+
+	mtd_ooblayout_ecc(mtd, 0, &phytium_nand->ecc);
+
+	dev_info(dev, "ooblayout ecc offset: %x, length: %x\n",
+		 phytium_nand->ecc.offset, phytium_nand->ecc.length);
+
+	list_add_tail(&phytium_nand->node, &nfc->chips);
+
+	return 0;
+}
+
+int phytium_nand_init(struct phytium_nfc *nfc)
+{
+	int ret;
+
+	nand_controller_init(&nfc->controller);
+	nfc->controller.ops = &phytium_nand_controller_ops;
+	INIT_LIST_HEAD(&nfc->chips);
+	spin_lock_init(&nfc->spinlock);
+
+	/* Init the controller and then probe the chips */
+	ret = phytium_nfc_init(nfc);
+	if (ret)
+		goto out;
+
+	ret = phytium_nand_chip_init(nfc);
+	if (ret)
+		goto out;
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(phytium_nand_init);
+
+int phytium_nand_remove(struct phytium_nfc *nfc)
+{
+	phytium_nand_chips_cleanup(nfc);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytium_nand_remove);
+
+static int phytium_nfc_wait_ndrun(struct nand_chip *chip)
+{
+	struct phytium_nfc *nfc = to_phytium_nfc(chip->controller);
+	int ret = 0;
+	u32 val;
+
+	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+					 (val & NDSR_RB) == 0,
+					 0, 100 * 1000);
+	if (ret) {
+		dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+		ret = -EAGAIN;
+	}
+
+	return ret;
+}
+
+int phytium_nand_prepare(struct phytium_nfc *nfc)
+{
+	struct phytium_nand_chip *chip = NULL;
+
+	list_for_each_entry(chip, &nfc->chips, node)
+		phytium_nfc_wait_ndrun(&chip->chip);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytium_nand_prepare);
+
+int phytium_nand_resume(struct phytium_nfc *nfc)
+{
+	nfc->selected_chip = NULL;
+	phytium_nfc_init(nfc);
+	phytium_nfc_default_data_interface(nfc);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phytium_nand_resume);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Phytium NAND controller platform driver");
+MODULE_AUTHOR("Zhu Mingshuai ");
diff --git a/drivers/mtd/nand/raw/phytium_nand.h b/drivers/mtd/nand/raw/phytium_nand.h
new file mode 100644
index 0000000000000000000000000000000000000000..963d555a2aca009c7904b1d362d7b833483b507f
--- /dev/null
+++ b/drivers/mtd/nand/raw/phytium_nand.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Phytium NAND flash controller driver
+ *
+ * Copyright (c) 2020-2023 Phytium Technology Co., Ltd.
+ */
+#ifndef PHYTIUM_NAND_H
+#define PHYTIUM_NAND_H
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/slab.h>
+
+/* NFC does not support transfers of larger chunks at a time */
+#define MAX_PAGE_NUM 16
+#define MAX_CHUNK_SIZE ((1024 + 76) * 16)
+
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+
+#define PHYTIUM_NFC_ADDR_MAX_LEN 5
+#define PHYTIUM_NFC_DSP_SIZE 16
+
+/* NAND controller flash control register */
+#define NDCR0 0x00
+#define NDCR0_EN BIT(0)
+#define NDCR0_WIDTH BIT(1)
+#define NDCR0_IN_MODE(x) (min_t(u32, x, 0x3) << 2)
+#define NDCR0_ECC_EN BIT(4)
+#define NDCR0_ECC_STREN(x) (min_t(u32, x, 0x7) << 5)
+#define NDCR0_SPARE_EN BIT(8)
+#define NDCR0_SPARE_SIZE(x) (min_t(u32, x, 0xFFF) << 9)
+#define NDCR0_GENERIC_FIELDS_MASK
+
+#define NDCR1 0x04
+#define NDCR1_SAMPL_PHASE(x) min_t(u32, x, 0xFFFF)
+#define NDCR1_ECC_DATA_FIRST_EN BIT(16)
+#define NDCR1_RB_SHARE_EN BIT(17)
+#define NDCR1_ECC_BYPASS BIT(18)
+
+#define NDAR0 0x08
+
+#define NDAR1 0x0C
+#define NDAR1_H8(x) min_t(u32, x, 0xFF)
+#define NDAR1_DMA_EN BIT(8)
+#define NDAR1_EMPTY(x) (min_t(u32, x, 0x7F) << 9)
+#define NDAR1_DMA_RLEN(x) (min_t(u32, x, 0xFF) << 9)
+#define NDAR1_DMA_WLEN(x) (min_t(u32, x, 0xFF) << 9)
+
+#define NDTR0 0x10
+#define NDTR0_TCS_TCLS(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR0_TCLS_TWP(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR1 0x14
+#define NDTR1_TWH(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR1_TWP(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR2 0x18
+#define NDTR2_TCH_TCLH(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR2_TCLH_TWH(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR3 0x1c
+#define NDTR3_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR3_TCH_TWH(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR4 0x20
+#define NDTR4_TWHR_SMX(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR4_TREH(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR5 0x24
+#define NDTR5_TRC(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR5_TADL_SMX(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR6 0x28
+#define NDTR6_TCAD_TCS_SMX(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR6_RES(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR7 0x2c
+#define NDTR7_TCK(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR7_TDQ_EN(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR8 0x30
+#define NDTR8_TCAD_TCK_SMX(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR8_HF_TCK(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR9 0x34
+#define NDTR9_TWHR(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR9_TCCS_TCALS_SMX(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR10 0x38
+#define NDTR10_TCK(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR10_MTCK(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR11 0x3c
+#define NDTR11_TCK_TCALS(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR11_RES(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR12 0x40
+#define NDTR12_TWRCK(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR12_RES(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR13 0x44
+#define NDTR13_TWRHCA(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR13_TRLCA(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR14 0x48
+#define NDTR14_TWRHCE(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR14_RES(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR15 0x4c
+#define NDTR15_TCDQSS_TWPRE_TDS(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR15_HFTDSC(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR16 0x50
+#define NDTR16_TWPST_TDH(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR16_TWPSTH(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR17 0x54
+#define NDTR17_TCS_TRPRE(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR17_TRELDQS(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDTR18 0x58
+#define NDTR18_TRPST_TDQSRE(x) (min_t(u32, x, 0xFFFF) << 0)
+#define NDTR18_RES(x) (min_t(u32, x, 0xFFFF) << 16)
+
+#define NDFIFO 0x5c
+#define NDFIFO_REV(x) (min_t(u32, x, 0) << 12)
+#define NDFIFO_FULL BIT(11)
+#define NDFIFO_EMP BIT(10)
+#define NDFIFO_CNT(x) (min_t(u32, x, 0x3F) << 0)
+
+#define ND_INTERVAL_TIME 0x60
+#define NDCMD_INTERVAL_TIME 0x64
+#define NDFIFO_TIMEOUT 0x68
+#define NDFIFO_LEVEL0 0x6c
+#define NDFIFO_LEVEL1 0x70
+#define NDWP 0x74
+#define NDFIFO_CLR 0x78
+
+#define NDSR 0x7c
+#define NDSR_BUSY BIT(0)
+#define NDSR_DMA_BUSY BIT(1)
+#define NDSR_DMA_PGFINISH BIT(2)
+#define NDSR_DMA_FINISH BIT(3)
+#define NDSR_FIFO_EMP BIT(4)
+#define NDSR_FIFO_FULL BIT(5)
+#define NDSR_FIFO_TIMEOUT BIT(6)
+#define NDSR_CS(x) (min_t(u32, x, 0xF) << 7)
+#define NDSR_CMD_PGFINISH BIT(11)
+#define NDSR_PG_PGFINISH BIT(12)
+#define NDSR_RE BIT(13)
+#define NDSR_DQS BIT(14)
+#define NDSR_RB BIT(15)
+#define NDSR_ECC_BUSY BIT(16)
+#define NDSR_ECC_FINISH BIT(17)
+#define NDSR_ECC_RIGHT BIT(18)
+#define NDSR_ECC_ERR BIT(19)
+#define NDSR_ECC_ERROVER BIT(20)
+#define NDSR_AXI_DSP_ERR BIT(21)
+#define NDSR_AXI_RD_ERR BIT(22)
+#define NDSR_AXI_WR_ERR BIT(23)
+#define NDSR_RB_STATUS(x) (min_t(u32, x, 0xF) << 24)
+#define NDSR_PROT_ERR BIT(28)
+#define NDSR_ECC_BYPASS BIT(29)
+
+#define NDIR_MASK 0x80
+#define NDIR_BUSY_MASK BIT(0)
+#define NDIR_DMA_BUSY_MASK BIT(1)
+#define NDIR_DMA_PGFINISH_MASK BIT(2)
+#define NDIR_DMA_FINISH_MASK BIT(3)
+#define NDIR_FIFO_EMP_MASK BIT(4)
+#define NDIR_FIFO_FULL_MASK BIT(5)
+#define NDIR_FIFO_TIMEOUT_MASK BIT(6)
+#define NDIR_CMD_FINISH_MASK BIT(7)
+#define NDIR_PGFINISH_MASK BIT(8)
+#define NDIR_RE_MASK BIT(9)
+#define NDIR_DQS_MASK BIT(10)
+#define NDIR_RB_MASK BIT(11)
+#define NDIR_ECC_FINISH_MASK BIT(12)
+#define NDIR_ECC_ERR_MASK BIT(13)
+
+#define NDIR 0x84
+#define NDIR_ALL_INT(x) GENMASK(x, 0)
+#define NDIR_BUSY BIT(0)
+#define NDIR_DMA_BUSY BIT(1)
+#define NDIR_DMA_PGFINISH BIT(2)
+#define NDIR_DMA_FINISH BIT(3)
+#define NDIR_FIFO_EMP BIT(4)
+#define NDIR_FIFO_FULL BIT(5)
+#define NDIR_FIFO_TIMEOUT BIT(6)
+#define NDIR_CMD_FINISH BIT(7)
+#define NDIR_PGFINISH BIT(8)
+#define NDIR_RE BIT(9)
+#define NDIR_DQS BIT(10)
+#define NDIR_RB BIT(11)
+#define NDIR_ECC_FINISH BIT(12)
+#define NDIR_ECC_ERR BIT(13)
+
+#define ND_DEBUG 0x88
+
+#define ND_ERR_CLR 0x8c
+#define ND_DSP_ERR_CLR BIT(0)
+#define ND_AXI_RD_ERR_CLR BIT(1)
+#define ND_AXI_WR_ERR_CLR BIT(2)
+#define ND_ECC_ERR_CLR BIT(3)
+
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+
+enum nand_inter_pro {
+	NAND_ONFI,
+	NAND_JEDEC,
+	NAND_OTHER,
+};
+
+enum nand_inter_mode {
+	ASYN_SDR,
+	ONFI_DDR,
+	TOG_ASYN_DDR,
+};
+
+enum asy_timing_mode {
+	ASY_MODE0,
+	ASY_MODE1,
+	ASY_MODE2,
+	ASY_MODE3,
+	ASY_MODE4,
+};
+
+enum onfi_syn_timing_mode {
+	SYN_MODE0 = 0x10,
+	SYN_MODE1,
+	SYN_MODE2,
+	SYN_MODE3,
+	SYN_MODE4,
+};
+
+/**
+ * NAND controller timings expressed in NAND Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct phytium_nfc_timings {
+	u16 tRP;
+	u16 tRH;
+	u16 tWP; /* NDTR1_TWP */
+	u16 tWH; /* NDTR1_TWH */
+	u16 tCS;
+	u16 tCH;
+	u16 tADL;
+	u16 tAR;
+	u16 tWHR;
+	u16 tRHW;
+	u16 tR;
+};
+
+/**
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc: Number of address cycles used by this NAND chip
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @ecc: ECC area (offset and length) within the OOB
+ */
+struct phytium_nand_chip {
+	struct nand_chip chip;
+	struct list_head node;
+	u32 ndcr;
+	u32 ndtr0;
+	u32 ndtr1;
+	int addr_cyc;
+	int selected_die;
+	unsigned int nsels;
+	struct mtd_oob_region ecc;
+};
+
+/**
+ * NAND controller capabilities for distinction between compatible strings
+ *
+ * @hw_ver: Hardware version of the controller
+ * @int_mask_bits: Number of valid interrupt mask bits
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @legacy_of_bindings: Indicates if DT parsing must be done using the old
+ *                      fashion way
+ * @flash_bbt: Store the bad block table on flash
+ * @ecc_strength: Default hardware ECC strength
+ * @ecc_step_size: Default hardware ECC step size
+ * @parts: Optional static partition table
+ * @nr_parts: Number of entries in @parts
+ */
+struct phytium_nfc_caps {
+	unsigned int hw_ver;
+	unsigned int int_mask_bits;
+	unsigned int max_cs_nb;
+	unsigned int max_rb_nb;
+	bool legacy_of_bindings;
+	bool flash_bbt;
+	int ecc_strength;
+	int ecc_step_size;
+	struct mtd_partition *parts;
+	unsigned int nr_parts;
+};
+
+/**
+ * NAND controller structure: stores Phytium NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @irq: NAND controller interrupt line
+ * @chips: List containing all the NAND chips attached to
+ *         this NAND controller
+ * @selected_chip: Currently selected target chip
+ * @caps: NAND controller capabilities for each compatible string
+ * @dsp_addr: Buffer holding the command descriptors (DSPs)
+ * @dsp_phy_addr: DMA address of @dsp_addr
+ * @dma_buf: Bounce buffer used for data transfers
+ * @dma_offset: Current read offset within @dma_buf
+ * @dma_phy_addr: DMA address of @dma_buf
+ * @inter_pro: Detected NAND protocol (ONFI/JEDEC/other)
+ * @inter_mode: Active data interface mode
+ * @timing_mode: Active timing mode
+ * @spinlock: Protects command submission registers
+ */
+struct phytium_nfc {
+	struct nand_controller controller;
+	struct device *dev;
+	void __iomem *regs;
+	int irq;
+	struct list_head chips;
+	struct nand_chip *selected_chip;
+	struct phytium_nfc_caps *caps;
+
+	void *dsp_addr;
+	dma_addr_t dsp_phy_addr;
+
+	void *dma_buf;
+	u32 dma_offset;
+	dma_addr_t dma_phy_addr;
+
+	enum nand_inter_pro inter_pro;
+	enum nand_inter_mode inter_mode;
+	u32 timing_mode;
+
+	spinlock_t spinlock;
+};
+
+/**
+ * Derive a duration in number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Convert the duration to nano-seconds, then divide by the clock period
+ * and return the number of clock periods, rounded up.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, (period_ns)))
+#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64((ps), 1000), \
+						      (period_ns)))
+
+struct phytium_nfc_cmd_ctrl {
+	u16 csel:4;
+	u16 dbc:1;
+	u16 addr_cyc:3;
+	u16 nc:1;
+#define TYPE_RESET		0x00
+#define TYPE_SET_FTR		0x01
+#define TYPE_GET_FTR		0x02
+#define TYPE_READ_ID		0x03
+#define TYPE_PAGE_PRO		0x04
+#define TYPE_ERASE		0x05
+#define TYPE_READ		0x06
+#define TYPE_TOGGLE		0x07
+#define TYPE_READ_PARAM		0x02
+#define TYPE_READ_STATUS	0x03
+#define TYPE_CH_READ_COL	0x03
+#define TYPE_CH_ROW_ADDR	0x01
+#define TYPE_CH_WR_COL		0x01
+	u16 cmd_type:4;
+	u16 dc:1;
+	u16 auto_rs:1;
+	u16 ecc_en:1;
+};
+
+/**
+ * NAND driver structure filled during the parsing of the ->exec_op() subop
+ * subset of instructions.
+ *
+ * @cmd:		Opcode bytes sent during the command cycles
+ * @cmd_ctrl:		Command port control word (see phytium_nfc_cmd_ctrl)
+ * @addr:		Address cycles payload
+ * @page_cnt:		Number of pages involved in the operation
+ * @mem_addr_first:	First memory address of the operation
+ * @cmd_len:		Number of valid bytes in @cmd
+ * @addr_len:		Number of valid bytes in @addr
+ * @cle_ale_delay_ns:	Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms:	Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns:	Optional delay after waiting for the RB pin
+ * @data_delay_ns:	Optional delay after the data xfer
+ * @data_instr_idx:	Index of the data instruction in the subop
+ * @data_instr:		Pointer to the data instruction in the subop
+ */
+struct phytium_nfc_op {
+	u8 cmd[2];
+	union {
+		u16 ctrl;
+		struct phytium_nfc_cmd_ctrl nfc_ctrl;
+	} cmd_ctrl;
+	u8 addr[PHYTIUM_NFC_ADDR_MAX_LEN];
+	u16 page_cnt;
+	u8 mem_addr_first[PHYTIUM_NFC_ADDR_MAX_LEN];
+
+	u32 cmd_len;
+	u32 addr_len;
+
+	u32 cle_ale_delay_ns;
+	u32 rdy_timeout_ms;
+	u32 rdy_delay_ns;
+	u32 data_delay_ns;
+	u32 data_instr_idx;
+	struct nand_op_instr *data_instr;
+} __attribute__ ((__packed__));
+
+#define TIMING_ASY_NUM		12
+#define TIMING_SYN_NUM		14
+#define TIMING_TOG_NUM		12
+
+#define TMP_DMA_DEBUG		0	/* Temporary dma space */
+
+int phytium_nand_init(struct phytium_nfc *nfc);
+int phytium_nand_remove(struct phytium_nfc *nfc);
+int phytium_nand_prepare(struct phytium_nfc *nfc);
+int phytium_nand_suspend(struct phytium_nfc *nfc);
+int phytium_nand_resume(struct phytium_nfc *nfc);
+
+irqreturn_t phytium_nfc_isr(int irq, void *dev_id);
+
+#endif /* NAND_PHYTIUM_NAND_H */
diff --git a/drivers/mtd/nand/raw/phytium_nand_pci.c b/drivers/mtd/nand/raw/phytium_nand_pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..6ce76dd15161a183bbad882ff7a1c333ecdb5095
--- /dev/null
+++ b/drivers/mtd/nand/raw/phytium_nand_pci.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI driver for Phytium NAND flash controller
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
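+ *
+ * Thin PCI glue: it enables the device, maps BAR 0, sets the 64-bit DMA
+ * masks and hands the controller over to the common phytium_nand core.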
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+
+#include "phytium_nand.h"
+
+#define DRV_NAME	"phytium_nand_pci"
+
+static struct mtd_partition partition_info[] = {
+	{
+		.name = "Flash partition 1",
+		.offset = 0x0000000,
+		.size = 0x4000000 },
+	{
+		.name = "Flash partition 2",
+		.offset = 0x4000000,
+		.size = 0x8000000 },
+	{
+		.name = "Flash partition 3",
+		.offset = 0x8000000,
+		.size = 0x10000000 },
+	{
+		.name = "Flash partition 4",
+		.offset = 0x10000000,
+		.size = 0x12000000 },
+	{
+		.name = "Flash partition 5",
+		.offset = 0x12000000,
+		.size = 0x14000000 },
+};
+
+static struct phytium_nfc_caps x100_nfc_caps = {
+	.hw_ver = 1,
+	.int_mask_bits = 13,
+	.max_cs_nb = 2,
+	.max_rb_nb = 1,
+	.legacy_of_bindings = true,
+	.ecc_strength = 4,
+	.ecc_step_size = 512,
+	.nr_parts = 5,
+	.parts = partition_info,
+};
+
+static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct phytium_nfc *nfc;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev));
+	if (ret) {
+		dev_err(&pdev->dev, "I/O memory remapping failed\n");
+		return ret;
+	}
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	nfc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_nfc),
+			   GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	nfc->dev = &pdev->dev;
+	nfc->regs = pcim_iomap_table(pdev)[0];
+	nfc->irq = pdev->irq;
+	nfc->caps = &x100_nfc_caps;
+
+	ret = devm_request_irq(nfc->dev, nfc->irq, phytium_nfc_isr,
+			       IRQF_SHARED, "phytium-nfc-pci", nfc);
+	if (ret) {
+		dev_err(nfc->dev, "Failed to register NFC interrupt.\n");
+		return ret;
+	}
+
+	ret = phytium_nand_init(nfc);
+	if (ret)
+		return ret;
+
+	pci_set_drvdata(pdev, nfc);
+
+	return ret;
+}
+
+static void phytium_pci_remove(struct pci_dev *pdev)
+{
+	struct phytium_nfc *nfc = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = phytium_nand_remove(nfc);
+	if (ret)
+		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
+}
+
+static int __maybe_unused phytium_nfc_prepare(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct phytium_nfc *nfc = pci_get_drvdata(pci);
+
+	return phytium_nand_prepare(nfc);
+}
+
+static int __maybe_unused phytium_nfc_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct phytium_nfc *nfc = pci_get_drvdata(pci);
+
+	return phytium_nand_resume(nfc);
+}
+
+static const struct dev_pm_ops phytium_pci_dev_pm_ops = {
+	.prepare = phytium_nfc_prepare,
+	.resume = phytium_nfc_resume,
+};
+
+static const struct pci_device_id phytium_pci_id_table[] = {
+	{ PCI_VDEVICE(PHYTIUM, 0xdc29) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, phytium_pci_id_table);
+
+static struct pci_driver phytium_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= phytium_pci_id_table,
+	.probe		= phytium_pci_probe,
+	.remove		= phytium_pci_remove,
+	.driver	= {
+		.pm = &phytium_pci_dev_pm_ops,
+	},
+};
+module_pci_driver(phytium_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PCI driver for Phytium NAND controller");
+MODULE_AUTHOR("Zhu Mingshuai ");
diff --git a/drivers/mtd/nand/raw/phytium_nand_plat.c b/drivers/mtd/nand/raw/phytium_nand_plat.c
new file mode 100644
index 0000000000000000000000000000000000000000..b3fde07b4aeb18b16db35fcd061716ff6c8c8d9f
--- /dev/null
+++ b/drivers/mtd/nand/raw/phytium_nand_plat.c
@@ -0,0
+1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for Phytium NAND flash controller
+ *
+ * Copyright (c) 2020-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "phytium_nand.h"
+
+#define DRV_NAME	"phytium_nand_plat"
+
+static int phytium_nfc_plat_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *r;
+	struct phytium_nfc *nfc;
+	unsigned int ecc_strength = 0;
+	unsigned int ecc_step_size = 0;
+	int ret;
+
+	nfc = devm_kzalloc(&pdev->dev, sizeof(struct phytium_nfc),
+			   GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	nfc->dev = dev;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->regs = devm_ioremap_resource(dev, r);
+	if (IS_ERR(nfc->regs))
+		return PTR_ERR(nfc->regs);
+
+	dev_info(nfc->dev, "NFC register address: %p, phys address: %llx\n",
+		 nfc->regs, (u64)r->start);
+
+	nfc->irq = platform_get_irq(pdev, 0);
+	if (nfc->irq < 0) {
+		dev_err(dev, "failed to retrieve irq\n");
+		return nfc->irq;
+	}
+
+	ret = devm_request_irq(dev, nfc->irq, phytium_nfc_isr, 0,
+			       "phytium-nfc-plat", nfc);
+	if (ret) {
+		dev_err(nfc->dev, "Failed to register NFC interrupt.\n");
+		return ret;
+	}
+
+	nfc->caps = devm_kzalloc(dev, sizeof(struct phytium_nfc_caps), GFP_KERNEL);
+	if (!nfc->caps)
+		return -ENOMEM;
+
+	/* Currently hard-coded parameters */
+	nfc->caps->hw_ver = 2;
+	nfc->caps->int_mask_bits = 17;
+	nfc->caps->max_cs_nb = 4;
+	nfc->caps->max_rb_nb = 4;
+	nfc->caps->nr_parts = 0;
+	nfc->caps->parts = NULL;
+
+	device_property_read_u32(&pdev->dev, "nand-ecc-strength", &ecc_strength);
+	nfc->caps->ecc_strength = ecc_strength ? ecc_strength : 8;
+
+	device_property_read_u32(&pdev->dev, "nand-ecc-step-size", &ecc_step_size);
+	nfc->caps->ecc_step_size = ecc_step_size ?
ecc_step_size : 512;
+
+	ret = phytium_nand_init(nfc);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, nfc);
+
+	return ret;
+}
+
+static int phytium_nfc_plat_remove(struct platform_device *pdev)
+{
+	struct phytium_nfc *nfc = platform_get_drvdata(pdev);
+
+	return phytium_nand_remove(nfc);
+}
+
+static int __maybe_unused phytium_nfc_plat_prepare(struct device *dev)
+{
+	struct phytium_nfc *nfc = dev_get_drvdata(dev);
+
+	return phytium_nand_prepare(nfc);
+}
+
+static int __maybe_unused phytium_nfc_plat_resume(struct device *dev)
+{
+	struct phytium_nfc *nfc = dev_get_drvdata(dev);
+
+	return phytium_nand_resume(nfc);
+}
+
+static const struct dev_pm_ops phytium_dev_pm_ops = {
+	.prepare = phytium_nfc_plat_prepare,
+	.resume = phytium_nfc_plat_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id phytium_nfc_of_ids[] = {
+	{ .compatible = "phytium,nfc", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, phytium_nfc_of_ids);
+#endif
+
+static struct platform_driver phytium_nfc_plat_driver = {
+	.driver	= {
+		.name		= DRV_NAME,
+		.of_match_table	= of_match_ptr(phytium_nfc_of_ids),
+		.pm		= &phytium_dev_pm_ops,
+	},
+	.probe = phytium_nfc_plat_probe,
+	.remove	= phytium_nfc_plat_remove,
+};
+module_platform_driver(phytium_nfc_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Phytium NAND controller Platform driver");
+MODULE_AUTHOR("Zhu Mingshuai ");
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 37775fc09e09515efda6c41d7167b8a4decadc09..ff2af58a9a4a9cdeb4af430cefd38cf7b91c911c 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -7,6 +7,15 @@ menuconfig MTD_SPI_NOR
 
 if MTD_SPI_NOR
 
+config SPI_PHYTIUM_QUADSPI
+	tristate "Phytium Quad SPI Controller"
+	depends on ARCH_PHYTIUM || ARM
+	depends on OF && HAS_IOMEM
+	help
+	  This enables support for the Phytium Quad SPI controller in master
+	  mode. This driver does not support generic SPI; the implementation
+	  only supports SPI NOR.
+
 config MTD_MT81xx_NOR
 	tristate "Mediatek MT81xx SPI NOR flash controller"
 	depends on HAS_IOMEM
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index f4c61d282abd5c29538d3e208615f799fcaad251..ebc8ce095bd08493749591bfeb70bb7a36d68eb6 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor.o
+obj-$(CONFIG_SPI_PHYTIUM_QUADSPI)	+= phytium-quadspi.o
 obj-$(CONFIG_SPI_ASPEED_SMC)	+= aspeed-smc.o
 obj-$(CONFIG_SPI_ATMEL_QUADSPI)	+= atmel-quadspi.o
 obj-$(CONFIG_SPI_CADENCE_QUADSPI)	+= cadence-quadspi.o
diff --git a/drivers/mtd/spi-nor/phytium-quadspi.c b/drivers/mtd/spi-nor/phytium-quadspi.c
new file mode 100644
index 0000000000000000000000000000000000000000..ec317602f3a56e6d0c2e7a37cf772571b5e8049b
--- /dev/null
+++ b/drivers/mtd/spi-nor/phytium-quadspi.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium QuadSPI driver.
+ *
+ * Copyright (c) 2019-2023 Phytium Technology Co., Ltd.
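+ *
+ * Flash reads are serviced through a memory-mapped window (mm_base);
+ * commands, register accesses and erases go through the command port
+ * registers (io_base).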
+ */
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#define QSPI_FLASH_CAP_REG		0x000
+#define QSPI_RD_CFG_REG			0x004
+#define QSPI_WR_CFG_REG			0x008
+#define QSPI_FLUSH_REG			0x00C
+#define QSPI_CMD_PORT_REG		0x010
+#define QSPI_ADDR_PORT_REG		0x014
+#define QSPI_HD_PORT_REG		0x018
+#define QSPI_LD_PORT_REG		0x01C
+#define QSPI_FUN_SET_REG		0x020
+#define QSPI_WIP_REG			0x024
+#define QSPI_WP_REG			0x028
+#define QSPI_MODE_REG			0x02C
+
+#define QSPI_FLASH_CAP_NUM_SHIFT	3
+#define QSPI_FLASH_CAP_NUM_MASK		(0x3 << QSPI_FLASH_CAP_NUM_SHIFT)
+#define QSPI_FLASH_CAP_CAP_SHIFT	0
+#define QSPI_FLASH_CAP_CAP_MASK		(0x7 << QSPI_FLASH_CAP_CAP_SHIFT)
+
+#define QSPI_RD_CFG_RD_CMD_SHIFT	24
+#define QSPI_RD_CFG_RD_CMD_MASK		(0xFF << QSPI_RD_CFG_RD_CMD_SHIFT)
+#define QSPI_RD_CFG_RD_THROUGH_SHIFT	23
+#define QSPI_RD_CFG_RD_THROUGH_MASK	(0x01 << QSPI_RD_CFG_RD_THROUGH_SHIFT)
+#define QSPI_RD_CFG_RD_TRANSFER_SHIFT	20
+#define QSPI_RD_CFG_RD_TRANSFER_MASK	(0x07 << QSPI_RD_CFG_RD_TRANSFER_SHIFT)
+#define QSPI_RD_CFG_RD_ADDR_SEL_SHIFT	19
+#define QSPI_RD_CFG_RD_ADDR_SEL_MASK	(0x1 << QSPI_RD_CFG_RD_ADDR_SEL_SHIFT)
+#define QSPI_RD_CFG_RD_LATENCY_SHIFT	18
+#define QSPI_RD_CFG_RD_LATENCY_MASK	(0x1 << QSPI_RD_CFG_RD_LATENCY_SHIFT)
+#define QSPI_RD_CFG_MODE_BYTE_SHIFT	17
+#define QSPI_RD_CFG_MODE_BYTE_MASK	(0x1 << QSPI_RD_CFG_MODE_BYTE_SHIFT)
+#define QSPI_RD_CFG_CMD_SIGN_SHIFT	9
+#define QSPI_RD_CFG_CMD_SIGN_MASK	(0xFF << QSPI_RD_CFG_CMD_SIGN_SHIFT)
+#define QSPI_RD_CFG_DUMMY_SHIFT		4
+#define QSPI_RD_CFG_DUMMY_MASK		(0x1F << QSPI_RD_CFG_DUMMY_SHIFT)
+#define QSPI_RD_CFG_D_BUFFER_SHIFT	3
+#define QSPI_RD_CFG_D_BUFFER_MASK	(0x1 << QSPI_RD_CFG_D_BUFFER_SHIFT)
+#define QSPI_RD_CFG_RD_SCK_SEL_SHIFT	0
+#define QSPI_RD_CFG_RD_SCK_SEL_MASK	(0x3 << QSPI_RD_CFG_RD_SCK_SEL_SHIFT)
+
+#define QSPI_WR_CFG_WR_CMD_SHIFT	24
+#define QSPI_WR_CFG_WR_CMD_MASK		(0xFF << QSPI_WR_CFG_WR_CMD_SHIFT)
+#define QSPI_WR_CFG_WR_WAIT_SHIFT	9
+#define QSPI_WR_CFG_WR_WAIT_MASK	(0x01 << QSPI_WR_CFG_WR_WAIT_SHIFT)
+#define QSPI_WR_CFG_WR_THROUGH_SHIFT	8
+#define QSPI_WR_CFG_WR_THROUGH_MASK	(0x01 << QSPI_WR_CFG_WR_THROUGH_SHIFT)
+#define QSPI_WR_CFG_WR_TRANSFER_SHIFT	5
+#define QSPI_WR_CFG_WR_TRANSFER_MASK	(0x7 << QSPI_WR_CFG_WR_TRANSFER_SHIFT)
+#define QSPI_WR_CFG_WR_ADDR_SEL_SHIFT	4
+#define QSPI_WR_CFG_WR_ADDR_SEL_MASK	(0x1 << QSPI_WR_CFG_WR_ADDR_SEL_SHIFT)
+#define QSPI_WR_CFG_WR_MODE_SHIFT	3
+#define QSPI_WR_CFG_WR_MODE		(0x01 << QSPI_WR_CFG_WR_MODE_SHIFT)
+#define QSPI_WR_CFG_WR_SCK_SEL_SHIFT	0
+#define QSPI_WR_CFG_WR_SCK_SEL_MASK	(0x7 << QSPI_WR_CFG_WR_SCK_SEL_SHIFT)
+
+#define QSPI_FLUSH_EN			(0x1 << 0)
+
+#define QSPI_CMD_PORT_CMD_SHIFT		24
+#define QSPI_CMD_PORT_CMD_MASK		(0xFF << QSPI_CMD_PORT_CMD_SHIFT)
+#define QSPI_CMD_PORT_WAIT_SHIFT	22
+#define QSPI_CMD_PORT_WAIT_MASK		(0x1 << QSPI_CMD_PORT_WAIT_SHIFT)
+#define QSPI_CMD_PORT_THROUGH_SHIFT	21
+#define QSPI_CMD_PORT_THROUGH_MASK	(0x1 << QSPI_CMD_PORT_THROUGH_SHIFT)
+#define QSPI_CMD_PORT_CS_SHIFT		19
+#define QSPI_CMD_PORT_CS_MASK		(0x3 << QSPI_CMD_PORT_CS_SHIFT)
+#define QSPI_CMD_PORT_TRANSFER_SHIFT	16
+#define QSPI_CMD_PORT_TRANSFER_MASK	(0x7 << QSPI_CMD_PORT_TRANSFER_SHIFT)
+#define QSPI_CMD_PORT_CMD_ADDR_SHIFT	15
+#define QSPI_CMD_PORT_CMD_ADDR_MASK	(0x1 << QSPI_CMD_PORT_CMD_ADDR_SHIFT)
+#define QSPI_CMD_PORT_LATENCY_SHIFT	14
+#define QSPI_CMD_PORT_LATENCY_MASK	(0x1 << QSPI_CMD_PORT_LATENCY_SHIFT)
+#define QSPI_CMD_PORT_DATA_TRANSFER_SHIFT	13
+#define
QSPI_CMD_PORT_DATA_TRANSFER_MASK (0x1 << 13) +#define QSPI_CMD_PORT_SEL_SHIFT 12 +#define QSPI_CMD_PORT_SEL_MASK (0x1 << QSPI_CMD_PORT_SEL_SHIFT) +#define QSPI_CMD_PORT_DUMMY_SHIFT 7 +#define QSPI_CMD_PORT_DUMMY_MASK (0x1F << QSPI_CMD_PORT_DUMMY_SHIFT) +#define QSPI_CMD_PORT_DUMMY(x) (((x) << QSPI_CMD_PORT_DUMMY_SHIFT) & QSPI_CMD_PORT_DUMMY_MASK) +#define QSPI_CMD_PORT_P_BUFFER_SHIFT 6 +#define QSPI_CMD_PORT_P_BUFFER_MASK (0x1 << QSPI_CMD_PORT_P_BUFFER_SHIFT) +#define QSPI_CMD_PORT_RW_NUM_SHIFT 3 +#define QSPI_CMD_PORT_RW_NUM_MASK (0x7 << QSPI_CMD_PORT_RW_NUM_SHIFT) +#define QSPI_CMD_PORT_SCK_SEL_SHIFT 0 +#define QSPI_CMD_PORT_SCK_SEL_MASK (0x7 << QSPI_CMD_PORT_SCK_SEL_SHIFT) + +#define QSPI_FUN_SET_HOLD_SHIFT 24 +#define QSPI_FUN_SET_HOLD_MASK (0xFF << QSPI_FUN_SET_HOLD_SHIFT) +#define QSPI_FUN_SET_SETUP_SHIFT 16 +#define QSPI_FUN_SET_SETUP_MASK (0xFF << QSPI_FUN_SET_SETUP_SHIFT) +#define QSPI_FUN_SET_DELAY_SHIFT 0 +#define QSPI_FUN_SET_DELAY_MASK (0xFFFF << QSPI_FUN_SET_DELAY_SHIFT) + +#define QSPI_WIP_W_CMD_SHIFT 24 +#define QSPI_WIP_W_CMD_MASK (0xFF << QSPI_WIP_W_CMD_SHIFT) +#define QSPI_WIP_W_TRANSFER_SHIFT 3 +#define QSPI_WIP_W_TRANSFER_MASK (0x3 << QSPI_WIP_W_TRANSFER_SHIFT) +#define QSPI_WIP_W_SCK_SEL_SHIFT 0 +#define QSPI_WIP_W_SCK_SEL_MASK (0x7 << QSPI_WIP_W_SCK_SEL_SHIFT) + +#define QSPI_WP_EN_SHIFT 17 +#define QSPI_WP_EN_MASK (0x1 << QSPI_WP_EN_SHIFT) +#define QSPI_WP_IO2_SHIFT 16 +#define QSPI_WP_IO2_MASK (0x1 << QSPI_WP_IO2_SHIFT) +#define QSPI_WP_HOLD_SHIFT 8 +#define QSPI_WP_HOLD_MASK (0xFF << QSPI_WP_HOLD_SHIFT) +#define QSPI_WP_SETUP_SHIFT 0 +#define QSPI_WP_SETUP_MASK (0xFF << QSPI_WP_SETUP_SHIFT) + +#define QSPI_MODE_VALID_SHIFT 8 +#define QSPI_MODE_VALID_MASK (0xFF << QSPI_MODE_VALID_SHIFT) +#define QSPI_MODE_SHIFT 0 +#define QSPI_MODE_MASK (0xFF << QSPI_MODE_SHIFT) + +#define FSIZE_VAL(size) (__fls(size) - 1) + +#define PHYTIUM_MAX_MMAP_S SZ_512M +#define PHYTIUM_MAX_NORCHIP 4 + +#define PHYTIUM_QSPI_FIFO_SZ 32 +#define PHYTIUM_QSPI_FIFO_TIMEOUT_US 50000 +#define PHYTIUM_QSPI_BUSY_TIMEOUT_US 100000 + +#define PHYTIUM_SCK_SEL 0x05 +#define PHYTIUM_CMD_SCK_SEL 0x07 + +#define PHYTIUM_FMODE_MM 0x01 +#define PHYTIUM_FMODE_IN 0x02 + +/* + * the codes of the different commands + */ +#define CMD_WRDI 0x04 +#define CMD_RDID 0x9F +#define CMD_RDSR 0x05 +#define CMD_WREN 0x06 +#define CMD_RDAR 0x65 +#define CMD_P4E 0x20 +#define CMD_4P4E 0x21 +#define CMD_BE 0x60 +#define CMD_4BE 0xC7 +#define CMD_READ 0x03 +#define CMD_FAST_READ 0x0B +#define CMD_QOR 0x6B +#define CMD_QIOR 0xEB +#define CMD_DDRFR 0x0D +#define CMD_DDRQIOQ 0xED +#define CMD_PP 0x02 +#define CMD_QPP 0x32 +#define CMD_SE 0xD8 +#define CMD_4FAST_READ 0x0C +#define CMD_4READ 0x13 +#define CMD_4QOR 0x6C +#define CMD_4QIOR 0xEC +#define CMD_4DDRFR 0x0E +#define CMD_4DDRQIOR 0xEE +#define CMD_4PP 0x12 +#define CMD_4QPP 0x34 +#define CMD_4SE 0xDC + +#define PHYTIUM_QSPI_1_1_1 0 +#define PHYTIUM_QSPI_1_1_2 1 +#define PHYTIUM_QSPI_1_1_4 2 +#define PHYTIUM_QSPI_1_2_2 3 +#define PHYTIUM_QSPI_1_4_4 4 +#define PHYTIUM_QSPI_2_2_2 5 +#define PHYTIUM_QSPI_4_4_4 6 + +struct phytium_qspi_flash { + struct spi_nor nor; + struct phytium_qspi *qspi; + u32 cs; + u32 fsize; + u32 presc; + u32 clk_div; + u32 read_mode; + bool registered; + u32 prefetch_limit; + u32 addr_width; + u32 read_cmd; +}; + +struct phytium_qspi { + struct device *dev; + void __iomem *io_base; + void __iomem *mm_base; + resource_size_t mm_size; + u32 nor_num; + struct clk *clk; + u32 clk_rate; + struct phytium_qspi_flash flash[PHYTIUM_MAX_NORCHIP]; + + spinlock_t 
spinlock;
+
+	/*
+	 * Protects the device configuration, which may differ between
+	 * two flash accesses (bk1, bk2).
+	 */
+	struct mutex lock;
+};
+
+/* Caller must have enabled p_buffer */
+static int memcpy_from_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len)
+{
+	int i;
+	u32 val = 0;
+
+	if (!qspi || !buf)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		if (0 == i % 4)
+			val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG);
+
+		buf[i] = (u_char) (val >> (i % 4) * 8) & 0xFF;
+	}
+
+	return 0;
+}
+
+/* Used without enabling p_buffer */
+static int memcpy_to_ftreg(struct phytium_qspi *qspi, u_char *buf, size_t len)
+{
+	u32 val = 0;
+
+	if (!qspi || !buf || (len >= 8))
+		return -EINVAL;
+
+	if (1 == len) {
+		val = buf[0];
+	} else if (2 == len) {
+		val = buf[1];
+		val = (val << 8) + buf[0];
+	} else if (3 == len) {
+		val = buf[2];
+		val = (val << 8) + buf[1];
+		val = (val << 8) + buf[0];
+	} else if (4 == len) {
+		val = buf[3];
+		val = (val << 8) + buf[2];
+		val = (val << 8) + buf[1];
+		val = (val << 8) + buf[0];
+	}
+
+	writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG);
+
+	return 0;
+}
+
+static int phytium_qspi_wait_cmd(struct phytium_qspi *qspi,
+				 struct phytium_qspi_flash *flash)
+{
+	u32 cmd = 0;
+	u32 cnt = 0;
+
+	cmd |= CMD_RDSR << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT);
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+
+	cnt = PHYTIUM_QSPI_BUSY_TIMEOUT_US / 10;
+	while (readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG) & 0x01) {
+		udelay(10);
+		cnt--;
+		if (!cnt) {
+			dev_err(qspi->dev, "wait command process timeout\n");
+			break;
+		}
+	}
+
+	return cnt ? 0 : -ETIMEDOUT;
+}
+
+static int phytium_qspi_cmd_enable(struct phytium_qspi *qspi)
+{
+	u32 val = 0;
+
+	writel_relaxed(val, qspi->io_base + QSPI_LD_PORT_REG);
+
+	return 0;
+}
+
+static int phytium_qspi_write_enable(struct phytium_qspi *qspi,
+				     struct phytium_qspi_flash *flash)
+{
+	u32 cmd = 0;
+
+	cmd = CMD_WREN << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+
+	return 0;
+}
+
+static int phytium_qspi_write_disable(struct phytium_qspi *qspi,
+				      struct phytium_qspi_flash *flash)
+{
+	u32 cmd = 0;
+
+	cmd = CMD_WRDI << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+
+	return 0;
+}
+
+static int phytium_qspi_read_flash_id(struct phytium_qspi *qspi,
+	struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len)
+{
+	u32 cmd = 0;
+	unsigned long iflags;
+
+	cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT);
+	cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT);
+	cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+
+	spin_lock_irqsave(&qspi->spinlock, iflags);
+	memcpy_from_ftreg(qspi, buf, len);
+	spin_unlock_irqrestore(&qspi->spinlock, iflags);
+
+	dev_dbg(qspi->dev, "read flash id:%x\n", *(u32 *)buf);
+	return 0;
+}
+
+static int phytium_qspi_read_flash_sfdp(struct phytium_qspi *qspi,
+	struct phytium_qspi_flash *flash, struct spi_nor *nor, loff_t from, u8 *buf, int len)
+{
+	unsigned long iflags;
+	u32 cmd = 0;
+	u8 opcode = nor->read_opcode;
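+	/*
+	 * SFDP reads are serviced through the command port: one address
+	 * phase plus (read_dummy - 1) dummy clocks, after which the data
+	 * is drained from the LD port FIFO under the spinlock.
+	 */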
+
+	cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT);
+	cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT);
+	cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT);
+	cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+	cmd |= BIT(QSPI_CMD_PORT_LATENCY_SHIFT);
+	cmd |= QSPI_CMD_PORT_DUMMY(nor->read_dummy - 1);
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	writel_relaxed(from, qspi->io_base + QSPI_ADDR_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+
+	spin_lock_irqsave(&qspi->spinlock, iflags);
+	memcpy_from_ftreg(qspi, buf, len);
+	spin_unlock_irqrestore(&qspi->spinlock, iflags);
+
+	dev_dbg(qspi->dev, "read flash sfdp:0x%llx 0x%llx\n",
+		*(u64 *)buf, *(u64 *)(buf + 8));
+	return len;
+}
+
+static int phytium_qspi_read_flash_sr1(struct phytium_qspi *qspi,
+	struct phytium_qspi_flash *flash, u8 opcode, u8 *buf, int len)
+{
+	u32 cmd = 0;
+	u32 val;
+
+	cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT);
+	cmd |= (len << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK;
+	cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+
+	val = readl_relaxed(qspi->io_base + QSPI_LD_PORT_REG);
+	buf[0] = (u8)val;
+
+	return 0;
+}
+
+static int phytium_qspi_read_reg(struct spi_nor *nor,
+				 u8 opcode, u8 *buf, int len)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct device *dev = flash->qspi->dev;
+	struct phytium_qspi *qspi = flash->qspi;
+	unsigned long iflags;
+	u32 cmd = 0;
+
+	dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
+
+	switch (opcode) {
+	case CMD_RDID:
+		phytium_qspi_read_flash_id(qspi, flash, opcode, buf, len);
+		return 0;
+	case CMD_RDSR:
+		phytium_qspi_read_flash_sr1(qspi, flash, opcode, buf, len);
+		return 0;
+	default:
+		break;
+	}
+
+	cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT);
+	cmd |= BIT(QSPI_CMD_PORT_P_BUFFER_SHIFT);
+	cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+
+	spin_lock_irqsave(&qspi->spinlock, iflags);
+	memcpy_from_ftreg(qspi, buf, len);
+	spin_unlock_irqrestore(&qspi->spinlock, iflags);
+
+	return 0;
+}
+
+static int phytium_qspi_write_reg(struct spi_nor *nor, u8 opcode,
+				  u8 *buf, int len)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct device *dev = flash->qspi->dev;
+	struct phytium_qspi *qspi = flash->qspi;
+	u32 cmd = 0;
+
+	dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n",
+		opcode, buf, len);
+
+	switch (opcode) {
+	case CMD_WREN:
+		phytium_qspi_write_enable(qspi, flash);
+		return 0;
+	case CMD_WRDI:
+		phytium_qspi_write_disable(qspi, flash);
+		return 0;
+	default:
+		break;
+	}
+
+	cmd = opcode << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	if ((len > 8) || !buf) {
+		dev_err(dev, "data length exceeds limit: command %#x, len: %d\n",
+			opcode, len);
+		return -EINVAL;
+	} else if (len > 0) {
+		cmd |= ((len - 1) << QSPI_CMD_PORT_RW_NUM_SHIFT) & QSPI_CMD_PORT_RW_NUM_MASK;
+		cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT);
+	}
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	memcpy_to_ftreg(qspi, buf, len);
+
+	return 0;
+}
+
+static ssize_t phytium_qspi_read_tmp(struct phytium_qspi *qspi, u32 read_cmd,
+				     loff_t from, size_t len, u_char *buf)
+{
+	u32 addr = (u32)from;
+	u64 val = 0;
+
+	if (!qspi)
+		return -EINVAL;
+
+	dev_dbg(qspi->dev, "read cmd:%x, addr:%x len:%zx\n", read_cmd, addr, len);
+	writel_relaxed(read_cmd, qspi->io_base + QSPI_RD_CFG_REG);
+
+	memcpy_fromio(buf, qspi->mm_base + addr, len);
+
+	val = *(u64 *)(buf);
+	dev_dbg(qspi->dev, "read val:%llx\n", val);
+
+	return len;
+}
+
+static ssize_t phytium_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+				 u_char *buf)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct phytium_qspi *qspi = flash->qspi;
+	u32 cmd = nor->read_opcode;
+	u32 addr = (u32)from;
+
+	addr = addr + flash->cs * flash->fsize;
+	dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n",
+		nor->read_opcode, buf, addr, len);
+
+	cmd = cmd << QSPI_RD_CFG_RD_CMD_SHIFT;
+	cmd |= BIT(QSPI_RD_CFG_D_BUFFER_SHIFT);
+	cmd |= flash->clk_div << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+
+	cmd &= ~QSPI_RD_CFG_RD_TRANSFER_MASK;
+	cmd |= (flash->addr_width << QSPI_RD_CFG_RD_TRANSFER_SHIFT);
+
+	switch (nor->read_opcode) {
+	case CMD_READ:
+	case CMD_FAST_READ:
+	case CMD_QIOR:
+	case CMD_QOR:
+		cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK;
+		break;
+	case CMD_4READ:
+	case CMD_4FAST_READ:
+	case CMD_4QOR:
+	case CMD_4QIOR:
+		cmd |= BIT(QSPI_RD_CFG_RD_ADDR_SEL_SHIFT);
+		break;
+	case 0x5A:	/* SFDP read */
+		cmd &= ~QSPI_RD_CFG_RD_ADDR_SEL_MASK;
+		return phytium_qspi_read_flash_sfdp(qspi, flash, nor, from, buf, len);
+	default:
+		break;
+	}
+
+	if ((PHYTIUM_QSPI_1_1_4 == flash->addr_width) ||
+	    (PHYTIUM_QSPI_1_4_4 == flash->addr_width)) {
+		cmd |= BIT(QSPI_RD_CFG_RD_LATENCY_SHIFT);
+
+		cmd &= ~QSPI_RD_CFG_DUMMY_MASK;
+		cmd |= (0x07 << QSPI_RD_CFG_DUMMY_SHIFT);
+	}
+
+	dev_dbg(qspi->dev, "read(%#.2x): cmd:%#x\n", nor->read_opcode, cmd);
+	if (cmd != flash->read_cmd)
+		flash->read_cmd = cmd;
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_RD_CFG_REG);
+
+	memcpy_fromio(buf, qspi->mm_base + addr, len);
+
+	return len;
+}
+
+static ssize_t phytium_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+				  const u_char *buf)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct device *dev = flash->qspi->dev;
+	struct phytium_qspi *qspi = flash->qspi;
+	u32 cmd = nor->program_opcode;
+	u32 addr = (u32)to;
+	int i;
+	u_char tmp[8] = {0};
+	size_t mask = 0x03;
+
+	addr = addr + flash->cs * flash->fsize;
+	dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n",
+		nor->program_opcode, buf, addr, len);
+
+	if (addr & 0x03) {
+		dev_err(dev, "Addr not four-byte aligned!\n");
+		return -EINVAL;
+	}
+
+	cmd = cmd << QSPI_WR_CFG_WR_CMD_SHIFT;
+	cmd |= BIT(QSPI_WR_CFG_WR_MODE_SHIFT);
+	cmd |= PHYTIUM_CMD_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+
+	switch (nor->program_opcode) {
+	case CMD_PP:
+	case CMD_QPP:
+		cmd &= ~QSPI_WR_CFG_WR_ADDR_SEL_MASK;
+		break;
+	case CMD_4PP:
+	case CMD_4QPP:
+		cmd |= BIT(QSPI_WR_CFG_WR_ADDR_SEL_SHIFT);
+		break;
+	default:
+		dev_err(qspi->dev, "Unsupported program command:%#x\n",
+			nor->program_opcode);
+		return -EINVAL;
+	}
+
+	dev_dbg(qspi->dev, "write cmd:%x\n", cmd);
+	writel_relaxed(cmd, qspi->io_base + QSPI_WR_CFG_REG);
+
+	for (i = 0; i < len/4; i++) {
+		
writel_relaxed(*(u32 *)(buf + 4*i), qspi->mm_base + addr + 4*i);
+	}
+
+	if (len & mask) {
+		addr = addr + (len & ~mask);
+		phytium_qspi_read_tmp(qspi, flash->read_cmd, addr, 4, &tmp[0]);
+		memcpy(tmp, buf + (len & ~mask), len & mask);
+		writel_relaxed(*(u32 *)(tmp), qspi->mm_base + addr);
+	}
+
+	writel_relaxed(QSPI_FLUSH_EN, qspi->io_base + QSPI_FLUSH_REG);
+
+	phytium_qspi_wait_cmd(qspi, flash);
+
+	return len;
+}
+
+static int phytium_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct device *dev = flash->qspi->dev;
+	struct phytium_qspi *qspi = flash->qspi;
+	u32 cmd = nor->erase_opcode;
+	u32 addr = (u32)offs;
+
+	dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs);
+
+	phytium_qspi_write_enable(qspi, flash);
+	cmd = cmd << QSPI_CMD_PORT_CMD_SHIFT;
+	cmd |= PHYTIUM_SCK_SEL << QSPI_CMD_PORT_SCK_SEL_SHIFT;
+	cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT;
+
+	/* s25fl256s1 does not support D8, DC, 20, 21 */
+	switch (nor->erase_opcode) {
+	case CMD_SE:
+		cmd &= ~QSPI_CMD_PORT_SEL_MASK;
+		cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT);
+		writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG);
+		break;
+	case CMD_4SE:
+		cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT);
+		cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT);
+		writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG);
+		break;
+	case CMD_P4E:
+		cmd &= ~QSPI_CMD_PORT_SEL_MASK;
+		cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT);
+		writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG);
+		break;
+	case CMD_4P4E:
+		cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT);
+		cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT);
+		writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG);
+		break;
+	case CMD_BE:
+		cmd &= ~QSPI_CMD_PORT_SEL_MASK;
+		break;
+	case CMD_4BE:
+		cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT);
+		break;
+	default:
+		dev_err(qspi->dev, "Unsupported erase command:%#x\n",
+			nor->erase_opcode);
+		return -EINVAL;
+	}
+
+	writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG);
+	phytium_qspi_cmd_enable(qspi);
+	phytium_qspi_wait_cmd(qspi, flash);
+
+	return 0;
+}
+
+static int phytium_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct phytium_qspi *qspi = flash->qspi;
+
+	mutex_lock(&qspi->lock);
+	return 0;
+}
+
+static void phytium_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+	struct phytium_qspi_flash *flash = nor->priv;
+	struct phytium_qspi *qspi = flash->qspi;
+
+	mutex_unlock(&qspi->lock);
+}
+
+static int phytium_qspi_get_flash_size(struct phytium_qspi *qspi, u32 size)
+{
+	u32 value;
+
+	switch (size) {
+	case SZ_4M:
+		value = 0;
+		break;
+	case SZ_8M:
+		value = 1;
+		break;
+	case SZ_16M:
+		value = 2;
+		break;
+	case SZ_32M:
+		value = 3;
+		break;
+	case SZ_64M:
+		value = 4;
+		break;
+	case SZ_128M:
+		value = 5;
+		break;
+	case SZ_256M:
+		value = 6;
+		break;
+	case SZ_512M:
+		value = 7;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return value;
+}
+
+static int phytium_qspi_flash_setup(struct phytium_qspi *qspi,
+				    struct fwnode_handle *np)
+{
+	struct spi_nor_hwcaps hwcaps = {
+		.mask = SNOR_HWCAPS_READ |
+			SNOR_HWCAPS_READ_FAST |
+			SNOR_HWCAPS_PP,
+	};
+	u32 width, presc;
+	u32 cs_num = 0;
+	u32 max_rate = 0;
+	u32 clk_div = 0;
+	u32 flash_cap = 0;
+	u32 addr_width = PHYTIUM_QSPI_1_1_1;
+	struct phytium_qspi_flash *flash;
+	struct mtd_info *mtd;
+	int ret;
+
+	fwnode_property_read_u32(np, "reg", &cs_num);
+	if (cs_num >= PHYTIUM_MAX_NORCHIP)
+		return -EINVAL;
+
+	fwnode_property_read_u32(np, "spi-max-frequency", &max_rate);
+	if (!max_rate)
+		return -EINVAL;
+
+	fwnode_property_read_u32(np, "spi-clk-div", &clk_div);
+	if (!clk_div)
+		clk_div = PHYTIUM_SCK_SEL;
+
+	if (clk_div < 4)
+		return -EINVAL;
+
+	presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1;
+
+	fwnode_property_read_u32(np, "spi-rx-bus-width", &width);
+	if (!width)
+		width = 1;
+
+	if (width == 4) {
+		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+		addr_width = PHYTIUM_QSPI_1_1_4;
+	} else if (width == 2) {
+		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+		addr_width = PHYTIUM_QSPI_1_1_2;
+	} else if (width != 1) {
+		return -EINVAL;
+	}
+
+	flash = &qspi->flash[cs_num];
+	flash->qspi = qspi;
+	flash->cs = cs_num;
+	flash->presc = presc;
+	flash->clk_div = clk_div;
+	flash->addr_width = addr_width;
+
+	flash->nor.dev = qspi->dev;
+	if (qspi->dev->of_node)
+		spi_nor_set_flash_node(&flash->nor, qspi->dev->of_node);
+	flash->nor.priv = flash;
+	mtd = &flash->nor.mtd;
+
+	flash->nor.read = phytium_qspi_read;
+	flash->nor.write = phytium_qspi_write;
+	flash->nor.erase = phytium_qspi_erase;
+	flash->nor.read_reg = phytium_qspi_read_reg;
+	flash->nor.write_reg = phytium_qspi_write_reg;
+	flash->nor.prepare = phytium_qspi_prep;
+	flash->nor.unprepare = phytium_qspi_unprep;
+
+	ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
+	if (ret) {
+		dev_err(qspi->dev, "device scan failed\n");
+		return ret;
+	}
+
+	flash->fsize = mtd->size;
+	flash->prefetch_limit = mtd->size - PHYTIUM_QSPI_FIFO_SZ;
+
+	ret = phytium_qspi_get_flash_size(flash->qspi, mtd->size);
+	if (ret < 0) {
+		dev_err(qspi->dev, "flash size invalid\n");
+		return ret;
+	}
+
+	flash_cap = cs_num << QSPI_FLASH_CAP_NUM_SHIFT;
+	flash_cap |= ret;
+	writel_relaxed(flash_cap, qspi->io_base + QSPI_FLASH_CAP_REG);
+
+	flash->read_mode = PHYTIUM_FMODE_MM;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(qspi->dev, "mtd device register failed\n");
+		return ret;
+	}
+
+	flash->registered = true;
+
+	dev_dbg(qspi->dev, "read mm:%s %px cs:%d bus:%d clk-div:%d\n",
+		flash->read_mode == PHYTIUM_FMODE_MM ? "yes" : "no",
+		qspi->mm_base, cs_num, width, clk_div);
+
+	dev_dbg(qspi->dev, "mtd->size:%llx, mtd->erasesize:%x, fsize:%x\n",
+		mtd->size, mtd->erasesize, flash->fsize);
+
+	return 0;
+}
+
+static void phytium_qspi_mtd_free(struct phytium_qspi *qspi)
+{
+	int i;
+
+	for (i = 0; i < PHYTIUM_MAX_NORCHIP; i++)
+		if (qspi->flash[i].registered)
+			mtd_device_unregister(&qspi->flash[i].nor.mtd);
+}
+
+static ssize_t clk_div_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct phytium_qspi *qspi = dev_get_drvdata(dev);
+	struct phytium_qspi_flash *flash = &qspi->flash[0];
+
+	return sprintf(buf, "Flash 0 clk-div: %d\n", flash->clk_div);
+}
+
+static ssize_t clk_div_store(struct device *dev,
+			     struct device_attribute *attr, const char *buf,
+			     size_t size)
+{
+	struct phytium_qspi *qspi = dev_get_drvdata(dev);
+	struct phytium_qspi_flash *flash = &qspi->flash[0];
+	long value;
+	char *token;
+	ssize_t status;
+
+	token = strsep((char **)&buf, " ");
+	if (!token)
+		return -EINVAL;
+
+	status = kstrtol(token, 0, &value);
+	if (status)
+		return status;
+
+	flash->clk_div = (u8)value;
+
+	return size;
+}
+static DEVICE_ATTR_RW(clk_div);
+
+static struct attribute *phytium_qspi_attrs[] = {
+	&dev_attr_clk_div.attr,
+	NULL,
+};
+
+static struct attribute_group phytium_qspi_attr_group = {
+	.attrs = phytium_qspi_attrs,
+};
+
+static int phytium_qspi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fwnode_handle *flash_np;
+	struct phytium_qspi *qspi;
+	struct resource *res = NULL;
+	int ret;
+
+	qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
+	if (!qspi)
+		return -ENOMEM;
+
+	qspi->nor_num = device_get_child_node_count(dev);
+	if (!qspi->nor_num || qspi->nor_num > PHYTIUM_MAX_NORCHIP)
+		return -ENODEV;
+
+	if (dev->of_node)
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
+	else if (has_acpi_companion(dev))
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	qspi->io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qspi->io_base))
+		return PTR_ERR(qspi->io_base);
+
+	if (dev->of_node)
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
+	else if (has_acpi_companion(dev))
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+	qspi->mm_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qspi->mm_base))
+		return PTR_ERR(qspi->mm_base);
+
+	qspi->mm_size = resource_size(res);
+
+	if (dev->of_node) {
+		qspi->clk = devm_clk_get(dev, NULL);
+		if (IS_ERR(qspi->clk))
+			return PTR_ERR(qspi->clk);
+
+		qspi->clk_rate = clk_get_rate(qspi->clk);
+		if (!qspi->clk_rate)
+			return -EINVAL;
+
+		ret = clk_prepare_enable(qspi->clk);
+		if (ret) {
+			dev_err(dev, "can not enable the clock\n");
+			return ret;
+		}
+	} else if (has_acpi_companion(dev)) {
+		/* ACPI tables do not provide the clock rate */
+		qspi->clk_rate = 50000000;
+	}
+
+	qspi->dev = dev;
+	platform_set_drvdata(pdev, qspi);
+	mutex_init(&qspi->lock);
+	spin_lock_init(&qspi->spinlock);
+
+	fwnode_for_each_available_child_node(dev_fwnode(dev), flash_np) {
+		ret = phytium_qspi_flash_setup(qspi, flash_np);
+		if (ret) {
+			dev_err(dev, "unable to setup flash chip\n");
+			goto err_flash;
+		}
+	}
+
+	ret = sysfs_create_group(&qspi->dev->kobj, &phytium_qspi_attr_group);
+	if (ret) {
+		dev_err(dev, "unable to create sysfs\n");
+		goto err_flash;
+	}
+
+	return 0;
+
+err_flash:
+	mutex_destroy(&qspi->lock);
+	phytium_qspi_mtd_free(qspi);
+
+	clk_disable_unprepare(qspi->clk);
+	return ret;
+}
+
+static int phytium_qspi_remove(struct platform_device *pdev)
+{
+	
struct phytium_qspi *qspi = platform_get_drvdata(pdev); + + sysfs_remove_group(&qspi->dev->kobj, &phytium_qspi_attr_group); + + phytium_qspi_mtd_free(qspi); + mutex_destroy(&qspi->lock); + + clk_disable_unprepare(qspi->clk); + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_qspi_acpi_ids[] = { + { "PHYT0011", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_qspi_acpi_ids); +#endif + +static const struct of_device_id phytium_qspi_match[] = { + {.compatible = "phytium,qspi"}, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_qspi_match); + +static struct platform_driver phytium_qspi_driver = { + .probe = phytium_qspi_probe, + .remove = phytium_qspi_remove, + .driver = { + .name = "phytium-quadspi", + .of_match_table = phytium_qspi_match, + .acpi_match_table = ACPI_PTR(phytium_qspi_acpi_ids), + }, +}; + +module_platform_driver(phytium_qspi_driver); + +MODULE_AUTHOR("Mingshuai Zhu "); +MODULE_DESCRIPTION("Phytium QuadSPI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index ff641c06003a8fb81f8ee6264a8c9c97a24c868d..b6bb1f9350cd574af88fca6fb0bd4018d58f2b6c 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -988,6 +988,10 @@ static const struct flash_info spi_nor_ids[] = { { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, + /* boya */ + { "by25q64as", INFO(0x684017, 0, 64 * 1024, 128, SECT_4K) }, + { "by25q128as", INFO(0x684018, 0, 64 * 1024, 256, SECT_4K) }, + /* EON -- en25xxx */ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, @@ -1044,6 +1048,11 @@ static const struct flash_info spi_nor_ids[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { + "gd25q128e", INFO(0xc86018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | @@ -1263,6 +1272,7 @@ static const struct flash_info spi_nor_ids[] = { /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) 
*/
 	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "XM25QH128B", INFO(0x205018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 
 	{ },
 };
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 7cdd0cead693dc5d78565f5fd988c6c25c7a2740..ee84f63e6e3138531940c24431dd2eb4fbab97ac 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -158,6 +158,7 @@ source "drivers/net/can/ifi_canfd/Kconfig"
 source "drivers/net/can/m_can/Kconfig"
 source "drivers/net/can/mscan/Kconfig"
 source "drivers/net/can/peak_canfd/Kconfig"
+source "drivers/net/can/phytium/Kconfig"
 source "drivers/net/can/rcar/Kconfig"
 source "drivers/net/can/sja1000/Kconfig"
 source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 44922bf29b6a0653ea1eda6ad96c30b3a645fbaa..be9c70e77df317e95a7d5cc622ddcd4bb0b0f1e9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -33,5 +33,5 @@ obj-$(CONFIG_CAN_SUN4I)	+= sun4i_can.o
 obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
 obj-$(CONFIG_CAN_XILINXCAN)	+= xilinx_can.o
 obj-$(CONFIG_PCH_CAN)		+= pch_can.o
-
+obj-$(CONFIG_CAN_PHYTIUM)	+= phytium/
 subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/phytium/Kconfig b/drivers/net/can/phytium/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..a23216d23c22626a97244e0e48f66411ca4ed1b7
--- /dev/null
+++ b/drivers/net/can/phytium/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig CAN_PHYTIUM
+	tristate "Phytium CAN support"
+	help
+	  Say Y here if you want support for the Phytium CAN controller framework.
+	  This is common support for devices that embed the Phytium CAN IP.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called phytium_can.
+
+if CAN_PHYTIUM
+
+config CAN_PHYTIUM_PLATFORM
+	tristate "Phytium CAN support for io-mapped devices"
+	depends on HAS_IOMEM
+	help
+	  Say Y here if you want support for the IO-mapped Phytium CAN controller.
+	  This support is for devices that have the Phytium CAN controller IP
+	  embedded into the device and the IP is IO-mapped to the processor.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called phytium_can_platform.
+
+config CAN_PHYTIUM_PCI
+	tristate "Phytium CAN support for PCI devices"
+	depends on PCI
+	help
+	  Say Y here if you want support for a Phytium CAN controller connected
+	  to the PCI bus. This support is for devices that have the Phytium CAN
+	  controller IP embedded into a PCI device.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called phytium_can_pci.
+endif
diff --git a/drivers/net/can/phytium/Makefile b/drivers/net/can/phytium/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..7ef554fca58e4b5ded60b3bcdb6f9c282d5deda3
--- /dev/null
+++ b/drivers/net/can/phytium/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Phytium CAN controller drivers.
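+#
+# phytium_can.o provides the common controller core; the platform and PCI
+# objects are thin bus-specific front ends.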
+#
+#
+
+obj-$(CONFIG_CAN_PHYTIUM) += phytium_can.o
+obj-$(CONFIG_CAN_PHYTIUM_PLATFORM) += phytium_can_platform.o
+obj-$(CONFIG_CAN_PHYTIUM_PCI) += phytium_can_pci.o
diff --git a/drivers/net/can/phytium/phytium_can.c b/drivers/net/can/phytium/phytium_can.c
new file mode 100644
index 0000000000000000000000000000000000000000..e03e0c0d41d8a73b0fba7a925d3ea27e97809130
--- /dev/null
+++ b/drivers/net/can/phytium/phytium_can.c
@@ -0,0 +1,1144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* CAN bus driver for Phytium CAN controller
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/module.h>
+
+#include "phytium_can.h"
+
+/* register definition */
+enum phytium_can_reg {
+	CAN_CTRL	= 0x00,		/* Global control register */
+	CAN_INTR	= 0x04,		/* Interrupt register */
+	CAN_ARB_RATE_CTRL = 0x08,	/* Arbitration rate control register */
+	CAN_DAT_RATE_CTRL = 0x0c,	/* Data rate control register */
+	CAN_ACC_ID0	= 0x10,		/* Acceptance identifier0 register */
+	CAN_ACC_ID1	= 0x14,		/* Acceptance identifier1 register */
+	CAN_ACC_ID2	= 0x18,		/* Acceptance identifier2 register */
+	CAN_ACC_ID3	= 0x1c,		/* Acceptance identifier3 register */
+	CAN_ACC_ID0_MASK = 0x20,	/* Acceptance identifier0 mask register */
+	CAN_ACC_ID1_MASK = 0x24,	/* Acceptance identifier1 mask register */
+	CAN_ACC_ID2_MASK = 0x28,	/* Acceptance identifier2 mask register */
+	CAN_ACC_ID3_MASK = 0x2c,	/* Acceptance identifier3 mask register */
+	CAN_XFER_STS	= 0x30,		/* Transfer status register */
+	CAN_ERR_CNT	= 0x34,		/* Error counter register */
+	CAN_FIFO_CNT	= 0x38,		/* FIFO counter register */
+	CAN_DMA_CTRL	= 0x3c,		/* DMA request control register */
+	CAN_XFER_EN	= 0x40,		/* Transfer enable register */
+	CAN_INTR1	= 0x44,		/* Interrupt register 1 */
+	CAN_FRM_INFO	= 0x48,		/* Frame valid number register */
+	CAN_TIME_OUT	= 0x4c,		/* Timeout register */
+	CAN_TIME_OUT_CNT = 0x50,	/* Timeout counter register */
+	CAN_INTR2	= 0x54,		/* Interrupt register 2 */
+	CAN_TX_FIFO	= 0x100,	/* TX FIFO shadow register */
+	CAN_RX_FIFO	= 0x200,	/* RX FIFO shadow register */
+	CAN_RX_INFO_FIFO = 0x300,	/* RX information FIFO shadow register */
+	CAN_PIDR4	= 0xfd0,	/* Peripheral Identification Register 4 */
+	CAN_PIDR0	= 0xfe0,	/* Peripheral Identification Register 0 */
+	CAN_PIDR1	= 0xfe4,	/* Peripheral Identification Register 1 */
+	CAN_PIDR2	= 0xfe8,	/* Peripheral Identification Register 2 */
+	CAN_PIDR3	= 0xfec,	/* Peripheral Identification Register 3 */
+	CAN_CIDR0	= 0xff0,	/* Component Identification Register 0 */
+	CAN_CIDR1	= 0xff4,	/* Component Identification Register 1 */
+	CAN_CIDR2	= 0xff8,	/* Component Identification Register 2 */
+	CAN_CIDR3	= 0xffc,	/* Component Identification Register 3 */
+};
+
+/* Global control register (CTRL) */
+#define CTRL_XFER	BIT(0)	/* Transfer enable */
+#define CTRL_TXREQ	BIT(1)	/* Transmit request */
+#define CTRL_AIME	BIT(2)	/* Acceptance identifier mask enable */
+#define CTRL_TTS	BIT(3)	/* Transmit trigger strategy */
+#define CTRL_RST	BIT(7)	/* Write 1 to soft reset and self clear */
+#define CTRL_RFEIDF	BIT(8)	/* Allow RX frame end interrupt during ID filtered frame */
+#define CTRL_RFEDT	BIT(9)	/* Allow RX frame end interrupt during TX frame */
+#define CTRL_IOF	BIT(10)	/* Ignore overload flag internally */
+#define CTRL_FDCRC	BIT(11)	/* CANFD CRC mode */
+
+/* Interrupt register (INTR) */
+#define INTR_BOIS	BIT(0)	/* Bus off interrupt status */
+#define INTR_PWIS	BIT(1)	/* Passive warning interrupt status */
+#define INTR_PEIS	BIT(2)	/* Passive error interrupt status */
+#define INTR_RFIS
BIT(3) /* RX FIFO full interrupt status */ +#define INTR_TFIS BIT(4) /* TX FIFO empty interrupt status */ +#define INTR_REIS BIT(5) /* RX frame end interrupt status */ +#define INTR_TEIS BIT(6) /* TX frame end interrupt status */ +#define INTR_EIS BIT(7) /* Error interrupt status */ +#define INTR_BOIE BIT(8) /* Bus off interrupt enable */ +#define INTR_PWIE BIT(9) /* Passive warning interrupt enable */ +#define INTR_PEIE BIT(10) /* Passive error interrupt enable */ +#define INTR_RFIE BIT(11) /* RX FIFO full interrupt enable */ +#define INTR_TFIE BIT(12) /* TX FIFO empty interrupt enable */ +#define INTR_REIE BIT(13) /* RX frame end interrupt enable */ +#define INTR_TEIE BIT(14) /* TX frame end interrupt enable */ +#define INTR_EIE BIT(15) /* Error interrupt enable */ +#define INTR_BOIC BIT(16) /* Bus off interrupt clear */ +#define INTR_PWIC BIT(17) /* Passive warning interrupt clear */ +#define INTR_PEIC BIT(18) /* Passive error interrupt clear */ +#define INTR_RFIC BIT(19) /* RX FIFO full interrupt clear */ +#define INTR_TFIC BIT(20) /* TX FIFO empty interrupt clear */ +#define INTR_REIC BIT(21) /* RX frame end interrupt clear */ +#define INTR_TEIC BIT(22) /* TX frame end interrupt clear */ +#define INTR_EIC BIT(23) /* Error interrupt clear */ + +#define INTR_STATUS_MASK (INTR_BOIS | INTR_PWIS | INTR_PEIS | INTR_RFIS | \ + INTR_TFIS | INTR_REIS | INTR_TEIS | INTR_EIS) +#define INTR_EN_MASK (INTR_BOIE | INTR_RFIE | INTR_REIE | INTR_TEIE | \ + INTR_EIE) +#define INTR_CLEAR_MASK (INTR_BOIC | INTR_PWIC | INTR_PEIC | INTR_RFIC | \ + INTR_TFIC | INTR_REIC | INTR_TEIC | INTR_EIC) + +/* Arbitration rate control register (ARB_RATE_CTRL) */ +#define ARB_RATE_CTRL_ARJW GENMASK(1, 0) /* Arbitration field resync jump width */ +#define ARB_RATE_CTRL_APRS GENMASK(4, 2) /* Arbitration field propagation segment */ +#define ARB_RATE_CTRL_APH1S GENMASK(7, 5) /* Arbitration field phase1 segment */ +#define ARB_RATE_CTRL_APH2S GENMASK(10, 8) /* Arbitration field phase2 segment */ +#define ARB_RATE_CTRL_APD GENMASK(28, 16) /* Arbitration field prescaler divider */ + +/* Data rate control register (DAT_RATE_CTRL) */ +#define DAT_RATE_CTRL_DRJW GENMASK(1, 0) /* Data field resync jump width */ +#define DAT_RATE_CTRL_DPRS GENMASK(4, 2) /* Data field propagation segment */ +#define DAT_RATE_CTRL_DPH1S GENMASK(7, 5) /* Data field phase1 segment */ +#define DAT_RATE_CTRL_DPH2S GENMASK(10, 8) /* Data field phase2 segment */ +#define DAT_RATE_CTRL_DPD GENMASK(28, 16) /* Data field prescaler divider */ + +/* Acceptance identifierX register (ACC_IDX) */ +#define ACC_IDX_AID_MASK GENMASK(28, 0) /* Acceptance identifier */ + +/* Acceptance identifier0 mask register (ACC_ID0_MASK) */ +#define ACC_IDX_MASK_AID_MASK GENMASK(28, 0) /* Acceptance identifier mask */ + +/* Transfer status register (XFER_STS) */ +#define XFER_STS_FRAS GENMASK(2, 0) /* Frame status */ +#define XFER_STS_FIES GENMASK(7, 3) /* Field status */ +#define XFER_STS_FIES_IDLE (0x0) /* idle */ +#define XFER_STS_FIES_ARBITRATION (0x1) /* arbitration */ +#define XFER_STS_FIES_TX_CTRL (0x2) /* transmit control */ +#define XFER_STS_FIES_TX_DATA (0x3) /* transmit data */ +#define XFER_STS_FIES_TX_CRC (0x4) /* transmit crc */ +#define XFER_STS_FIES_TX_FRM (0x5) /* transmit frame */ +#define XFER_STS_FIES_RX_CTRL (0x6) /* receive control */ +#define XFER_STS_FIES_RX_DATA (0x7) /* receive data */ +#define XFER_STS_FIES_RX_CRC (0x8) /* receive crc */ +#define XFER_STS_FIES_RX_FRM (0x9) /* receive frame */ +#define XFER_STS_FIES_INTERMISSION (0xa) /* intermission */ 
+#define XFER_STS_FIES_TX_SUSPD (0xb) /* transmit suspend */ +#define XFER_STS_FIES_BUS_IDLE (0xc) /* bus idle */ +#define XFER_STS_FIES_OVL_FLAG (0xd) /* overload flag */ +#define XFER_STS_FIES_OVL_DLM (0xe) /* overload delimiter */ +#define XFER_STS_FIES_ERR_FLAG (0xf) /* error flag */ +#define XFER_STS_FIES_ERR_DLM (0x10) /* error delimiter */ +#define XFER_STS_FIES_BUS_OFF (0x11) /* bus off */ +#define XFER_STS_TS BIT(8) /* Transmit status */ +#define XFER_STS_RS BIT(9) /* Receive status */ +#define XFER_STS_XFERS BIT(10) /* Transfer status */ + +/* Error counter register (ERR_CNT) */ +#define ERR_CNT_REC GENMASK(8, 0) /* Receive error counter */ +#define ERR_CNT_TEC GENMASK(24, 16) /* Transmit error counter */ + +/* FIFO counter register (FIFO_CNT) */ +#define FIFO_CNT_RFN GENMASK(6, 0) /* Receive FIFO valid data number */ +#define FIFO_CNT_TFN GENMASK(22, 16) /* Transmit FIFO valid data number */ + +/* DMA request control register (DMA_CTRL) */ +#define DMA_CTRL_RFTH GENMASK(5, 0) /* Receive FIFO DMA request threshold */ +#define DMA_CTRL_RFRE BIT(6) /* Receive FIFO DMA request enable */ +#define DMA_CTRL_TFTH GENMASK(21, 16) /* Transmit FIFO DMA request threshold */ +#define DMA_CTRL_TFRE BIT(22) /* Transmit FIFO DMA request enable */ + +/* Transfer enable register (XFER_EN) */ +#define XFER_EN_XFER BIT(0) /* Transfer enable */ + +/* Interrupt register 1 (INTR1) */ +#define INTR1_RF1IS BIT(0) /* RX FIFO 1/4 interrupt status */ +#define INTR1_RF2IS BIT(1) /* RX FIFO 1/2 interrupt status */ +#define INTR1_RF3IS BIT(2) /* RX FIFO 3/4 interrupt status */ +#define INTR1_RF4IS BIT(3) /* RX FIFO full interrupt status */ +#define INTR1_TF1IS BIT(4) /* TX FIFO 1/4 interrupt status */ +#define INTR1_TF2IS BIT(5) /* TX FIFO 1/2 interrupt status */ +#define INTR1_TF3IS BIT(6) /* TX FIFO 3/4 interrupt status */ +#define INTR1_TF4IS BIT(7) /* TX FIFO empty interrupt status */ +#define INTR1_RF1IE BIT(8) /* RX FIFO 1/4 interrupt enable */ +#define INTR1_RF2IE BIT(9) /* RX FIFO 1/2 interrupt enable */ +#define INTR1_RF3IE BIT(10) /* RX FIFO 3/4 interrupt enable */ +#define INTR1_RF4IE BIT(11) /* RX FIFO full interrupt enable */ +#define INTR1_TF1IE BIT(12) /* TX FIFO 1/4 interrupt enable */ +#define INTR1_TF2IE BIT(13) /* TX FIFO 1/2 interrupt enable */ +#define INTR1_TF3IE BIT(14) /* TX FIFO 3/4 interrupt enable */ +#define INTR1_TF4IE BIT(15) /* TX FIFO empty interrupt enable */ +#define INTR1_RF1IC BIT(16) /* RX FIFO 1/4 interrupt clear */ +#define INTR1_RF2IC BIT(17) /* RX FIFO 1/2 interrupt clear */ +#define INTR1_RF3IC BIT(18) /* RX FIFO 3/4 interrupt clear */ +#define INTR1_RF4IC BIT(19) /* RX FIFO full interrupt clear */ +#define INTR1_TF1IC BIT(20) /* TX FIFO 1/4 interrupt clear */ +#define INTR1_TF2IC BIT(21) /* TX FIFO 1/2 interrupt clear */ +#define INTR1_TF3IC BIT(22) /* TX FIFO 3/4 interrupt clear */ +#define INTR1_TF4IC BIT(23) /* TX FIFO empty interrupt clear */ +#define INTR1_RF1RIS BIT(24) /* RX FIFO 1/4 raw interrupt status */ +#define INTR1_RF2RIS BIT(25) /* RX FIFO 1/2 raw interrupt status */ +#define INTR1_RF3RIS BIT(26) /* RX FIFO 3/4 raw interrupt status */ +#define INTR1_RF4RIS BIT(27) /* RX FIFO full raw interrupt status */ +#define INTR1_TF1RIS BIT(28) /* TX FIFO 1/4 raw interrupt status */ +#define INTR1_TF2RIS BIT(29) /* TX FIFO 1/2 raw interrupt status */ +#define INTR1_TF3RIS BIT(30) /* TX FIFO 3/4 raw interrupt status */ +#define INTR1_TF4RIS BIT(31) /* TX FIFO empty raw interrupt status */ + +/* Frame valid number register (FRM_INFO) */ +#define FRM_INFO_RXFC 
GENMASK(5, 0)	/* Valid frame number in RX FIFO */
+#define FRM_INFO_SSPD	GENMASK(31, 16)	/* Secondary sample point delay */
+
+/* Interrupt register 2 (INTR2) */
+#define INTR2_TOIS	BIT(0)	/* RX FIFO time out interrupt status */
+#define INTR2_TOIM	BIT(8)	/* RX FIFO time out interrupt mask */
+#define INTR2_TOIC	BIT(16)	/* RX FIFO time out interrupt clear */
+#define INTR2_TORIS	BIT(24)	/* RX FIFO time out raw interrupt status */
+
+/* RX information FIFO shadow register (RX_INFO_FIFO) */
+#define RX_INFO_FIFO_WNORF	GENMASK(4, 0)	/* Word (4-byte) number of current receive frame */
+#define RX_INFO_FIFO_RORF	BIT(5)	/* RTR value of current receive frame */
+#define RX_INFO_FIFO_FORF	BIT(6)	/* FDF value of current receive frame */
+#define RX_INFO_FIFO_IORF	BIT(7)	/* IDE value of current receive frame */
+
+/* Arbitration Bits */
+#define CAN_ID1_MASK		GENMASK(31, 21)	/* Base identifier */
+/* Standard Remote Transmission Request */
+#define CAN_ID1_RTR_MASK	BIT(20)
+/* Extended Substitute remote TXreq */
+#define CAN_ID2_SRR_MASK	BIT(20)
+#define CAN_IDE_MASK		BIT(19)	/* Identifier extension flag */
+#define CAN_ID2_MASK		GENMASK(18, 1)	/* Identifier extension */
+/* Extended frames remote TX request */
+#define CAN_ID2_RTR_MASK	BIT(0)
+#define CAN_ID1_FDF_MASK	BIT(18)
+#define CAN_ID1_DLC_MASK	GENMASK(17, 14)
+#define CANFD_ID1_BRS_MASK	BIT(16)
+#define CANFD_ID1_ESI_MASK	BIT(15)
+#define CANFD_ID1_DLC_MASK	GENMASK(14, 11)
+
+#define CAN_ID2_FDF_MASK	BIT(31)
+#define CAN_ID2_DLC_MASK	GENMASK(29, 26)
+#define CANFD_ID2_BRS_MASK	BIT(29)
+#define CANFD_ID2_ESI_MASK	BIT(28)
+#define CANFD_ID2_DLC_MASK	GENMASK(27, 24)
+
+#define CAN_ID1_DLC_OFF		14
+#define CANFD_ID1_DLC_OFF	11
+#define CAN_ID2_DLC_OFF		26
+#define CANFD_ID2_DLC_OFF	24
+
+#define CAN_IDR_ID1_SHIFT	21	/* Standard Message Identifier */
+#define CAN_IDR_ID2_SHIFT	1	/* Extended Message Identifier */
+#define CAN_IDR_SDLC_SHIFT	14
+#define CAN_IDR_EDLC_SHIFT	26
+
+/* CANFD Standard msg padding 1 */
+#define CANFD_IDR_PAD_MASK	0x000007FF
+#define CAN_IDR_PAD_MASK	0x00003FFF	/* Standard msg padding 1 */
+
+/**
+ * phytium_can_set_reg_bits - set bits in a device register
+ * @cdev:	Driver private data structure
+ * @reg:	Register offset
+ * @bs:		The bits to set
+ *
+ * Read-modify-write: OR the given bit mask into the current value of
+ * the CAN register.
+ */
+static void
+phytium_can_set_reg_bits(const struct phytium_can_dev *cdev,
+			 enum phytium_can_reg reg, u32 bs)
+{
+	u32 val = readl(cdev->base + reg);
+
+	val |= bs;
+	writel(val, cdev->base + reg);
+}
+
+/**
+ * phytium_can_clr_reg_bits - clear bits in a device register
+ * @cdev:	Driver private data structure
+ * @reg:	Register offset
+ * @bs:		The bits to clear
+ *
+ * Read-modify-write: clear the given bit mask from the current value of
+ * the CAN register.
+ */
+static void
+phytium_can_clr_reg_bits(const struct phytium_can_dev *cdev,
+			 enum phytium_can_reg reg, u32 bs)
+{
+	u32 val = readl(cdev->base + reg);
+
+	val &= ~bs;
+	writel(val, cdev->base + reg);
+}
+
+static inline u32 phytium_can_read(const struct phytium_can_dev *cdev, enum phytium_can_reg reg)
+{
+	return readl(cdev->base + reg);
+}
+
+static inline void phytium_can_write(const struct phytium_can_dev *cdev, enum phytium_can_reg reg,
+				     u32 val)
+{
+	writel(val, cdev->base + reg);
+}
+
+static inline void phytium_can_enable_all_interrupts(struct phytium_can_dev *cdev)
+{
+	phytium_can_write(cdev, CAN_INTR, INTR_EN_MASK);
+}
+
+static inline void phytium_can_disable_all_interrupt(struct phytium_can_dev *cdev)
+{
+	
+
+static inline u32 phytium_can_read(const struct phytium_can_dev *cdev, enum phytium_can_reg reg)
+{
+ return readl(cdev->base + reg);
+}
+
+static inline void phytium_can_write(const struct phytium_can_dev *cdev, enum phytium_can_reg reg,
+ u32 val)
+{
+ writel(val, cdev->base + reg);
+}
+
+static inline void phytium_can_enable_all_interrupts(struct phytium_can_dev *cdev)
+{
+ phytium_can_write(cdev, CAN_INTR, INTR_EN_MASK);
+}
+
+static inline void phytium_can_disable_all_interrupt(struct phytium_can_dev *cdev)
+{
+ phytium_can_write(cdev, CAN_INTR, 0x0);
+}
+
+static int phytium_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+
+ bec->rxerr = phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_REC;
+ bec->txerr = (phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_TEC) >> 16;
+
+ return 0;
+}
+
+static int phytium_can_read_fifo(struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 id, dlc, i;
+
+ /* Read the frame header from FIFO */
+ id = phytium_can_read(cdev, CAN_RX_FIFO);
+ id = be32_to_cpup(&id);
+ if (id & CAN_IDE_MASK) {
+ /* Received an extended frame */
+ dlc = phytium_can_read(cdev, CAN_RX_FIFO);
+ dlc = be32_to_cpup(&dlc);
+ if (dlc & CAN_ID2_FDF_MASK)
+ skb = alloc_canfd_skb(dev, &cf);
+ else
+ skb = alloc_can_skb(dev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ if (dlc & CAN_ID2_FDF_MASK) {
+ /* CAN FD extended frame */
+ if (dlc & CANFD_ID2_BRS_MASK)
+ cf->flags |= CANFD_BRS;
+ if (dlc & CANFD_ID2_ESI_MASK)
+ cf->flags |= CANFD_ESI;
+ cf->len = can_dlc2len((dlc & CANFD_ID2_DLC_MASK) >> CANFD_ID2_DLC_OFF);
+ } else {
+ /* Classic CAN extended frame */
+ cf->len = get_can_dlc((dlc & CAN_ID2_DLC_MASK) >> CAN_ID2_DLC_OFF);
+ }
+
+ cf->can_id = (id & CAN_ID1_MASK) >> 3;
+ cf->can_id |= (id & CAN_ID2_MASK) >> 1;
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (id & CAN_ID2_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ /* Received a standard frame */
+ if (id & CAN_ID1_FDF_MASK)
+ skb = alloc_canfd_skb(dev, &cf);
+ else
+ skb = alloc_can_skb(dev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ if (id & CAN_ID1_FDF_MASK) {
+ /* CAN FD standard frame */
+ if (id & CANFD_ID1_BRS_MASK)
+ cf->flags |= CANFD_BRS;
+ if (id & CANFD_ID1_ESI_MASK)
+ cf->flags |= CANFD_ESI;
+ cf->len = can_dlc2len((id & CANFD_ID1_DLC_MASK) >> CANFD_ID1_DLC_OFF);
+ } else {
+ /* Classic CAN standard frame */
+ cf->len = get_can_dlc((id & CAN_ID1_DLC_MASK) >> CAN_ID1_DLC_OFF);
+ }
+
+ cf->can_id = (id & CAN_ID1_MASK) >> 21;
+
+ if (id & CAN_ID1_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ /* Receive data frames */
+ for (i = 0; i < cf->len; i += 4)
+ *(__be32 *)(cf->data + i) = phytium_can_read(cdev, CAN_RX_FIFO);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+static int phytium_can_do_rx_poll(struct net_device *dev, int quota)
+{
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+ u32 rxfs, pkts = 0;
+
+ rxfs = phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_RFN;
+ if (!rxfs) {
+ netdev_dbg(dev, "no messages in RX FIFO\n");
+ return 0;
+ }
+
+ while ((rxfs != 0) && (quota > 0)) {
+ pkts += phytium_can_read_fifo(dev);
+ quota--;
+ rxfs = phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_RFN;
+ netdev_dbg(dev, "%d frames still pending in RX FIFO\n", rxfs);
+ }
+
+ if (pkts)
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
+ return pkts;
+}
+
+static int phytium_can_rx_handler(struct net_device *dev, int quota)
+{
+ int work_done = 0;
+ int rx_work_or_err;
+
+ /* Handle RX IRQ */
+ rx_work_or_err = phytium_can_do_rx_poll(dev, (quota - work_done));
+ if (rx_work_or_err < 0)
+ return rx_work_or_err;
+
+ work_done += rx_work_or_err;
+
+ return work_done;
+}
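+
+/* phytium_can_rx_handler() returns the number of frames processed or a
+ * negative error code; phytium_can_poll() relies on that value to decide
+ * whether to complete NAPI and re-enable interrupts.
+ */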
+
+static int phytium_can_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *dev = napi->dev;
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+ int work_done;
+
+ netdev_dbg(dev, "Receive processing in progress\n");
+
+ work_done = phytium_can_rx_handler(dev, quota);
+
+ /* Don't re-enable interrupts if the driver had a fatal error
+ * (e.g., FIFO read failure)
+ */
+ if (work_done >= 0 && work_done < quota) {
+ napi_complete_done(napi, work_done);
+ phytium_can_enable_all_interrupts(cdev);
+ }
+
+ return work_done;
+}
+
+static void phytium_can_write_frame(struct phytium_can_dev *cdev)
+{
+ struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct net_device *dev = cdev->net;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb = cdev->tx_skb;
+ u32 i, id, dlc = 0, frame_head[2] = {0, 0};
+ u32 data_len;
+
+ data_len = can_len2dlc(cf->len);
+ cdev->tx_skb = NULL;
+
+ phytium_can_clr_reg_bits(cdev, CAN_CTRL, CTRL_XFER);
+
+ /* Mind the bit ordering of the arbitration field */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ /* Extended CAN ID format */
+ id = ((cf->can_id & CAN_EFF_MASK) << 1) & CAN_ID2_MASK;
+ id |= (((cf->can_id & CAN_EFF_MASK) >>
+ (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
+ CAN_IDR_ID1_SHIFT) & CAN_ID1_MASK;
+
+ /* The substitute remote request bit should be "1"
+ * for extended frames as in the Phytium CAN datasheet
+ */
+ id |= CAN_IDE_MASK | CAN_ID2_SRR_MASK;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ /* Extended frames remote TX request */
+ id |= CAN_ID2_RTR_MASK;
+ if ((cdev->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ can_is_canfd_skb(skb))
+ dlc = data_len << CANFD_ID2_DLC_OFF;
+ else
+ dlc = data_len << CAN_ID2_DLC_OFF;
+
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
+ dlc |= CAN_ID2_FDF_MASK;
+ if (cf->flags & CANFD_BRS)
+ dlc |= CANFD_ID2_BRS_MASK;
+ if (cf->flags & CANFD_ESI)
+ dlc |= CANFD_ID2_ESI_MASK;
+ }
+
+ frame_head[0] = cpu_to_be32p(&id);
+ frame_head[1] = cpu_to_be32p(&dlc);
+
+ /* Write the frame to the Phytium CAN TX FIFO */
+ phytium_can_write(cdev, CAN_TX_FIFO, frame_head[0]);
+ phytium_can_write(cdev, CAN_TX_FIFO, frame_head[1]);
+ netdev_dbg(dev, "Write arbitration field [0]:0x%x [1]:0x%x\n",
+ frame_head[0], frame_head[1]);
+ } else {
+ /* Standard CAN ID format */
+ id = ((cf->can_id & CAN_SFF_MASK) << CAN_IDR_ID1_SHIFT)
+ & CAN_ID1_MASK;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ /* Standard frames remote TX request */
+ id |= CAN_ID1_RTR_MASK;
+
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
+ dlc = (data_len << CANFD_ID1_DLC_OFF)
+ | CANFD_IDR_PAD_MASK;
+ else
+ dlc = (data_len << CAN_ID1_DLC_OFF) | CAN_IDR_PAD_MASK;
+
+ id |= dlc;
+
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
+ id |= CAN_ID1_FDF_MASK;
+ if (cf->flags & CANFD_BRS)
+ id |= CANFD_ID1_BRS_MASK;
+ if (cf->flags & CANFD_ESI)
+ id |= CANFD_ID1_ESI_MASK;
+ }
+
+ frame_head[0] = cpu_to_be32p(&id);
+ /* Write the frame to the Phytium CAN TX FIFO */
+ phytium_can_write(cdev, CAN_TX_FIFO, frame_head[0]);
+ netdev_dbg(dev, "Write arbitration field [0] 0x%x\n",
+ frame_head[0]);
+ }
+
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ netdev_dbg(dev, "Write CAN data frame\n");
+ for (i = 0; i < cf->len; i += 4) {
+ phytium_can_write(cdev, CAN_TX_FIFO,
+ *(__be32 *)(cf->data + i));
+ netdev_dbg(dev, "[%d]:%x\n", i,
+ *(__be32 *)(cf->data + i));
+ }
+ }
+
+ stats->tx_bytes += cf->len;
+ can_put_echo_skb(skb, dev, cdev->tx_head % cdev->tx_max);
+ cdev->tx_head++;
+
+ netif_stop_queue(dev);
+ /* trigger transmission */
+ phytium_can_set_reg_bits(cdev, CAN_CTRL, CTRL_TXREQ | CTRL_XFER);
+
+ netdev_dbg(dev, "Trigger send message!\n");
+}
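+
+/* The TX FIFO is fed as big-endian 32-bit words: one arbitration word for
+ * standard frames (two for extended frames), followed by the payload four
+ * bytes per write.
+ */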
+
+static netdev_tx_t phytium_can_tx_handler(struct phytium_can_dev *cdev)
+{
+ struct net_device *dev = cdev->net;
+ u32 tx_fifo_used;
+
+ /* Check if the TX FIFO is full */
+ tx_fifo_used = (phytium_can_read(cdev, CAN_FIFO_CNT) & FIFO_CNT_TFN) >> 16;
+ if (tx_fifo_used == cdev->tx_max) {
+ netif_stop_queue(dev);
+ netdev_err(dev, "BUG! TX FIFO full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (cdev->tx_head == cdev->tx_tail) {
+ cdev->tx_head = 0;
+ cdev->tx_tail = 0;
+ }
+
+ phytium_can_write_frame(cdev);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * phytium_can_tx_interrupt - TX done ISR
+ * @ndev: net_device pointer
+ * @isr: Interrupt status register value
+ */
+static void phytium_can_tx_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct phytium_can_dev *cdev = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ while ((cdev->tx_head - cdev->tx_tail > 0) && (isr & INTR_TEIS)) {
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC | INTR_REIC);
+ can_get_echo_skb(ndev, cdev->tx_tail % cdev->tx_max);
+ cdev->tx_tail++;
+ stats->tx_packets++;
+ isr = (phytium_can_read(cdev, CAN_INTR) & INTR_STATUS_MASK);
+ }
+
+ netdev_dbg(ndev, "Finished transmit, %lu packets total\n", stats->tx_packets);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ netif_wake_queue(ndev);
+}
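+
+/* tx_head counts frames queued to the TX FIFO and tx_tail frames whose
+ * transmission completed; their difference is the number of echo skbs
+ * still outstanding.
+ */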
+
+static void phytium_can_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct phytium_can_dev *cdev = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 txerr = 0, rxerr = 0;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ rxerr = phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_REC;
+ txerr = ((phytium_can_read(cdev, CAN_ERR_CNT) & ERR_CNT_TEC) >> 16);
+
+ if (isr & INTR_BOIS) {
+ netdev_dbg(ndev, "bus_off %s: txerr :%u rxerr :%u\n",
+ __func__, txerr, rxerr);
+ cdev->can.state = CAN_STATE_BUS_OFF;
+ cdev->can.can_stats.bus_off++;
+ /* Leave device in Config Mode in bus-off state */
+ phytium_can_write(cdev, CAN_CTRL, CTRL_RST);
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ } else if ((isr & INTR_PEIS) == INTR_PEIS) {
+ netdev_dbg(ndev, "error_passive %s: txerr :%u rxerr :%u\n",
+ __func__, txerr, rxerr);
+ cdev->can.state = CAN_STATE_ERROR_PASSIVE;
+ cdev->can.can_stats.error_passive++;
+ /* Clear interrupt condition */
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PEIC);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PWIC);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIC);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ } else if (isr & INTR_PWIS) {
+ netdev_dbg(ndev, "error_warning %s: txerr :%u rxerr :%u\n",
+ __func__, txerr, rxerr);
+ cdev->can.state = CAN_STATE_ERROR_WARNING;
+ cdev->can.can_stats.error_warning++;
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_PWIC);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_TEIC);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIC);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+
+ /* Check for RX FIFO Overflow interrupt */
+ if (isr & INTR_RFIS) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * phytium_can_isr - CAN ISR
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the Phytium CAN ISR. It checks the type of interrupt
+ * and invokes the corresponding handler.
+ *
+ * Return:
+ * * IRQ_NONE - if no interrupt is pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t phytium_can_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+ u32 isr;
+
+ /* Get the interrupt status */
+ isr = phytium_can_read(cdev, CAN_INTR) & INTR_STATUS_MASK;
+ if (!isr)
+ return IRQ_NONE;
+
+ /* Check for the RX FIFO full interrupt and handle it */
+ if ((isr & INTR_RFIS)) {
+ netdev_dbg(dev, "RX FIFO is full\n");
+ phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_RFIE);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_RFIC);
+ napi_schedule(&cdev->napi);
+ }
+
+ /* Check for error interrupts and process them */
+ if (isr & (INTR_EIS | INTR_RFIS | INTR_BOIS)) {
+ phytium_can_clr_reg_bits(cdev, CAN_INTR, (INTR_EIE
+ | INTR_RFIE | INTR_BOIE));
+ phytium_can_err_interrupt(dev, isr);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, (INTR_EIC
+ | INTR_RFIC | INTR_BOIC));
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_EIE | INTR_BOIE);
+ return IRQ_HANDLED;
+ }
+
+ /* Check for the TX-complete interrupt and process it */
+ if (isr & INTR_TEIS)
+ phytium_can_tx_interrupt(dev, isr);
+
+ /* Check for receive interrupts and process them */
+ if (isr & INTR_REIS) {
+ phytium_can_clr_reg_bits(cdev, CAN_INTR, INTR_REIE);
+ phytium_can_set_reg_bits(cdev, CAN_INTR, INTR_REIC);
+ napi_schedule(&cdev->napi);
+ }
+
+ return IRQ_HANDLED;
+}
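+
+/* Both bit rate control registers use the same layout as programmed below:
+ * sjw at bit 0, prop_seg at bit 2, phase_seg1 at bit 5, phase_seg2 at bit 8
+ * and the prescaler at bit 16, each field written as its value minus one.
+ */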
+
+/**
+ * phytium_can_set_bittiming - CAN set bit timing routine
+ * @dev: Pointer to net_device structure
+ *
+ * This is the driver's set_bittiming routine.
+ * Return: 0 on success and a negative error value on failure
+ */
+static int phytium_can_set_bittiming(struct net_device *dev)
+{
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+ const struct can_bittiming *bt = &cdev->can.bittiming;
+ const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ u32 btr, dbtr;
+ u32 xfer_on;
+
+ /* Bit timing can only be changed while the controller is in
+ * configuration mode, i.e. while CTRL_XFER is clear.
+ */
+ xfer_on = phytium_can_read(cdev, CAN_CTRL) & CTRL_XFER;
+ if (xfer_on) {
+ netdev_alert(dev, "BUG! Cannot set bittiming - CAN is not in config mode\n");
+ return -EPERM;
+ }
+
+ /* Set the baud rate prescaler */
+ btr = (bt->brp - 1) << 16;
+
+ /* Set time segment 1 (prop_seg + phase_seg1) */
+ btr |= (bt->prop_seg - 1) << 2;
+
+ btr |= (bt->phase_seg1 - 1) << 5;
+
+ /* Set time segment 2 */
+ btr |= (bt->phase_seg2 - 1) << 8;
+
+ /* Set the synchronization jump width */
+ btr |= (bt->sjw - 1);
+
+ dbtr = (dbt->brp - 1) << 16;
+ dbtr |= (dbt->prop_seg - 1) << 2;
+ dbtr |= (dbt->phase_seg1 - 1) << 5;
+ dbtr |= (dbt->phase_seg2 - 1) << 8;
+ dbtr |= (dbt->sjw - 1);
+
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
+ phytium_can_write(cdev, CAN_ARB_RATE_CTRL, btr);
+ phytium_can_write(cdev, CAN_DAT_RATE_CTRL, dbtr);
+ } else {
+ phytium_can_write(cdev, CAN_ARB_RATE_CTRL, btr);
+ phytium_can_write(cdev, CAN_DAT_RATE_CTRL, btr);
+ }
+
+ netdev_dbg(dev, "DAT=0x%08x, ARB=0x%08x\n",
+ phytium_can_read(cdev, CAN_DAT_RATE_CTRL),
+ phytium_can_read(cdev, CAN_ARB_RATE_CTRL));
+
+ return 0;
+}
+
+/**
+ * phytium_can_start - Driver start routine
+ * @dev: Pointer to net_device structure
+ *
+ * Based on the state of the CAN device, this puts the
+ * controller into the proper mode.
+ */
+static void phytium_can_start(struct net_device *dev)
+{
+ struct phytium_can_dev *cdev = netdev_priv(dev);
+ u32 ctrl;
+
+ /* Disable transfer */
+ ctrl = phytium_can_read(cdev, CAN_CTRL);
+ ctrl &= ~CTRL_XFER;
+ phytium_can_write(cdev, CAN_CTRL, ctrl);
+
+ /* XXX: If CANFD, reset the controller */
+ phytium_can_write(cdev, CAN_CTRL, (ctrl | CTRL_RST));
+
+ /* Bittiming setup */
+ phytium_can_set_bittiming(dev);
+
+ /* Acceptance identifier mask setup */
+ phytium_can_write(cdev, CAN_ACC_ID0_MASK, ACC_IDX_MASK_AID_MASK);
+ phytium_can_write(cdev, CAN_ACC_ID1_MASK, ACC_IDX_MASK_AID_MASK);
+ phytium_can_write(cdev, CAN_ACC_ID2_MASK, ACC_IDX_MASK_AID_MASK);
+ phytium_can_write(cdev, CAN_ACC_ID3_MASK, ACC_IDX_MASK_AID_MASK);
+ ctrl |= CTRL_AIME;
+
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
+ ctrl |= CTRL_IOF | CTRL_FDCRC;
+
+ phytium_can_write(cdev, CAN_CTRL, ctrl);
+
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ phytium_can_enable_all_interrupts(cdev);
+
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrl |= CTRL_XFER;
+ else
+ ctrl |= CTRL_XFER | CTRL_TXREQ;
+
+ phytium_can_write(cdev, CAN_CTRL, ctrl);
+}
+
+/**
+ * phytium_can_stop - Driver stop routine
+ * @dev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables the
+ * interrupts and puts the device into configuration mode.
+ */ +static void phytium_can_stop(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + u32 ctrl; + + /* Disable all interrupts */ + phytium_can_disable_all_interrupt(cdev); + + /* Disable transfer and switch to receive-only mode */ + ctrl = phytium_can_read(cdev, CAN_CTRL); + ctrl &= ~(CTRL_XFER | CTRL_TXREQ); + phytium_can_write(cdev, CAN_CTRL, ctrl); + + /* Set the state as STOPPED */ + cdev->can.state = CAN_STATE_STOPPED; +} + +static void phytium_can_clean(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + if (cdev->tx_skb) { + dev->stats.tx_errors++; + can_free_echo_skb(cdev->net, 0); + cdev->tx_skb = NULL; + } +} + +static int phytium_can_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + phytium_can_clean(dev); + phytium_can_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * phytium_can_open - Driver open routine + * @dev: Pointer to net_device structure + * + * This is the driver open routine. + * Return: 0 on success and failure value on error + */ +static int phytium_can_open(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + int ret; + + /* Start clock */ + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + /* Open the CAN device */ + ret = open_candev(dev); + if (ret) { + netdev_err(dev, "failed to open can device\n"); + goto disable_clk; + } + + /* Register interrupt handler */ + ret = request_irq(dev->irq, phytium_can_isr, + IRQF_SHARED, dev->name, dev); + if (ret < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto fail; + } + + /* Start the controller */ + phytium_can_start(dev); + + can_led_event(dev, CAN_LED_EVENT_OPEN); + napi_enable(&cdev->napi); + netif_start_queue(dev); + + return 0; + +fail: + close_candev(dev); +disable_clk: + pm_runtime_put_sync(cdev->dev); + return ret; +} + +/** + * phytium_can_close - Driver close routine + * @dev: Pointer to net_device structure + * + * Return: 0 always + */ +static int phytium_can_close(struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&cdev->napi); + + phytium_can_stop(dev); + free_irq(dev->irq, dev); + pm_runtime_put_sync(cdev->dev); + + close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + + return 0; +} + +/** + * phytium_can_start_xmit - Starts the transmission + * + * Return: 0 on success. 
+ */ +static netdev_tx_t phytium_can_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct phytium_can_dev *cdev = netdev_priv(dev); + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + cdev->tx_skb = skb; + + return phytium_can_tx_handler(cdev); +} + +static const struct net_device_ops phytium_can_netdev_ops = { + .ndo_open = phytium_can_open, + .ndo_stop = phytium_can_close, + .ndo_start_xmit = phytium_can_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int register_phytium_can_dev(struct net_device *dev) +{ + dev->flags |= IFF_ECHO; + dev->netdev_ops = &phytium_can_netdev_ops; + + return register_candev(dev); +} + +static int phytium_can_dev_setup(struct phytium_can_dev *cdev) +{ + struct net_device *dev = cdev->net; + + netif_napi_add(dev, &cdev->napi, phytium_can_poll, 64); + + cdev->can.do_set_mode = phytium_can_set_mode; + cdev->can.do_get_berr_counter = phytium_can_get_berr_counter; + + cdev->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + cdev->can.bittiming_const = cdev->bit_timing; + + if (cdev->fdmode) { + cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + dev->mtu = CANFD_MTU; + cdev->can.ctrlmode = CAN_CTRLMODE_FD; + cdev->can.data_bittiming_const = cdev->bit_timing; + } + + return 0; +} + +struct phytium_can_dev *phytium_can_allocate_dev(struct device *dev, int sizeof_priv, + int tx_fifo_depth) +{ + struct phytium_can_dev *cdev = NULL; + struct net_device *net_dev; + + /* Allocate the can device struct */ + net_dev = alloc_candev(sizeof_priv, tx_fifo_depth); + if (!net_dev) { + dev_err(dev, "Failed to allocate CAN device.\n"); + goto out; + } + + cdev = netdev_priv(net_dev); + cdev->net = net_dev; + cdev->dev = dev; + SET_NETDEV_DEV(net_dev, dev); + +out: + return cdev; +} +EXPORT_SYMBOL(phytium_can_allocate_dev); + +void phytium_can_free_dev(struct net_device *net) +{ + free_candev(net); +} +EXPORT_SYMBOL(phytium_can_free_dev); + +int phytium_can_register(struct phytium_can_dev *cdev) +{ + int ret; + + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + ret = phytium_can_dev_setup(cdev); + if (ret) + goto fail; + + ret = register_phytium_can_dev(cdev->net); + if (ret) { + dev_err(cdev->dev, "registering %s failed (err=%d)\n", + cdev->net->name, ret); + goto fail; + } + + devm_can_led_init(cdev->net); + + dev_info(cdev->dev, "%s device registered (irq=%d)\n", + KBUILD_MODNAME, cdev->net->irq); + + /* Probe finished + * Stop clocks. They will be reactivated once the device is opened. 
+ */ + pm_runtime_put_sync(cdev->dev); + + return 0; + +fail: + pm_runtime_put_sync(cdev->dev); + return ret; +} +EXPORT_SYMBOL(phytium_can_register); + +void phytium_can_unregister(struct phytium_can_dev *cdev) +{ + unregister_candev(cdev->net); +} +EXPORT_SYMBOL(phytium_can_unregister); + +int phytium_can_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + phytium_can_stop(ndev); + pm_runtime_put_sync(cdev->dev); + } + + cdev->can.state = CAN_STATE_SLEEPING; + + return 0; +} +EXPORT_SYMBOL(phytium_can_suspend); + +int phytium_can_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + int ret; + + cdev->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + ret = pm_runtime_resume(cdev->dev); + if (ret) + return ret; + + phytium_can_start(ndev); + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +EXPORT_SYMBOL(phytium_can_resume); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN bus driver for Phytium CAN controller"); + diff --git a/drivers/net/can/phytium/phytium_can.h b/drivers/net/can/phytium/phytium_can.h new file mode 100644 index 0000000000000000000000000000000000000000..3f125548c4a6f1dc20d90076102b8a49f1b1d96e --- /dev/null +++ b/drivers/net/can/phytium/phytium_can.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium CAN controller driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_CAN_H_ +#define _PHYTIUM_CAN_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum phytium_can_ip_type { + PHYTIUM_CAN = 0, + PHYTIUM_CANFD, +}; + +struct phytium_can_devtype { + enum phytium_can_ip_type cantype; + const struct can_bittiming_const *bittiming_const; +}; + +struct phytium_can_dev { + struct can_priv can; + unsigned int tx_head; + unsigned int tx_tail; + unsigned int tx_max; + struct napi_struct napi; + struct net_device *net; + struct device *dev; + struct clk *clk; + + struct sk_buff *tx_skb; + + const struct can_bittiming_const *bit_timing; + + int fdmode; + u32 isr; + u32 tx_fifo_depth; + + void __iomem *base; +}; + +struct phytium_can_dev *phytium_can_allocate_dev(struct device *dev, int sizeof_priv, + int tx_fifo_depth); +void phytium_can_free_dev(struct net_device *net); + +int phytium_can_register(struct phytium_can_dev *cdev); +void phytium_can_unregister(struct phytium_can_dev *cdev); + +int phytium_can_suspend(struct device *dev); +int phytium_can_resume(struct device *dev); +#endif /* _PHYTIUM_CAN_H_ */ diff --git a/drivers/net/can/phytium/phytium_can_pci.c b/drivers/net/can/phytium/phytium_can_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..401ec7e444a035a717dc2f65c39d87947fce4d63 --- /dev/null +++ b/drivers/net/can/phytium/phytium_can_pci.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform CAN bus driver for Phytium CAN controller + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */ + +#include + +#include "phytium_can.h" + +struct phytium_can_pci_config { + const struct phytium_can_devtype *devtype; + unsigned int clock_freq; + unsigned int tx_fifo_depth; +}; + +#define cdev2priv(dev) container_of(dev, struct phytium_can_pci, cdev) + +struct phytium_can_pci { + struct phytium_can_dev cdev; + + void __iomem *base; +}; + +static const struct can_bittiming_const phytium_bittiming_const_8192 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, /* Synchronisation jump width */ + .brp_min = 1, /* Bit-rate prescaler */ + .brp_max = 8192, + .brp_inc = 2, +}; + +static const struct phytium_can_devtype phytium_can_pci = { + .cantype = PHYTIUM_CAN, + .bittiming_const = &phytium_bittiming_const_8192, +}; + +static const struct phytium_can_pci_config phytium_can_pci_data = { + .devtype = &phytium_can_pci, + .clock_freq = 600000000, + .tx_fifo_depth = 64, +}; + +static int phytium_can_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + const struct phytium_can_pci_config *cfg; + struct phytium_can_dev *cdev; + struct phytium_can_pci *priv; + int ret; + + cfg = (const struct phytium_can_pci_config *)id->driver_data; + + ret = pcim_enable_device(pdev); + if (ret) + goto err; + + ret = pcim_iomap_regions(pdev, 0x1, pci_name(pdev)); + if (ret) + goto err; + + cdev = phytium_can_allocate_dev(&pdev->dev, sizeof(struct phytium_can_pci), + cfg->tx_fifo_depth); + if (!cdev) + return -ENOMEM; + + priv = cdev2priv(cdev); + priv->base = pcim_iomap_table(pdev)[0]; + + cdev->dev = &pdev->dev; + cdev->fdmode = cfg->devtype->cantype; + cdev->bit_timing = cfg->devtype->bittiming_const; + cdev->can.clock.freq = cfg->clock_freq; + cdev->tx_fifo_depth = cfg->tx_fifo_depth; + + cdev->base = priv->base; + cdev->net->irq = pdev->irq; + + pci_set_drvdata(pdev, cdev->net); + + pm_runtime_enable(cdev->dev); + ret = phytium_can_register(cdev); + if (ret) + goto err; + + return 0; +err: + return ret; +} + +static void phytium_can_pci_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct phytium_can_dev *cdev = netdev_priv(dev); + + phytium_can_unregister(cdev); + phytium_can_free_dev(cdev->net); +} + +static __maybe_unused int phytium_can_pci_suspend(struct device *dev) +{ + return phytium_can_suspend(dev); +} + +static __maybe_unused int phytium_can_pci_resume(struct device *dev) +{ + return phytium_can_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(phytium_can_pci_pm_ops, + phytium_can_pci_suspend, phytium_can_pci_resume); + +static const struct pci_device_id phytium_can_pci_id_table[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2d), (kernel_ulong_t)&phytium_can_pci_data, }, + { /* sentinel */ }, +}; + +static struct pci_driver phytium_can_pci_driver = { + .name = KBUILD_MODNAME, + .probe = phytium_can_pci_probe, + .remove = phytium_can_pci_remove, + .id_table = phytium_can_pci_id_table, + .driver = { + .pm = &phytium_can_pci_pm_ops, + }, +}; + +module_pci_driver(phytium_can_pci_driver); + +MODULE_AUTHOR("Cheng Quan +#include +#include + +#include "phytium_can.h" + +#define cdev2priv(dev) container_of(dev, struct phytium_can_plat, cdev) + +struct phytium_can_plat { + struct phytium_can_dev cdev; + struct phytium_can_devtype *devtype; + + int irq; + void __iomem *reg_base; +}; + +static const struct can_bittiming_const phytium_bittiming_const_512 = { + .name = "phytium_can", + .tseg1_min = 1, /* Time segment 1 = prop_seg 
+ phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4, /* Synchronisation jump width */
+ .brp_min = 1, /* Bit-rate prescaler */
+ .brp_max = 512,
+ .brp_inc = 2,
+};
+
+static const struct can_bittiming_const phytium_bittiming_const_8192 = {
+ .name = "phytium_can",
+ .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4, /* Synchronisation jump width */
+ .brp_min = 1, /* Bit-rate prescaler */
+ .brp_max = 8192,
+ .brp_inc = 2,
+};
+
+static const struct phytium_can_devtype phytium_can_data = {
+ .cantype = PHYTIUM_CAN,
+ .bittiming_const = &phytium_bittiming_const_512,
+};
+
+static const struct phytium_can_devtype phytium_canfd_data = {
+ .cantype = PHYTIUM_CANFD,
+ .bittiming_const = &phytium_bittiming_const_8192,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id phytium_can_acpi_ids[] = {
+ { "PHYT000A", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(acpi, phytium_can_acpi_ids);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id phytium_can_of_ids[] = {
+ { .compatible = "phytium,can", .data = &phytium_can_data },
+ { .compatible = "phytium,canfd", .data = &phytium_canfd_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, phytium_can_of_ids);
+#endif
+
+static int phytium_can_plat_probe(struct platform_device *pdev)
+{
+ struct phytium_can_dev *cdev;
+ struct phytium_can_plat *priv;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ const struct phytium_can_devtype *devtype = &phytium_can_data;
+ u32 tx_fifo_depth;
+ int ret;
+ const char *mode;
+
+ ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev), "tx-fifo-depth", &tx_fifo_depth);
+ if (ret)
+ tx_fifo_depth = 64;
+
+ cdev = phytium_can_allocate_dev(&pdev->dev, sizeof(struct phytium_can_plat),
+ tx_fifo_depth);
+ if (!cdev)
+ return -ENOMEM;
+
+ priv = cdev2priv(cdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->irq = platform_get_irq(pdev, 0);
+ if (IS_ERR(priv->reg_base) || priv->irq < 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (pdev->dev.of_node) {
+ cdev->clk = devm_clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(cdev->clk)) {
+ dev_err(&pdev->dev, "no clock found\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ cdev->can.clock.freq = clk_get_rate(cdev->clk);
+
+ of_id = of_match_device(phytium_can_of_ids, &pdev->dev);
+ if (of_id && of_id->data)
+ devtype = of_id->data;
+ } else if (has_acpi_companion(&pdev->dev)) {
+ ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev),
+ "clock-frequency",
+ &cdev->can.clock.freq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get clock frequency.\n");
+ goto fail;
+ }
+ ret = fwnode_property_read_string(dev_fwnode(&pdev->dev), "mode-select", &mode);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "no mode-select property (%d)\n", ret);
+ } else if (strncmp("canfd", mode, strlen("canfd")) == 0) {
+ dev_info(&pdev->dev, "use mode-select: canfd\n");
+ devtype = &phytium_canfd_data;
+ }
+ }
+
+ cdev->tx_fifo_depth = tx_fifo_depth;
+ cdev->tx_head = 0;
+ cdev->tx_tail = 0;
+ cdev->tx_max = tx_fifo_depth;
+
+ if (devtype->cantype == PHYTIUM_CANFD)
+ cdev->fdmode = 1;
+ else
+ cdev->fdmode = 0;
+
+ if (fwnode_property_present(dev_fwnode(&pdev->dev), "extend_brp"))
+ cdev->bit_timing = &phytium_bittiming_const_8192;
+ else
+ cdev->bit_timing = devtype->bittiming_const;
+ cdev->can.bittiming_const
= devtype->bittiming_const; + cdev->base = priv->reg_base; + cdev->net->irq = priv->irq; + + platform_set_drvdata(pdev, cdev->net); + + pm_runtime_enable(cdev->dev); + ret = phytium_can_register(cdev); + if (ret) + goto out_runtime_disable; + + return ret; + +out_runtime_disable: + pm_runtime_disable(cdev->dev); +fail: + phytium_can_free_dev(cdev->net); + return ret; +} + +static __maybe_unused int phytium_can_plat_suspend(struct device *dev) +{ + return phytium_can_suspend(dev); +} + +static __maybe_unused int phytium_can_plat_resume(struct device *dev) +{ + return phytium_can_resume(dev); +} + +static int phytium_can_plat_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct phytium_can_dev *cdev = netdev_priv(dev); + + phytium_can_unregister(cdev); + + phytium_can_free_dev(cdev->net); + + return 0; +} + +static int __maybe_unused phytium_can_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + clk_disable_unprepare(cdev->clk); + + return 0; +} + +static int __maybe_unused phytium_can_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct phytium_can_dev *cdev = netdev_priv(ndev); + + return clk_prepare_enable(cdev->clk); +} + +static const struct dev_pm_ops phytium_can_plat_pm_ops = { + SET_RUNTIME_PM_OPS(phytium_can_runtime_suspend, + phytium_can_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(phytium_can_suspend, phytium_can_resume) +}; + +static struct platform_driver phytium_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_match_ptr(phytium_can_of_ids), + .acpi_match_table = ACPI_PTR(phytium_can_acpi_ids), + .pm = &phytium_can_plat_pm_ops, + }, + .probe = phytium_can_plat_probe, + .remove = phytium_can_plat_remove, +}; + +module_platform_driver(phytium_can_plat_driver); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium CAN driver for IO Mapped controllers"); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index efb44d5ab02156282f477fde76e8ca3bf4675f1b..1968936e2268771c502eb11cbd908c7e740355a7 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -83,6 +83,7 @@ #define GEM_USRIO 0x000c /* User IO */ #define GEM_DMACFG 0x0010 /* DMA Configuration */ #define GEM_JML 0x0048 /* Jumbo Max Length */ +#define GEM_HS_MAC_CONFIG 0x0050 /* GEM high speed config */ #define GEM_HRB 0x0080 /* Hash Bottom */ #define GEM_HRT 0x0084 /* Hash Top */ #define GEM_SA1B 0x0088 /* Specific1 Bottom */ @@ -167,6 +168,8 @@ #define GEM_DCFG7 0x0298 /* Design Config 7 */ #define GEM_DCFG8 0x029C /* Design Config 8 */ #define GEM_DCFG10 0x02A4 /* Design Config 10 */ +#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */ +#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */ #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ @@ -202,6 +205,31 @@ #define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) #define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) +#define GEM_SRC_SEL_LN 0x1C04 +#define GEM_DIV_SEL0_LN 0x1C08 +#define GEM_DIV_SEL1_LN 0x1C0C +#define GEM_PMA_XCVR_POWER_STATE 0x1C10 +#define GEM_SPEED_MODE 0x1C14 +#define GEM_MII_SELECT 0x1C18 +#define GEM_SEL_MII_ON_RGMII 0x1C1C +#define GEM_TX_CLK_SEL0 0x1C20 +#define GEM_TX_CLK_SEL1 0x1C24 +#define GEM_TX_CLK_SEL2 0x1C28 
+#define GEM_TX_CLK_SEL3 0x1C2C +#define GEM_RX_CLK_SEL0 0x1C30 +#define GEM_RX_CLK_SEL1 0x1C34 +#define GEM_CLK_250M_DIV10_DIV100_SEL 0x1C38 +#define GEM_TX_CLK_SEL5 0x1C3C +#define GEM_TX_CLK_SEL6 0x1C40 +#define GEM_RX_CLK_SEL4 0x1C44 +#define GEM_RX_CLK_SEL5 0x1C48 +#define GEM_TX_CLK_SEL3_0 0x1C70 +#define GEM_TX_CLK_SEL4_0 0x1C74 +#define GEM_RX_CLK_SEL3_0 0x1C78 +#define GEM_RX_CLK_SEL4_0 0x1C7C +#define GEM_RGMII_TX_CLK_SEL0 0x1C80 +#define GEM_RGMII_TX_CLK_SEL1 0x1C84 + /* Bitfields in NCR */ #define MACB_LB_OFFSET 0 /* reserved */ #define MACB_LB_SIZE 1 @@ -458,6 +486,10 @@ #define MACB_REV_OFFSET 0 #define MACB_REV_SIZE 16 +/* Bitfield in HS_MAC_CONFIG */ +#define GEM_HS_MAC_SPEED_OFFSET 0 +#define GEM_HS_MAC_SPEED_SIZE 3 + /* Bitfields in DCFG1. */ #define GEM_IRQCOR_OFFSET 23 #define GEM_IRQCOR_SIZE 1 @@ -497,13 +529,27 @@ #define GEM_RXBD_RDBUFF_OFFSET 8 #define GEM_RXBD_RDBUFF_SIZE 4 +/* Bitfields in USX_CONTROL. */ +#define GEM_USX_CTRL_SPEED_OFFSET 14 +#define GEM_USX_CTRL_SPEED_SIZE 3 +#define GEM_SERDES_RATE_OFFSET 12 +#define GEM_SERDES_RATE_SIZE 2 +#define GEM_RX_SCR_BYPASS_OFFSET 9 +#define GEM_RX_SCR_BYPASS_SIZE 1 +#define GEM_TX_SCR_BYPASS_OFFSET 8 +#define GEM_TX_SCR_BYPASS_SIZE 1 +#define GEM_TX_EN_OFFSET 1 +#define GEM_TX_EN_SIZE 1 +#define GEM_SIGNAL_OK_OFFSET 0 +#define GEM_SIGNAL_OK_SIZE 1 + /* Bitfields in TISUBN */ #define GEM_SUBNSINCR_OFFSET 0 #define GEM_SUBNSINCRL_OFFSET 24 -#define GEM_SUBNSINCRL_SIZE 8 +#define GEM_SUBNSINCRL_SIZE 8 #define GEM_SUBNSINCRH_OFFSET 0 -#define GEM_SUBNSINCRH_SIZE 16 -#define GEM_SUBNSINCR_SIZE 24 +#define GEM_SUBNSINCRH_SIZE 16 +#define GEM_SUBNSINCR_SIZE 24 /* Bitfields in TI */ #define GEM_NSINCR_OFFSET 0 @@ -631,6 +677,8 @@ #define GEM_CLK_DIV48 3 #define GEM_CLK_DIV64 4 #define GEM_CLK_DIV96 5 +#define GEM_CLK_DIV128 6 +#define GEM_CLK_DIV224 7 /* Constants for MAN register */ #define MACB_MAN_SOF 1 @@ -652,6 +700,7 @@ #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 +#define MACB_CAPS_SEL_CLK_HW 0x00001000 /* LSO settings */ #define MACB_LSO_UFO_ENABLE 0x01 @@ -1087,7 +1136,7 @@ struct macb_config { unsigned int dma_burst_length; int (*clk_init)(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, - struct clk **rx_clk); + struct clk **rx_clk, struct clk **tsu_clk); int (*init)(struct platform_device *pdev); int jumbo_max_len; }; @@ -1167,7 +1216,9 @@ struct macb { struct clk *hclk; struct clk *tx_clk; struct clk *rx_clk; + struct clk *tsu_clk; struct net_device *dev; + struct ncsi_dev *ndev; union { struct macb_stats macb; struct gem_stats gem; @@ -1180,6 +1231,8 @@ struct macb { int link; int speed; int duplex; + int use_ncsi; + int force_phy_mode; u32 caps; unsigned int dma_burst_length; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 50331b202f73ba794e1427657eb0d4c21304f916..b074e175650447aee519a5d171323b12f03ff0e4 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include "macb.h" #define MACB_RX_BUFFER_SIZE 128 @@ -75,6 +77,13 @@ #define GEM_MTU_MIN_SIZE ETH_MIN_MTU #define MACB_NETIF_LSO NETIF_F_TSO +#define HS_SPEED_100M 0 +#define HS_SPEED_1000M 1 +#define HS_SPEED_2500M 2 +#define HS_SPEED_5000M 3 +#define HS_SPEED_10000M 4 +#define MACB_SERDES_RATE_10G 1 + #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define 
MACB_WOL_ENABLED (0x1 << 1) @@ -405,12 +414,127 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) netdev_err(dev, "adjusting tx_clk failed.\n"); } +static int phytium_gem_sel_clk(struct macb *bp) +{ + int speed = 0; + + if (bp->phy_interface == PHY_INTERFACE_MODE_USXGMII) { + if (bp->speed == SPEED_10000) { + gem_writel(bp, SRC_SEL_LN, 0x1); /*0x1c04*/ + gem_writel(bp, DIV_SEL0_LN, 0x4); /*0x1c08*/ + gem_writel(bp, DIV_SEL1_LN, 0x1); /*0x1c0c*/ + gem_writel(bp, PMA_XCVR_POWER_STATE, 0x1); /*0x1c10*/ + speed = HS_SPEED_10000M; + } + } else if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { + if (bp->speed == SPEED_2500) { + gem_writel(bp, DIV_SEL0_LN, 0x1); /*0x1c08*/ + gem_writel(bp, DIV_SEL1_LN, 0x2); /*0x1c0c*/ + gem_writel(bp, PMA_XCVR_POWER_STATE, 0x1); /*0x1c10*/ + gem_writel(bp, TX_CLK_SEL0, 0x0); /*0x1c20*/ + gem_writel(bp, TX_CLK_SEL1, 0x1); /*0x1c24*/ + gem_writel(bp, TX_CLK_SEL2, 0x1); /*0x1c28*/ + gem_writel(bp, TX_CLK_SEL3, 0x1); /*0x1c2c*/ + gem_writel(bp, RX_CLK_SEL0, 0x1); /*0x1c30*/ + gem_writel(bp, RX_CLK_SEL1, 0x0); /*0x1c34*/ + gem_writel(bp, TX_CLK_SEL3_0, 0x0); /*0x1c70*/ + gem_writel(bp, TX_CLK_SEL4_0, 0x0); /*0x1c74*/ + gem_writel(bp, RX_CLK_SEL3_0, 0x0); /*0x1c78*/ + gem_writel(bp, RX_CLK_SEL4_0, 0x0); /*0x1c7c*/ + speed = HS_SPEED_2500M; + } else if (bp->speed == SPEED_1000) { + gem_writel(bp, DIV_SEL0_LN, 0x4); /*0x1c08*/ + gem_writel(bp, DIV_SEL1_LN, 0x8); /*0x1c0c*/ + gem_writel(bp, PMA_XCVR_POWER_STATE, 0x1); /*0x1c10*/ + gem_writel(bp, TX_CLK_SEL0, 0x0); /*0x1c20*/ + gem_writel(bp, TX_CLK_SEL1, 0x0); /*0x1c24*/ + gem_writel(bp, TX_CLK_SEL2, 0x0); /*0x1c28*/ + gem_writel(bp, TX_CLK_SEL3, 0x1); /*0x1c2c*/ + gem_writel(bp, RX_CLK_SEL0, 0x1); /*0x1c30*/ + gem_writel(bp, RX_CLK_SEL1, 0x0); /*0x1c34*/ + gem_writel(bp, TX_CLK_SEL3_0, 0x0); /*0x1c70*/ + gem_writel(bp, TX_CLK_SEL4_0, 0x0); /*0x1c74*/ + gem_writel(bp, RX_CLK_SEL3_0, 0x0); /*0x1c78*/ + gem_writel(bp, RX_CLK_SEL4_0, 0x0); /*0x1c7c*/ + speed = HS_SPEED_1000M; + } else if (bp->speed == SPEED_100 || bp->speed == SPEED_10) { + gem_writel(bp, DIV_SEL0_LN, 0x4); /*0x1c08*/ + gem_writel(bp, DIV_SEL1_LN, 0x8); /*0x1c0c*/ + gem_writel(bp, PMA_XCVR_POWER_STATE, 0x1); /*0x1c10*/ + gem_writel(bp, TX_CLK_SEL0, 0x0); /*0x1c20*/ + gem_writel(bp, TX_CLK_SEL1, 0x0); /*0x1c24*/ + gem_writel(bp, TX_CLK_SEL2, 0x1); /*0x1c28*/ + gem_writel(bp, TX_CLK_SEL3, 0x1); /*0x1c2c*/ + gem_writel(bp, RX_CLK_SEL0, 0x1); /*0x1c30*/ + gem_writel(bp, RX_CLK_SEL1, 0x0); /*0x1c34*/ + gem_writel(bp, TX_CLK_SEL3_0, 0x1); /*0x1c70*/ + gem_writel(bp, TX_CLK_SEL4_0, 0x0); /*0x1c74*/ + gem_writel(bp, RX_CLK_SEL3_0, 0x0); /*0x1c78*/ + gem_writel(bp, RX_CLK_SEL4_0, 0x1); /*0x1c7c*/ + speed = HS_SPEED_100M; + } + } else if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) { + if (bp->speed == SPEED_1000) { + gem_writel(bp, MII_SELECT, 0x1); /*0x1c18*/ + gem_writel(bp, SEL_MII_ON_RGMII, 0x0); /*0x1c1c*/ + gem_writel(bp, TX_CLK_SEL0, 0x0); /*0x1c20*/ + gem_writel(bp, TX_CLK_SEL1, 0x1); /*0x1c24*/ + gem_writel(bp, TX_CLK_SEL2, 0x0); /*0x1c28*/ + gem_writel(bp, TX_CLK_SEL3, 0x0); /*0x1c2c*/ + gem_writel(bp, RX_CLK_SEL0, 0x0); /*0x1c30*/ + gem_writel(bp, RX_CLK_SEL1, 0x1); /*0x1c34*/ + gem_writel(bp, CLK_250M_DIV10_DIV100_SEL, 0x0); /*0x1c38*/ + gem_writel(bp, RX_CLK_SEL5, 0x1); /*0x1c48*/ + gem_writel(bp, RGMII_TX_CLK_SEL0, 0x1); /*0x1c80*/ + gem_writel(bp, RGMII_TX_CLK_SEL1, 0x0); /*0x1c84*/ + speed = HS_SPEED_1000M; + } else if (bp->speed == SPEED_100) { + gem_writel(bp, MII_SELECT, 0x1); /*0x1c18*/ + gem_writel(bp, 
SEL_MII_ON_RGMII, 0x0); /*0x1c1c*/ + gem_writel(bp, TX_CLK_SEL0, 0x0); /*0x1c20*/ + gem_writel(bp, TX_CLK_SEL1, 0x1); /*0x1c24*/ + gem_writel(bp, TX_CLK_SEL2, 0x0); /*0x1c28*/ + gem_writel(bp, TX_CLK_SEL3, 0x0); /*0x1c2c*/ + gem_writel(bp, RX_CLK_SEL0, 0x0); /*0x1c30*/ + gem_writel(bp, RX_CLK_SEL1, 0x1); /*0x1c34*/ + gem_writel(bp, CLK_250M_DIV10_DIV100_SEL, 0x0); /*0x1c38*/ + gem_writel(bp, RX_CLK_SEL5, 0x1); /*0x1c48*/ + gem_writel(bp, RGMII_TX_CLK_SEL0, 0x0); /*0x1c80*/ + gem_writel(bp, RGMII_TX_CLK_SEL1, 0x0); /*0x1c84*/ + speed = HS_SPEED_100M; + } else { + gem_writel(bp, MII_SELECT, 0x1); /*0x1c18*/ + gem_writel(bp, SEL_MII_ON_RGMII, 0x0); /*0x1c1c*/ + gem_writel(bp, TX_CLK_SEL0, 0x0); /*0x1c20*/ + gem_writel(bp, TX_CLK_SEL1, 0x1); /*0x1c24*/ + gem_writel(bp, TX_CLK_SEL2, 0x0); /*0x1c28*/ + gem_writel(bp, TX_CLK_SEL3, 0x0); /*0x1c2c*/ + gem_writel(bp, RX_CLK_SEL0, 0x0); /*0x1c30*/ + gem_writel(bp, RX_CLK_SEL1, 0x1); /*0x1c34*/ + gem_writel(bp, CLK_250M_DIV10_DIV100_SEL, 0x1); /*0x1c38*/ + gem_writel(bp, RX_CLK_SEL5, 0x1); /*0x1c48*/ + gem_writel(bp, RGMII_TX_CLK_SEL0, 0x0); /*0x1c80*/ + gem_writel(bp, RGMII_TX_CLK_SEL1, 0x0); /*0x1c84*/ + speed = HS_SPEED_100M; + } + } else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) { + speed = HS_SPEED_100M; + gem_writel(bp, RX_CLK_SEL5, 0x1); /*0x1c48*/ + } + + /*HS_MAC_CONFIG(0x0050) provide rate to the external*/ + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, speed, + gem_readl(bp, HS_MAC_CONFIG))); + + return 0; +} + static void macb_handle_link_change(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = dev->phydev; unsigned long flags; - int status_change = 0; + int err, status_change = 0; spin_lock_irqsave(&bp->lock, flags); @@ -459,12 +583,25 @@ static void macb_handle_link_change(struct net_device *dev) */ macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); + /* phytium need hwclock */ + if (bp->caps & MACB_CAPS_SEL_CLK_HW) + phytium_gem_sel_clk(bp); + netif_carrier_on(dev); netdev_info(dev, "link up (%d/%s)\n", phydev->speed, phydev->duplex == DUPLEX_FULL ? 
"Full" : "Half"); + + if (bp->use_ncsi) { + err = ncsi_start_dev(bp->ndev); + if (err) + netdev_err(dev, "Ncsi start dev failed (error %d)\n", err); + } } else { + if (bp->use_ncsi) + ncsi_stop_dev(bp->ndev); + netif_carrier_off(dev); netdev_info(dev, "link down\n"); } @@ -1775,12 +1912,8 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) *skb = nskb; } - if (padlen) { - if (padlen >= ETH_FCS_LEN) - skb_put_zero(*skb, padlen - ETH_FCS_LEN); - else - skb_trim(*skb, ETH_FCS_LEN - padlen); - } + if (padlen > ETH_FCS_LEN) + skb_put_zero(*skb, padlen - ETH_FCS_LEN); add_fcs: /* set FCS to packet */ @@ -2137,6 +2270,9 @@ static u32 gem_mdc_clk_div(struct macb *bp) u32 config; unsigned long pclk_hz = clk_get_rate(bp->pclk); + if (!pclk_hz) + pclk_hz = 250000000; + if (pclk_hz <= 20000000) config = GEM_BF(CLK, GEM_CLK_DIV8); else if (pclk_hz <= 40000000) @@ -2247,6 +2383,31 @@ static void macb_configure_dma(struct macb *bp) } } +static int macb_usx_pcs_config(struct macb *bp) +{ + gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | + GEM_BIT(SIGNAL_OK)); + + return 0; +} + +static void macb_usx_pcs_link_up(struct macb *bp) +{ + u32 config; + + config = gem_readl(bp, USX_CONTROL); + if (bp->speed == SPEED_10000) { + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M, + gem_readl(bp, HS_MAC_CONFIG))); + config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config); + config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config); + } + + config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS)); + config |= GEM_BIT(TX_EN); + gem_writel(bp, USX_CONTROL, config); +} + static void macb_init_hw(struct macb *bp) { struct macb_queue *queue; @@ -2277,8 +2438,16 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, NCFGR, config); if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) gem_writel(bp, JML, bp->jumbo_max_len); - bp->speed = SPEED_10; - bp->duplex = DUPLEX_HALF; + + if (bp->phy_interface == PHY_INTERFACE_MODE_USXGMII) { + macb_usx_pcs_config(bp); + if (bp->link) + macb_usx_pcs_link_up(bp); + } else { + bp->speed = SPEED_10; + bp->duplex = DUPLEX_HALF; + } + bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; if (bp->caps & MACB_CAPS_JUMBO) bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; @@ -2431,7 +2600,10 @@ static void macb_set_rx_mode(struct net_device *dev) static int macb_open(struct net_device *dev) { struct macb *bp = netdev_priv(dev); - size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; + /* adjust bufsz to be at least the size of a standard frame, + * to fix rx error when set small size mtu. + */ + size_t bufsz = (dev->mtu < ETH_DATA_LEN ? 
ETH_DATA_LEN : dev->mtu) + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; struct macb_queue *queue; unsigned int q; int err; @@ -2441,10 +2613,6 @@ static int macb_open(struct net_device *dev) /* carrier starts down */ netif_carrier_off(dev); - /* if the phy is not yet register, retry later*/ - if (!dev->phydev) - return -EAGAIN; - /* RX buffers initialization */ macb_init_rx_buffer_size(bp, bufsz); @@ -2461,8 +2629,12 @@ static int macb_open(struct net_device *dev) bp->macbgem_ops.mog_init_rings(bp); macb_init_hw(bp); - /* schedule a link state check */ - phy_start(dev->phydev); + if (dev->phydev) + /* schedule a link state check */ + phy_start(dev->phydev); + + if (bp->link) + netif_carrier_on(dev); netif_tx_start_all_queues(dev); @@ -2803,14 +2975,17 @@ static unsigned int gem_get_tsu_rate(struct macb *bp) unsigned int tsu_rate; tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); - if (!IS_ERR(tsu_clk)) + if (!IS_ERR_OR_NULL(tsu_clk)) { tsu_rate = clk_get_rate(tsu_clk); /* try pclk instead */ - else if (!IS_ERR(bp->pclk)) { + } else if (!IS_ERR_OR_NULL(bp->pclk)) { tsu_clk = bp->pclk; tsu_rate = clk_get_rate(tsu_clk); - } else + } else if (has_acpi_companion(&bp->pdev->dev)) { + tsu_rate = 250000000; + } else { return -ENOTSUPP; + } return tsu_rate; } @@ -3280,6 +3455,8 @@ static const struct net_device_ops macb_netdev_ops = { #endif .ndo_set_features = macb_set_features, .ndo_features_check = macb_features_check, + .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ncsi_vlan_rx_kill_vid, }; /* Configure peripheral capabilities according to device tree @@ -3348,11 +3525,20 @@ static void macb_probe_queues(void __iomem *mem, static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, - struct clk **rx_clk) + struct clk **rx_clk, struct clk **tsu_clk) { struct macb_platform_data *pdata; int err; - + /* clk get not support ACPI */ + if (has_acpi_companion(&pdev->dev)) { + dev_info(&pdev->dev, "ACPI skip get macb clk\n"); + *pclk = NULL; + *hclk = NULL; + *tx_clk = NULL; + *rx_clk = NULL; + *tsu_clk = NULL; + return 0; + } pdata = dev_get_platdata(&pdev->dev); if (pdata) { *pclk = pdata->pclk; @@ -3388,6 +3574,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, if (IS_ERR(*rx_clk)) *rx_clk = NULL; + *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk"); + if (IS_ERR(*tsu_clk)) + *tsu_clk = NULL; + err = clk_prepare_enable(*pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); @@ -3412,8 +3602,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, goto err_disable_txclk; } + err = clk_prepare_enable(*tsu_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err); + goto err_disable_rxclk; + } + return 0; +err_disable_rxclk: + clk_disable_unprepare(*rx_clk); + err_disable_txclk: clk_disable_unprepare(*tx_clk); @@ -3527,6 +3726,12 @@ static int macb_init(struct platform_device *pdev) /* Checksum offload is only available on gem with packet buffer */ if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + + if (bp->use_ncsi) { + dev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + if (bp->caps & MACB_CAPS_SG_DISABLED) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; @@ -3864,13 +4069,14 @@ static const struct net_device_ops at91ether_netdev_ops = { static int at91ether_clk_init(struct platform_device *pdev, 
struct clk **pclk, struct clk **hclk, struct clk **tx_clk, - struct clk **rx_clk) + struct clk **rx_clk, struct clk **tsu_clk) { int err; *hclk = NULL; *tx_clk = NULL; *rx_clk = NULL; + *tsu_clk = NULL; *pclk = devm_clk_get(&pdev->dev, "ether_clk"); if (IS_ERR(*pclk)) @@ -3986,6 +4192,18 @@ static const struct macb_config zynq_config = { .init = macb_init, }; +static const struct macb_config phytium_config = { + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | + MACB_CAPS_JUMBO | + MACB_CAPS_GEM_HAS_PTP | + MACB_CAPS_BD_RD_PREFETCH | + MACB_CAPS_SEL_CLK_HW, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .jumbo_max_len = 16360, +}; + static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,at32ap7000-macb" }, { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, @@ -4001,11 +4219,23 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,emac", .data = &emac_config }, { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, + { .compatible = "cdns,phytium-gem", .data = &phytium_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); #endif /* CONFIG_OF */ +#ifdef CONFIG_ACPI +static const struct acpi_device_id macb_acpi_ids[] = { + { .id = "PHYT0036", .driver_data = (kernel_ulong_t)&phytium_config }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, macb_acpi_ids); +#else +#define macb_acpi_ids NULL +#endif + static const struct macb_config default_gem_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | @@ -4016,19 +4246,46 @@ static const struct macb_config default_gem_config = { .jumbo_max_len = 10240, }; +static void gem_ncsi_handler(struct ncsi_dev *nd) +{ + if (unlikely(nd->state != ncsi_dev_state_functional)) + return; + + netdev_dbg(nd->dev, "NCSI interface %s\n", + nd->link_up ? 
"up" : "down"); +} + +static int macb_get_phy_mode(struct platform_device *pdev) +{ + const char *pm; + int err, i; + + err = device_property_read_string(&pdev->dev, "phy-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phy_modes(i))) + return i; + } + + return -ENODEV; +} + static int macb_probe(struct platform_device *pdev) { const struct macb_config *macb_config = &default_gem_config; int (*clk_init)(struct platform_device *, struct clk **, - struct clk **, struct clk **, struct clk **) - = macb_config->clk_init; + struct clk **, struct clk **, struct clk **, + struct clk **) = macb_config->clk_init; int (*init)(struct platform_device *) = macb_config->init; struct device_node *np = pdev->dev.of_node; struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; + struct clk *tsu_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; - struct phy_device *phydev; + struct phy_device *phydev = NULL; struct net_device *dev; struct resource *regs; void __iomem *mem; @@ -4050,9 +4307,18 @@ static int macb_probe(struct platform_device *pdev) clk_init = macb_config->clk_init; init = macb_config->init; } + } else if (has_acpi_companion(&pdev->dev)) { + const struct acpi_device_id *match; + + match = acpi_match_device(macb_acpi_ids, &pdev->dev); + if (match && match->driver_data) { + macb_config = (void *)match->driver_data; + clk_init = macb_config->clk_init; + init = macb_config->init; + } } - err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); + err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); if (err) return err; @@ -4089,11 +4355,12 @@ static int macb_probe(struct platform_device *pdev) bp->hclk = hclk; bp->tx_clk = tx_clk; bp->rx_clk = rx_clk; + bp->tsu_clk = tsu_clk; if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; bp->wol = 0; - if (of_get_property(np, "magic-packet", NULL)) + if (device_property_read_bool(&pdev->dev, "magic-packet")) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); @@ -4151,7 +4418,7 @@ static int macb_probe(struct platform_device *pdev) } } - err = of_get_phy_mode(np); + err = macb_get_phy_mode(pdev); if (err < 0) { pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->is_rmii) @@ -4162,16 +4429,40 @@ static int macb_probe(struct platform_device *pdev) bp->phy_interface = err; } + if (bp->phy_interface == PHY_INTERFACE_MODE_USXGMII) { + bp->link = 1; + bp->duplex = 1; + bp->speed = SPEED_10000; + } + /* IP specific init */ err = init(pdev); if (err) goto err_out_free_netdev; - err = macb_mii_init(bp); - if (err) - goto err_out_free_netdev; + if (device_property_read_bool(&pdev->dev, "use-mii")) { + if (device_property_read_bool(&pdev->dev, "force-phy-mode")) { + bp->force_phy_mode = 1; + } + err = macb_mii_init(bp); + if (err) + goto err_out_free_netdev; + phydev = dev->phydev; + } - phydev = dev->phydev; + if (device_property_read_bool(&pdev->dev, "use-ncsi")) { + if (!IS_ENABLED(CONFIG_NET_NCSI)) { + dev_err(&pdev->dev, "NCSI stack not enabled\n"); + goto err_out_free_netdev; + } + dev_notice(&pdev->dev, "Using NCSI interface\n"); + bp->use_ncsi = 1; + bp->ndev = ncsi_register_dev(dev, gem_ncsi_handler); + if (!bp->ndev) + goto err_out_free_netdev; + } else { + bp->use_ncsi = 0; + } netif_carrier_off(dev); @@ -4184,7 +4475,8 @@ static int macb_probe(struct platform_device *pdev) tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, (unsigned long)bp); 
- phy_attached_info(phydev); + if (phydev) + phy_attached_info(phydev); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), @@ -4208,6 +4500,7 @@ static int macb_probe(struct platform_device *pdev) clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); clk_disable_unprepare(rx_clk); + clk_disable_unprepare(tsu_clk); return err; } @@ -4224,11 +4517,15 @@ static int macb_remove(struct platform_device *pdev) bp = netdev_priv(dev); if (dev->phydev) phy_disconnect(dev->phydev); - mdiobus_unregister(bp->mii_bus); + if (device_property_read_bool(&pdev->dev, "use-mii")) + mdiobus_unregister(bp->mii_bus); + if (bp->ndev) + ncsi_unregister_dev(bp->ndev); if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); dev->phydev = NULL; - mdiobus_free(bp->mii_bus); + if (bp->mii_bus) + mdiobus_free(bp->mii_bus); unregister_netdev(dev); tasklet_kill(&bp->hresp_err_tasklet); @@ -4236,6 +4533,7 @@ static int macb_remove(struct platform_device *pdev) clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); clk_disable_unprepare(bp->rx_clk); + clk_disable_unprepare(bp->tsu_clk); of_node_put(bp->phy_node); free_netdev(dev); } @@ -4261,6 +4559,7 @@ static int __maybe_unused macb_suspend(struct device *dev) clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); clk_disable_unprepare(bp->rx_clk); + clk_disable_unprepare(bp->tsu_clk); } return 0; @@ -4282,6 +4581,7 @@ static int __maybe_unused macb_resume(struct device *dev) clk_prepare_enable(bp->tx_clk); clk_prepare_enable(bp->rx_clk); } + clk_prepare_enable(bp->tsu_clk); netif_device_attach(netdev); @@ -4296,6 +4596,7 @@ static struct platform_driver macb_driver = { .driver = { .name = "macb", .of_match_table = of_match_ptr(macb_dt_ids), + .acpi_match_table = macb_acpi_ids, .pm = &macb_pm_ops, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index b3365b34cac7c8ae52e3b72ac6bc20f224896b68..99d821c727f1666c53e217ade893202d8701a522 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -9,6 +9,7 @@ * warranty of any kind, whether express or implied. 
*/ +#include <linux/acpi.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> @@ -32,6 +33,12 @@ static int dwmac_generic_probe(struct platform_device *pdev) dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); } + } else if (has_acpi_companion(&pdev->dev)) { + plat_dat = stmmac_probe_config_acpi(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "acpi configuration failed\n"); + return PTR_ERR(plat_dat); + } } else { plat_dat = dev_get_platdata(&pdev->dev); if (!plat_dat) { @@ -85,6 +92,17 @@ static const struct of_device_id dwmac_generic_match[] = { }; MODULE_DEVICE_TABLE(of, dwmac_generic_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id dwmac_acpi_ids[] = { + { .id = "PHYT0004" }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, dwmac_acpi_ids); +#else +#define dwmac_acpi_ids NULL +#endif + static struct platform_driver dwmac_generic_driver = { .probe = dwmac_generic_probe, .remove = stmmac_pltfr_remove, @@ -92,6 +110,7 @@ static struct platform_driver dwmac_generic_driver = { .name = STMMAC_RESOURCE_NAME, .pm = &stmmac_pltfr_pm_ops, .of_match_table = of_match_ptr(dwmac_generic_match), + .acpi_match_table = ACPI_PTR(dwmac_acpi_ids), }, }; module_platform_driver(dwmac_generic_driver); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0a4d093adfc9394826ebb0790dc6613a91cc2292..6493192b6bfd603982cc3e973b5deb8b3454e64a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -24,6 +24,7 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ +#include <linux/acpi.h> #include <linux/clk.h> #include <linux/kernel.h> #include <linux/interrupt.h> diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 9762e687fc73ae18acccb97d6864e31f971bff14..c4a1715a5406fb18662ff1da634249cbb53bcc8b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -18,6 +18,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#include <linux/acpi.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/io.h> @@ -614,6 +617,248 @@ void stmmac_remove_config_dt(struct platform_device *pdev, EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); +#ifdef CONFIG_ACPI +/* + * Parse ACPI _DSD properties to set up the AXI bus parameters + */ +static struct stmmac_axi *stmmac_axi_setup_acpi(struct platform_device *pdev) +{ + struct fwnode_handle *np = dev_fwnode(&pdev->dev); + struct stmmac_axi *axi; + + axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); + if (!axi) + return ERR_PTR(-ENOMEM); + + axi->axi_lpi_en = fwnode_property_read_bool(np, "snps,lpi_en"); + axi->axi_xit_frm = fwnode_property_read_bool(np, "snps,xit_frm"); + axi->axi_kbbe = fwnode_property_read_bool(np, "snps,axi_kbbe"); + axi->axi_fb = fwnode_property_read_bool(np, "snps,axi_fb"); + axi->axi_mb = fwnode_property_read_bool(np, "snps,axi_mb"); + axi->axi_rb = fwnode_property_read_bool(np, "snps,axi_rb"); + + if (fwnode_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt)) + axi->axi_wr_osr_lmt = 1; + if (fwnode_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) + axi->axi_rd_osr_lmt = 1; + fwnode_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + + return axi; +} + +/* + * Parse ACPI _DSD parameters for the multiple-queue configuration + */ +static void stmmac_mtl_setup_acpi(struct platform_device *pdev, +
struct plat_stmmacenet_data *plat) +{ + plat->rx_queues_to_use = 1; + plat->tx_queues_to_use = 1; + + /* + * First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need + * to always set this, otherwise Queue will be classified as AVB + * (because MTL_QUEUE_AVB = 0). + */ + plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; + + plat->rx_queues_cfg[0].use_prio = true; + + plat->rx_queues_cfg[0].pkt_route = 0x0; + + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; + + plat->tx_queues_cfg[0].use_prio = true; +} + +static int stmmac_acpi_phy(struct plat_stmmacenet_data *plat, + struct fwnode_handle *np, struct device *dev) +{ + plat->mdio_bus_data = devm_kzalloc(dev, + sizeof(struct stmmac_mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + return 0; +} + +int fw_get_phy_mode(struct fwnode_handle *np) +{ + const char *pm; + int err, i; + + err = fwnode_property_read_string(np, "phy-mode", &pm); + if (err < 0) + err = fwnode_property_read_string(np, "phy-connection-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phy_modes(i))) + return i; + } + + return -ENODEV; +} + +int stmmac_acpi_clock_setup(struct plat_stmmacenet_data *plat, + struct platform_device *pdev) +{ + struct fwnode_handle *np = dev_fwnode(&pdev->dev); + struct device *dev = &pdev->dev; + struct clk *clk; + u64 clk_freq = 0; + int err; + + err = fwnode_property_read_u64(np, "clock-frequency", &clk_freq); + if (err < 0) + clk_freq = 125000000; /* default to 125MHz */ + + plat->stmmac_clk = devm_clk_get(dev, dev_name(dev)); + if (IS_ERR(plat->stmmac_clk)) { + clk = clk_register_fixed_rate(dev, dev_name(dev), NULL, 0, clk_freq); + if (IS_ERR(clk)) + return PTR_ERR(clk); + err = clk_register_clkdev(clk, dev_name(dev), dev_name(dev)); + if (err) + return err; + plat->stmmac_clk = clk; + } + clk_prepare_enable(plat->stmmac_clk); + + plat->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(plat->pclk)) + plat->pclk = NULL; + clk_prepare_enable(plat->pclk); + + plat->clk_ptp_ref = devm_clk_get(dev, "ptp_ref"); + if (IS_ERR(plat->clk_ptp_ref)) { + plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); + plat->clk_ptp_ref = NULL; + } + + plat->stmmac_rst = devm_reset_control_get(dev, STMMAC_RESOURCE_NAME); + if (IS_ERR(plat->stmmac_rst)) { + dev_info(dev, "no reset control found\n"); + plat->stmmac_rst = NULL; + } + + return 0; +} + +/* + * Parse ACPI driver parameters + */ +struct plat_stmmacenet_data * +stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac) +{ + struct fwnode_handle *np; + struct plat_stmmacenet_data *plat; + struct stmmac_dma_cfg *dma_cfg; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return ERR_PTR(-ENOMEM); + + np = dev_fwnode(&pdev->dev); + + plat->interface = fw_get_phy_mode(np); + + /* Get the max speed of operation from the firmware node */ + if (fwnode_property_read_u32(np, "max-speed", &plat->max_speed)) + plat->max_speed = -1; + + if (fwnode_property_read_u32(np, "bus_id", &plat->bus_id)) + plat->bus_id = 2; + + /* Default to PHY auto-detection */ + plat->phy_addr = -1; + + /* "snps,phy-addr" is not a standard property. Mark it as deprecated + * and warn of its use. Remove this when PHY node support is added.
+ */ + if (fwnode_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) + dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); + + if (stmmac_acpi_phy(plat, np, &pdev->dev)) + return ERR_PTR(-ENODEV); + + fwnode_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); + fwnode_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); + if (plat->tx_fifo_size == 0) + plat->tx_fifo_size = 0x10000; + if (plat->rx_fifo_size == 0) + plat->rx_fifo_size = 0x10000; + + plat->force_sf_dma_mode = + fwnode_property_read_bool(np, "snps,force_sf_dma_mode"); + plat->en_tx_lpi_clockgating = + fwnode_property_read_bool(np, "snps,en-tx-lpi-clockgating"); + + /* Set the maxmtu to a default of JUMBO_LEN in case the + * parameter is not present. + */ + plat->maxmtu = JUMBO_LEN; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + /* Only to "snps,dwmac" */ + fwnode_property_read_u32(np, "max-frame-size", &plat->maxmtu); + fwnode_property_read_u32(np, "snps,multicast-filter-bins", + &plat->multicast_filter_bins); + fwnode_property_read_u32(np, "snps,perfect-filter-entries", + &plat->unicast_filter_entries); + plat->unicast_filter_entries = dwmac1000_validate_ucast_entries( + plat->unicast_filter_entries); + plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( + plat->multicast_filter_bins); + plat->has_gmac = 1; + plat->pmt = 1; + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); + if (!dma_cfg) + return ERR_PTR(-ENOMEM); + plat->dma_cfg = dma_cfg; + + fwnode_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + if (!dma_cfg->pbl) + dma_cfg->pbl = DEFAULT_DMA_PBL; + + fwnode_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl); + fwnode_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl); + dma_cfg->pblx8 = !fwnode_property_read_bool(np, "snps,no-pbl-x8"); + + dma_cfg->aal = fwnode_property_read_bool(np, "snps,aal"); + dma_cfg->fixed_burst = fwnode_property_read_bool(np, "snps,fixed-burst"); + dma_cfg->mixed_burst = fwnode_property_read_bool(np, "snps,mixed-burst"); + + plat->force_thresh_dma_mode = fwnode_property_read_bool(np, "snps,force_thresh_dma_mode"); + if (plat->force_thresh_dma_mode) + plat->force_sf_dma_mode = 0; + + fwnode_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); + + plat->axi = stmmac_axi_setup_acpi(pdev); + + stmmac_mtl_setup_acpi(pdev, plat); + + stmmac_acpi_clock_setup(plat,pdev); + + return plat; +} +#else +struct plat_stmmacenet_data * +stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_ACPI */ +EXPORT_SYMBOL_GPL(stmmac_probe_config_acpi); + int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) { @@ -624,33 +869,43 @@ int stmmac_get_platform_resources(struct platform_device *pdev, /* Get IRQ information early to have an ability to ask for deferred * probe if needed before we went too far with resource allocation. 
*/ - stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res->irq < 0) { - if (stmmac_res->irq != -EPROBE_DEFER) { - dev_err(&pdev->dev, - "MAC IRQ configuration information not found\n"); + if (pdev->dev.of_node) { + stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); + if (stmmac_res->irq < 0) { + if (stmmac_res->irq != -EPROBE_DEFER) { + dev_err(&pdev->dev, + "MAC IRQ configuration information not found\n"); + } + return stmmac_res->irq; } - return stmmac_res->irq; - } - /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (stmmac_res->wol_irq < 0) { - if (stmmac_res->wol_irq == -EPROBE_DEFER) + /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (stmmac_res->wol_irq < 0) { + if (stmmac_res->wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + stmmac_res->wol_irq = stmmac_res->irq; + } + + stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq == -EPROBE_DEFER) return -EPROBE_DEFER; + } else if (has_acpi_companion(&pdev->dev)) { + stmmac_res->irq = platform_get_irq(pdev, 0); + if (stmmac_res->irq < 0) { + dev_err(&pdev->dev, + "MAC IRQ configuration information not found\n"); + return stmmac_res->irq; + } + stmmac_res->wol_irq = stmmac_res->irq; + stmmac_res->lpi_irq = -1; } - stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index b72eb0de57b70199c25213ee1b93da856cac22c2..8e117ad0e42a8360a786293c0874f199bc4a0553 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -23,6 +23,8 @@ struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); +struct plat_stmmacenet_data * +stmmac_probe_config_acpi(struct platform_device *pdev, const char **mac); void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2386871e129494fc533a1aaeb176011941f329a1..fc3129f6989432463cfa4bafaa5e375a70a27e6b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -385,6 +385,12 @@ config MICROSEMI_PHY ---help--- Currently supports VSC8530, VSC8531, VSC8540 and VSC8541 PHYs +config MOTORCOMM_PHY + tristate "Motorcomm PHYs" + help + Enables support for Motorcomm network PHYs. + Currently supports the YT8010, YT8510, YT8511, YT8512, YT8521, YT8531, YT8614 and YT8618 PHYs.
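+ If unsure, say N.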
+ config NATIONAL_PHY tristate "National Semiconductor PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index f21cda9d865edee42ed74f1e1afb05e423b224e5..c71e5d34be55e4e42cfc8ef007e5169b1dad3bf7 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o obj-$(CONFIG_MICROSEMI_PHY) += mscc.o +obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_REALTEK_PHY) += realtek.o diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 59820164502eba50479301ae2813505956a28c76..953ff54dd5dfdcaafd8e8c6d6fd794e930d6163f 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -23,7 +23,6 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/gpio.h> -#include <linux/seqlock.h> #include <linux/idr.h> #include "swphy.h" @@ -36,7 +35,6 @@ struct fixed_mdio_bus { struct fixed_phy { int addr; struct phy_device *phydev; - seqcount_t seqcount; struct fixed_phy_status status; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; @@ -62,18 +60,15 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phy_addr) { struct fixed_phy_status state; - int s; - - do { - s = read_seqcount_begin(&fp->seqcount); - /* Issue callback if user registered it. */ - if (fp->link_update) - fp->link_update(fp->phydev->attached_dev, - &fp->status); - /* Check the GPIO for change in status */ - fixed_phy_update(fp); - state = fp->status; - } while (read_seqcount_retry(&fp->seqcount, s)); + + /* Issue callback if user registered it. */ + if (fp->link_update) + fp->link_update(fp->phydev->attached_dev, + &fp->status); + + /* Check the GPIO for change in status */ + fixed_phy_update(fp); + state = fp->status; return swphy_read_reg(reg_num, &state); } @@ -131,8 +126,6 @@ int fixed_phy_add(unsigned int irq, int phy_addr, if (!fp) return -ENOMEM; - seqcount_init(&fp->seqcount); - if (irq != PHY_POLL) fmb->mii_bus->irq[phy_addr] = irq; diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c new file mode 100755 index 0000000000000000000000000000000000000000..9d6d19f99bc8d688e078b7a4c361aa820d2a334f --- /dev/null +++ b/drivers/net/phy/motorcomm.c @@ -0,0 +1,2390 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * drivers/net/phy/motorcomm.c + * + * Driver for Motorcomm PHYs + * + * Author: yinghong.zhang + * + * Copyright (c) 2019 Motorcomm, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ * + * Support : Motorcomm Phys: + * Giga phys: yt8511, yt8521, yt8531, yt8614, yt8618 + * 100/10 Phys : yt8512, yt8512b, yt8510 + * Automotive 100Mb Phys : yt8010 + * Automotive 100/10 hyper range Phys: yt8510 + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/phy.h> +#ifndef LINUX_VERSION_CODE +#include <linux/version.h> +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +/* for wol feature, 20210604 */ +#include <linux/netdevice.h> + +#define YT_LINUX_MAJOR 2 +#define YT_LINUX_MINOR 2 +#define YT_LINUX_SUBVERSION 8661 +#define YT_LINUX_VERSIONID "2.2.8661" + +/******************************************** + **** configuration section begin ***********/ + +/* if the system depends on an ethernet packet to resume from sleep, + * please define this macro to 1; otherwise, define it to 0. + */ +#define SYS_WAKEUP_BASED_ON_ETH_PKT 1 + +/* to enable the system WOL feature of the phy, please define this macro to 1; + * otherwise, define it to 0. + */ +#define YTPHY_WOL_FEATURE_ENABLE 0 + +/* some GMACs need a clock input from the PHY, e.g. 125M; + * please enable this macro in that case. + * by default, it is set to 0 + * NOTE: this macro requires SYS_WAKEUP_BASED_ON_ETH_PKT to be set to 1 + */ +#define GMAC_CLOCK_INPUT_NEEDED 0 + +/* the max number of yt8521 chips on a pcb board; + * the most common case is only 1 chip per board, but + * by default, we support up to 8. + */ +#define YTPHY_BOARD_MAX_NUM_OF_CHIP_8521 8 +#define YTPHY_BOARD_MAX_NUM_OF_CHIP_8614 4 + +/* for YT8531 package A xtal init config */ +#define YTPHY8531A_XTAL_INIT (0) + +/**** configuration section end *********** + ******************************************/ + +/* no need to change below */ +#define MOTORCOMM_PHY_ID_MASK 0x00000fff +#define MOTORCOMM_PHY_ID_8531_MASK 0xffffffff +#define MOTORCOMM_MPHY_ID_MASK 0x0000ffff +#define MOTORCOMM_MPHY_ID_MASK_8614 0xffffffff +#define MOTORCOMM_PHY_ID_MASK_8821 0xffffffff + +#define PHY_ID_YT8010 0x00000309 +#define PHY_ID_YT8010AS 0x4f51eb19 +#define PHY_ID_YT8510 0x00000109 +#define PHY_ID_YT8511 0x0000010a +#define PHY_ID_YT8512 0x00000118 +#define PHY_ID_YT8512B 0x00000128 +#define PHY_ID_YT8521 0x0000011a +#define PHY_ID_YT8531S 0x4f51e91a +#define PHY_ID_YT8531 0x4f51e91b +#define PHY_ID_YT8614 0x4F51E899 +#define PHY_ID_YT8618 0x0000e889 +#define PHY_ID_YT8821 0x4f51ea10 + +#define REG_PHY_SPEC_STATUS 0x11 +#define REG_DEBUG_ADDR_OFFSET 0x1e +#define REG_DEBUG_DATA 0x1f + +#define YT8512_EXTREG_LED0 0x40c0 +#define YT8512_EXTREG_LED1 0x40c3 + +#define YT8512_EXTREG_SLEEP_CONTROL1 0x2027 + +#define YT_SOFTWARE_RESET 0x8000 + +#define YT8512_LED0_ACT_BLK_IND 0x1000 +#define YT8512_LED0_DIS_LED_AN_TRY 0x0001 +#define YT8512_LED0_BT_BLK_EN 0x0002 +#define YT8512_LED0_HT_BLK_EN 0x0004 +#define YT8512_LED0_COL_BLK_EN 0x0008 +#define YT8512_LED0_BT_ON_EN 0x0010 +#define YT8512_LED1_BT_ON_EN 0x0010 +#define YT8512_LED1_TXACT_BLK_EN 0x0100 +#define YT8512_LED1_RXACT_BLK_EN 0x0200 +#define YT8512_SPEED_MODE 0xc000 +#define YT8512_DUPLEX 0x2000 + +#define YT8512_SPEED_MODE_BIT 14 +#define YT8512_DUPLEX_BIT 13 +#define YT8512_EN_SLEEP_SW_BIT 15 + +#define YT8521_EXTREG_SLEEP_CONTROL1 0x27 +#define YT8521_EN_SLEEP_SW_BIT 15 + +#define YT8521_SPEED_MODE 0xc000 +#define YT8521_DUPLEX 0x2000 +#define YT8521_SPEED_MODE_BIT 14 +#define YT8521_DUPLEX_BIT 13 +#define YT8521_LINK_STATUS_BIT 10 + +/* based on yt8521 wol feature config register */ +#define YTPHY_UTP_INTR_REG 0x12 +/* WOL Feature Event Interrupt Enable */ +#define YTPHY_WOL_FEATURE_INTR BIT(6) + +/* Magic Packet MAC address registers */ +#define
YTPHY_WOL_FEATURE_MACADDR2_4_MAGIC_PACKET 0xa007 +#define YTPHY_WOL_FEATURE_MACADDR1_4_MAGIC_PACKET 0xa008 +#define YTPHY_WOL_FEATURE_MACADDR0_4_MAGIC_PACKET 0xa009 + +#define YTPHY_WOL_FEATURE_REG_CFG 0xa00a +#define YTPHY_WOL_FEATURE_TYPE_CFG BIT(0) /* WOL TYPE Config */ +#define YTPHY_WOL_FEATURE_ENABLE_CFG BIT(3) /* WOL Enable Config */ +#define YTPHY_WOL_FEATURE_INTR_SEL_CFG BIT(6) /* WOL Event Interrupt Enable Config */ +#define YTPHY_WOL_FEATURE_WIDTH1_CFG BIT(1) /* WOL Pulse Width Config */ +#define YTPHY_WOL_FEATURE_WIDTH2_CFG BIT(2) /* WOL Pulse Width Config */ + +#define YTPHY_REG_SPACE_UTP 0 +#define YTPHY_REG_SPACE_FIBER 2 + +enum ytphy_wol_feature_trigger_type_e { + YTPHY_WOL_FEATURE_PULSE_TRIGGER, + YTPHY_WOL_FEATURE_LEVEL_TRIGGER, + YTPHY_WOL_FEATURE_TRIGGER_TYPE_MAX +}; + +enum ytphy_wol_feature_pulse_width_e { + YTPHY_WOL_FEATURE_672MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_336MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_168MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_84MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_PULSE_WIDTH_MAX +}; + +struct ytphy_wol_feature_cfg { + bool enable; + int type; + int width; +}; + +#if (YTPHY_WOL_FEATURE_ENABLE) +#undef SYS_WAKEUP_BASED_ON_ETH_PKT +#define SYS_WAKEUP_BASED_ON_ETH_PKT 1 +#endif + +/* YT8521 polling mode */ +#define YT8521_PHY_MODE_FIBER 1 //fiber mode only +#define YT8521_PHY_MODE_UTP 2 //utp mode only +#define YT8521_PHY_MODE_POLL 3 //fiber and utp, poll mode + +/* below are for bitmap */ +#define YT_PHY_MODE_FIBER 1 //fiber/sgmii mode only +#define YT_PHY_MODE_UTP 2 //utp mode only +#define YT_PHY_MODE_QSGMII 4 //qsgmii mode only +#define YT_PHY_MODE_POLL (YT_PHY_MODE_FIBER | YT_PHY_MODE_UTP | YT_PHY_MODE_QSGMII) //qsgmii, fiber/sgmii and utp, poll mode + +/* support automatically check polling mode for yt8521 + * for Fiber only system, please define YT8521_PHY_MODE_CURR 1 + * for UTP only system, please define YT8521_PHY_MODE_CURR 2 + * for combo system, please define YT8521_PHY_MODE_CURR 3 + */ +#define YTPHY_861X_ABC_VER 0 +#if (YTPHY_861X_ABC_VER) +static int yt8614_get_port_from_phydev(struct phy_device *phydev); +#endif +static int yt8521_hw_strap_polling(struct phy_device *phydev); +static int yt8614_hw_strap_polling(struct phy_device *phydev); +#define YT8521_PHY_MODE_CURR yt8521_hw_strap_polling(phydev) +#define YT8614_PHY_MODE_CURR yt8614_hw_strap_polling(phydev) + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) +static int ytphy_config_init(struct phy_device *phydev) +{ + int val; + + val = phy_read(phydev, 3); + + return 0; +} +#endif + + +#if (KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE) +static inline void phy_lock_mdio_bus(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->bus->mdio_lock); +#else + mutex_lock(&phydev->mdio.bus->mdio_lock); +#endif +} + +static inline void phy_unlock_mdio_bus(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->bus->mdio_lock); +#else + mutex_unlock(&phydev->mdio.bus->mdio_lock); +#endif +} +#endif + +#if (KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE) +static inline int __phy_read(struct phy_device *phydev, u32 regnum) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct mii_bus *bus = phydev->bus; + int addr = phydev->addr; + return bus->read(bus, phydev->addr, regnum); +#else + struct mii_bus *bus = phydev->mdio.bus; + int addr = phydev->mdio.addr; +#endif + return bus->read(bus, addr, regnum); +} + +static inline int __phy_write(struct 
phy_device *phydev, u32 regnum, u16 val) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct mii_bus *bus = phydev->bus; + int addr = phydev->addr; +#else + struct mii_bus *bus = phydev->mdio.bus; + int addr = phydev->mdio.addr; +#endif + return bus->write(bus, addr, regnum, val); +} +#endif + +static int ytphy_read_ext(struct phy_device *phydev, u32 regnum) +{ + int ret; + + phy_lock_mdio_bus(phydev); + ret = __phy_write(phydev, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + goto err_handle; + + ret = __phy_read(phydev, REG_DEBUG_DATA); + if (ret < 0) + goto err_handle; + +err_handle: + phy_unlock_mdio_bus(phydev); + return ret; +} + +static int ytphy_write_ext(struct phy_device *phydev, u32 regnum, u16 val) +{ + int ret; + + phy_lock_mdio_bus(phydev); + ret = __phy_write(phydev, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + goto err_handle; + + ret = __phy_write(phydev, REG_DEBUG_DATA, val); + if (ret < 0) + goto err_handle; + +err_handle: + phy_unlock_mdio_bus(phydev); + return ret; +} + +static int ytphy_soft_reset(struct phy_device *phydev) +{ + int ret = 0, val = 0; + + val = phy_read(phydev, MII_BMCR); + if (val < 0) + return val; + + ret = phy_write(phydev, MII_BMCR, val | BMCR_RESET); + if (ret < 0) + return ret; + + return ret; +} + + +#if (YTPHY8531A_XTAL_INIT) +static int yt8531a_xtal_init(struct phy_device *phydev) +{ + int ret = 0; + int val = 0; + bool state = false; + + msleep(50); + + do { + ret = ytphy_write_ext(phydev, 0xa012, 0x88); + if (ret < 0) + return ret; + + msleep(100); + + val = ytphy_read_ext(phydev, 0xa012); + if (val < 0) + return val; + + usleep_range(10000, 20000); + } while (val != 0x88); + + ret = ytphy_write_ext(phydev, 0xa012, 0xc8); + if (ret < 0) + return ret; + + return ret; +} +#endif + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else +int yt8010_soft_reset(struct phy_device *phydev) +{ + ytphy_soft_reset(phydev); + + return 0; +} + +int yt8010AS_soft_reset(struct phy_device *phydev) +{ + int ret = 0; + + /* sgmii */ + ytphy_write_ext(phydev, 0xe, 1); + ret = ytphy_soft_reset(phydev); + if (ret < 0) { + ytphy_write_ext(phydev, 0xe, 0); + return ret; + } + + /* utp */ + ytphy_write_ext(phydev, 0xe, 0); + ret = ytphy_soft_reset(phydev); + if (ret < 0) + return ret; + + return 0; +} +#endif + +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) +int yt8010_aneg_done(struct phy_device *phydev) +{ + int val = 0; + + val = phy_read(phydev, 0x1); + val = phy_read(phydev, 0x1); + + return (val < 0) ? 
val : (val & BMSR_LSTATUS); +} +#endif + +static int yt8010_config_aneg(struct phy_device *phydev) +{ + phydev->speed = SPEED_100; + return 0; +} + +static int yt8010_read_status(struct phy_device *phydev) +{ + int ret = 0; + + ret = genphy_update_link(phydev); + if (ret) + return ret; + + /* for 8010, no definition mii reg 0x04, 0x11, here force 100/full */ + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; + + return 0; +} + +static int yt8010AS_config_init(struct phy_device *phydev) +{ + phydev->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int yt8512_led_init(struct phy_device *phydev) +{ + int ret; + int val; + int mask; + + val = ytphy_read_ext(phydev, YT8512_EXTREG_LED0); + if (val < 0) + return val; + + val |= YT8512_LED0_ACT_BLK_IND; + + mask = YT8512_LED0_DIS_LED_AN_TRY | YT8512_LED0_BT_BLK_EN | + YT8512_LED0_HT_BLK_EN | YT8512_LED0_COL_BLK_EN | + YT8512_LED0_BT_ON_EN; + val &= ~mask; + + ret = ytphy_write_ext(phydev, YT8512_EXTREG_LED0, val); + if (ret < 0) + return ret; + + val = ytphy_read_ext(phydev, YT8512_EXTREG_LED1); + if (val < 0) + return val; + + val |= YT8512_LED1_BT_ON_EN; + + mask = YT8512_LED1_TXACT_BLK_EN | YT8512_LED1_RXACT_BLK_EN; + val &= ~mask; + + ret = ytphy_write_ext(phydev, YT8512_EXTREG_LED1, val); + + return ret; +} + +static int yt8512_config_init(struct phy_device *phydev) +{ + int ret; + int val; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + ret = yt8512_led_init(phydev); + + /* disable auto sleep */ + val = ytphy_read_ext(phydev, YT8512_EXTREG_SLEEP_CONTROL1); + if (val < 0) + return val; + + val &= (~BIT(YT8512_EN_SLEEP_SW_BIT)); + + ret = ytphy_write_ext(phydev, YT8512_EXTREG_SLEEP_CONTROL1, val); + if (ret < 0) + return ret; + + return ret; +} + +static int yt8512_read_status(struct phy_device *phydev) +{ + int ret; + int val; + int speed, speed_mode, duplex; + + ret = genphy_update_link(phydev); + if (ret) + return ret; + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + duplex = (val & YT8512_DUPLEX) >> YT8512_DUPLEX_BIT; + speed_mode = (val & YT8512_SPEED_MODE) >> YT8512_SPEED_MODE_BIT; + switch (speed_mode) { + case 0: + speed = SPEED_10; + break; + case 1: + speed = SPEED_100; + break; + case 2: + case 3: + default: +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else + speed = SPEED_UNKNOWN; +#endif + break; + } + + phydev->speed = speed; + phydev->duplex = duplex; + + return 0; +} + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else +int yt8521_soft_reset(struct phy_device *phydev) +{ + int ret, val; + + if (YT8521_PHY_MODE_CURR == YT8521_PHY_MODE_UTP) { + ytphy_write_ext(phydev, 0xa000, 0); + ytphy_soft_reset(phydev); + } + + if (YT8521_PHY_MODE_CURR == YT8521_PHY_MODE_FIBER) { + ytphy_write_ext(phydev, 0xa000, 2); + ytphy_soft_reset(phydev); + + ytphy_write_ext(phydev, 0xa000, 0); + } + + if (YT8521_PHY_MODE_CURR == YT8521_PHY_MODE_POLL) { + val = ytphy_read_ext(phydev, 0xa001); + ytphy_write_ext(phydev, 0xa001, (val & ~0x8000)); + + ytphy_write_ext(phydev, 0xa000, 0); + ret = ytphy_soft_reset(phydev); + } + + return 0; +} +#endif + +#if GMAC_CLOCK_INPUT_NEEDED +static int ytphy_mii_rd_ext(struct mii_bus *bus, int phy_id, u32 regnum) +{ + int ret; + int val; + + ret = bus->write(bus, phy_id, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + return ret; + + val = bus->read(bus, phy_id, 
REG_DEBUG_DATA); + + return val; +} + +static int ytphy_mii_wr_ext(struct mii_bus *bus, + int phy_id, + u32 regnum, + u16 val) +{ + int ret; + + ret = bus->write(bus, phy_id, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + return ret; + + ret = bus->write(bus, phy_id, REG_DEBUG_DATA, val); + + return ret; +} + +int yt8511_config_dis_txdelay(struct mii_bus *bus, int phy_id) +{ + int ret; + int val; + + /* disable auto sleep */ + val = ytphy_mii_rd_ext(bus, phy_id, 0x27); + if (val < 0) + return val; + + val &= (~BIT(15)); + + ret = ytphy_mii_wr_ext(bus, phy_id, 0x27, val); + if (ret < 0) + return ret; + + /* enable RXC clock when no wire plug */ + val = ytphy_mii_rd_ext(bus, phy_id, 0xc); + if (val < 0) + return val; + + /* ext reg 0xc b[7:4] + * Tx Delay time = 150ps * N - 250ps + */ + val &= ~(0xf << 4); + ret = ytphy_mii_wr_ext(bus, phy_id, 0xc, val); + + return ret; +} + +int yt8511_config_out_125m(struct mii_bus *bus, int phy_id) +{ + int ret; + int val; + + /* disable auto sleep */ + val = ytphy_mii_rd_ext(bus, phy_id, 0x27); + if (val < 0) + return val; + + val &= (~BIT(15)); + + ret = ytphy_mii_wr_ext(bus, phy_id, 0x27, val); + if (ret < 0) + return ret; + + /* enable RXC clock when no wire plug */ + val = ytphy_mii_rd_ext(bus, phy_id, 0xc); + if (val < 0) + return val; + + /* ext reg 0xc.b[2:1] + * 00-----25M from pll; + * 01---- 25M from xtl;(default) + * 10-----62.5M from pll; + * 11----125M from pll(here set to this value) + */ + val |= (3 << 1); + ret = ytphy_mii_wr_ext(bus, phy_id, 0xc, val); + +#ifdef YT_8511_INIT_TO_MASTER + /* for customer, please enable it based on demand. + * configure to master + */ + + /* master/slave config reg */ + val = bus->read(bus, phy_id, 0x9); + /* to be manual config and force to be master */ + val |= (0x3<<11); + /* takes effect after phy soft reset */ + ret = bus->write(bus, phy_id, 0x9, val); + if (ret < 0) + return ret; +#endif + + return ret; +} + +static int yt8511_config_init(struct phy_device *phydev) +{ + int ret; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", __func__, phydev->mdio.addr); +#endif + + return ret; +} +#endif /* GMAC_CLOCK_INPUT_NEEDED */ + +#if (YTPHY_WOL_FEATURE_ENABLE) +static int ytphy_switch_reg_space(struct phy_device *phydev, int space) +{ + int ret; + + if (space == YTPHY_REG_SPACE_UTP) + ret = ytphy_write_ext(phydev, 0xa000, 0); + else + ret = ytphy_write_ext(phydev, 0xa000, 2); + + return ret; +} + +static int ytphy_wol_feature_enable_cfg(struct phy_device *phydev, + struct ytphy_wol_feature_cfg wol_cfg) +{ + int ret = 0; + int val = 0; + + val = ytphy_read_ext(phydev, YTPHY_WOL_FEATURE_REG_CFG); + if (val < 0) + return val; + + if (wol_cfg.enable) { + val |= YTPHY_WOL_FEATURE_ENABLE_CFG; + + if (wol_cfg.type == YTPHY_WOL_FEATURE_LEVEL_TRIGGER) { + val &= ~YTPHY_WOL_FEATURE_TYPE_CFG; + val &= ~YTPHY_WOL_FEATURE_INTR_SEL_CFG; + } else if (wol_cfg.type == YTPHY_WOL_FEATURE_PULSE_TRIGGER) { + val |= YTPHY_WOL_FEATURE_TYPE_CFG; + val |= YTPHY_WOL_FEATURE_INTR_SEL_CFG; + + if (wol_cfg.width == YTPHY_WOL_FEATURE_84MS_PULSE_WIDTH) { + val &= ~YTPHY_WOL_FEATURE_WIDTH1_CFG; + val &= ~YTPHY_WOL_FEATURE_WIDTH2_CFG; + } else if (wol_cfg.width ==
YTPHY_WOL_FEATURE_168MS_PULSE_WIDTH) { + val |= YTPHY_WOL_FEATURE_WIDTH1_CFG; + val &= ~YTPHY_WOL_FEATURE_WIDTH2_CFG; + } else if (wol_cfg.width == YTPHY_WOL_FEATURE_336MS_PULSE_WIDTH) { + val &= ~YTPHY_WOL_FEATURE_WIDTH1_CFG; + val |= YTPHY_WOL_FEATURE_WIDTH2_CFG; + } else if (wol_cfg.width == YTPHY_WOL_FEATURE_672MS_PULSE_WIDTH) { + val |= YTPHY_WOL_FEATURE_WIDTH1_CFG; + val |= YTPHY_WOL_FEATURE_WIDTH2_CFG; + } + } + } else { + val &= ~YTPHY_WOL_FEATURE_ENABLE_CFG; + val &= ~YTPHY_WOL_FEATURE_INTR_SEL_CFG; + } + + ret = ytphy_write_ext(phydev, YTPHY_WOL_FEATURE_REG_CFG, val); + if (ret < 0) + return ret; + + return 0; +} + +static void ytphy_wol_feature_get(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int val = 0; + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + val = ytphy_read_ext(phydev, YTPHY_WOL_FEATURE_REG_CFG); + if (val < 0) + return; + + if (val & YTPHY_WOL_FEATURE_ENABLE_CFG) + wol->wolopts |= WAKE_MAGIC; + + //return; +} + +static int ytphy_wol_feature_set(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret, curr_reg_space, val; + struct ytphy_wol_feature_cfg wol_cfg; + struct net_device *p_attached_dev = phydev->attached_dev; + + memset(&wol_cfg, 0, sizeof(struct ytphy_wol_feature_cfg)); + curr_reg_space = ytphy_read_ext(phydev, 0xa000); + if (curr_reg_space < 0) + return curr_reg_space; + + /* Switch to phy UTP page */ + ret = ytphy_switch_reg_space(phydev, YTPHY_REG_SPACE_UTP); + if (ret < 0) + return ret; + + if (wol->wolopts & WAKE_MAGIC) { + /* Enable the WOL feature interrupt */ + val = phy_read(phydev, YTPHY_UTP_INTR_REG); + val |= YTPHY_WOL_FEATURE_INTR; + ret = phy_write(phydev, YTPHY_UTP_INTR_REG, val); + if (ret < 0) + return ret; + + /* Set the WOL feature config */ + wol_cfg.enable = true; + wol_cfg.type = YTPHY_WOL_FEATURE_PULSE_TRIGGER; + wol_cfg.width = YTPHY_WOL_FEATURE_672MS_PULSE_WIDTH; + ret = ytphy_wol_feature_enable_cfg(phydev, wol_cfg); + if (ret < 0) + return ret; + + /* Store the device address for the magic packet */ + ret = ytphy_write_ext(phydev, YTPHY_WOL_FEATURE_MACADDR2_4_MAGIC_PACKET, + ((p_attached_dev->dev_addr[0] << 8) | + p_attached_dev->dev_addr[1])); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, YTPHY_WOL_FEATURE_MACADDR1_4_MAGIC_PACKET, + ((p_attached_dev->dev_addr[2] << 8) | + p_attached_dev->dev_addr[3])); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, YTPHY_WOL_FEATURE_MACADDR0_4_MAGIC_PACKET, + ((p_attached_dev->dev_addr[4] << 8) | + p_attached_dev->dev_addr[5])); + if (ret < 0) + return ret; + } else { + wol_cfg.enable = false; + wol_cfg.type = YTPHY_WOL_FEATURE_TRIGGER_TYPE_MAX; + wol_cfg.width = YTPHY_WOL_FEATURE_PULSE_WIDTH_MAX; + ret = ytphy_wol_feature_enable_cfg(phydev, wol_cfg); + if (ret < 0) + return ret; + } + + /* Recover to previous register space page */ + ret = ytphy_switch_reg_space(phydev, curr_reg_space); + if (ret < 0) + return ret; + + return 0; +} +#endif /*(YTPHY_WOL_FEATURE_ENABLE)*/ + +static int yt8521_hw_strap_polling(struct phy_device *phydev) +{ + int val = 0; + + val = ytphy_read_ext(phydev, 0xa001) & 0x7; + switch (val) { + case 1: + case 4: + case 5: + return YT8521_PHY_MODE_FIBER; + case 2: + case 6: + case 7: + return YT8521_PHY_MODE_POLL; + case 3: + case 0: + default: + return YT8521_PHY_MODE_UTP; + } +} + + +static int yt8521_config_init(struct phy_device *phydev) +{ + int ret; + int val, hw_strap_mode; + +#if (YTPHY_WOL_FEATURE_ENABLE) + struct ethtool_wolinfo wol; + + /* set phy wol enable */ + memset(&wol, 0x0, 
sizeof(struct ethtool_wolinfo)); + wol.wolopts |= WAKE_MAGIC; + ytphy_wol_feature_set(phydev, &wol); +#endif + if (phydev->force_mode) { + hw_strap_mode = ytphy_read_ext(phydev, 0xa001); + hw_strap_mode &= 0x7ff8; + hw_strap_mode |= 0x140; + ytphy_write_ext(phydev, 0xa001, hw_strap_mode); + } + + phydev->irq = PHY_POLL; + /* NOTE: this function should not be called more than once for each chip. */ + hw_strap_mode = ytphy_read_ext(phydev, 0xa001) & 0x7; + + ytphy_write_ext(phydev, 0xa000, 0); +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + /* disable auto sleep */ + val = ytphy_read_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1); + if (val < 0) + return val; + + val &= (~BIT(YT8521_EN_SLEEP_SW_BIT)); + + ret = ytphy_write_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1, val); + if (ret < 0) + return ret; + + /* enable RXC clock when no wire plug */ + val = ytphy_read_ext(phydev, 0xc); + if (val < 0) + return val; + val &= ~(1 << 12); + ret = ytphy_write_ext(phydev, 0xc, val); + if (ret < 0) + return ret; + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d, strap mode = %d, polling mode = %d\n", + __func__, phydev->addr, hw_strap_mode, yt8521_hw_strap_polling(phydev)); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d, strap mode = %d, polling mode = %d\n", + __func__, phydev->mdio.addr, hw_strap_mode, yt8521_hw_strap_polling(phydev)); +#endif + return ret; +} + +/* for fiber mode, there is no 10M speed mode, and + * this function adjusts the reported status accordingly. + */ +static int yt8521_adjust_status(struct phy_device *phydev, int val, int is_utp) +{ + int speed_mode, duplex; +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + int speed = -1; +#else + int speed = SPEED_UNKNOWN; +#endif + + if (is_utp) + duplex = (val & YT8521_DUPLEX) >> YT8521_DUPLEX_BIT; + else + duplex = 1; + speed_mode = (val & YT8521_SPEED_MODE) >> YT8521_SPEED_MODE_BIT; + switch (speed_mode) { + case 0: + if (is_utp) + speed = SPEED_10; + break; + case 1: + speed = SPEED_100; + break; + case 2: + speed = SPEED_1000; + break; + case 3: + break; + default: +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else + speed = SPEED_UNKNOWN; +#endif + break; + } + + phydev->speed = speed; + phydev->duplex = duplex; + + return 0; +} + +/* for fiber mode, when speed is 100M, there is no definition for + * autonegotiation, so this function handles that case and returns + * 1, as the linux kernel's polling expects.
+ */ +int yt8521_aneg_done(struct phy_device *phydev) +{ + int link_fiber = 0, link_utp = 0; + + /* reading Fiber */ + ytphy_write_ext(phydev, 0xa000, 2); + link_fiber = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & (BIT(YT8521_LINK_STATUS_BIT))); + + /* reading UTP */ + ytphy_write_ext(phydev, 0xa000, 0); + if (!link_fiber) + link_utp = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & (BIT(YT8521_LINK_STATUS_BIT))); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link_fiber: %d, link_utp: %d\n", + __func__, phydev->addr, link_fiber, link_utp); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link_fiber: %d, link_utp: %d\n", + __func__, phydev->mdio.addr, link_fiber, link_utp); +#endif + return !!(link_fiber | link_utp); +} + +static int yt8521_read_status(struct phy_device *phydev) +{ + int ret; + int val; + int yt8521_fiber_latch_val; + int yt8521_fiber_curr_val; + int link; + int link_utp = 0, link_fiber = 0; + + if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER) { + /* reading UTP */ + ret = ytphy_write_ext(phydev, 0xa000, 0); + if (ret < 0) + return ret; + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + link = val & (BIT(YT8521_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + yt8521_adjust_status(phydev, val, 1); + } else { + link_utp = 0; + } + } //(YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER) + + if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_UTP) { + /* reading Fiber */ + ret = ytphy_write_ext(phydev, 0xa000, 2); + if (ret < 0) + return ret; + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + //note: below debug information is used to check multiple PHy ports. + + /* for fiber, from 1000m to 100m, there is not link down from 0x11, + * and check reg 1 to identify such case this is important for Linux + * kernel for that, missing linkdown event will cause problem. + */ + yt8521_fiber_latch_val = phy_read(phydev, MII_BMSR); + yt8521_fiber_curr_val = phy_read(phydev, MII_BMSR); + link = val & (BIT(YT8521_LINK_STATUS_BIT)); + if (link && yt8521_fiber_latch_val != yt8521_fiber_curr_val) { + link = 0; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->addr, yt8521_fiber_latch_val, yt8521_fiber_curr_val); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->mdio.addr, yt8521_fiber_latch_val, yt8521_fiber_curr_val); +#endif + } + + if (link) { + link_fiber = 1; + yt8521_adjust_status(phydev, val, 0); + } else { + link_fiber = 0; + } + } //(YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_UTP) + + if (link_utp || link_fiber) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link up, media: %s, mii reg 0x11 = 0x%x\n", + __func__, phydev->addr, (link_utp && link_fiber) ? "UNKNOWN MEDIA" : (link_utp ? "UTP" : "Fiber"), (unsigned int)val); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link up, media: %s, mii reg 0x11 = 0x%x\n", + __func__, phydev->mdio.addr, (link_utp && link_fiber) ? "UNKNOWN MEDIA" : (link_utp ? 
"UTP" : "Fiber"), (unsigned int)val); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link down\n", __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link down\n", __func__, phydev->mdio.addr); +#endif + phydev->link = 0; + } + + if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER) { //utp or combo + if (link_fiber) + ytphy_write_ext(phydev, 0xa000, 2); + if (link_utp) + ytphy_write_ext(phydev, 0xa000, 0); + } + return 0; +} + +int yt8521_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int value; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock in 4.19 */ +#endif + + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 2); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +int yt8521_resume(struct phy_device *phydev) +{ + int value, ret; + + /* disable auto sleep */ + value = ytphy_read_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1); + if (value < 0) + return value; + + value &= (~BIT(YT8521_EN_SLEEP_SW_BIT)); + + ret = ytphy_write_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1, value); + if (ret < 0) + return ret; + +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif + + if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER) { + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + } + + if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_UTP) { + ytphy_write_ext(phydev, 0xa000, 2); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 0); + } + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +static int yt8531S_config_init(struct phy_device *phydev) +{ + int ret = 0; + +#if (YTPHY8531A_XTAL_INIT) + ret = yt8531a_xtal_init(phydev); + if (ret < 0) + return ret; +#endif + + ret = yt8521_config_init(phydev); + + return ret; +} + +static int yt8531_config_init(struct phy_device *phydev) +{ + int ret = 0; + +#if (YTPHY8531A_XTAL_INIT) + ret = yt8531a_xtal_init(phydev); + if (ret < 0) + return ret; +#endif + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + return 0; +} + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else +int yt8618_soft_reset(struct phy_device *phydev) +{ + int ret; + + ytphy_write_ext(phydev, 0xa000, 0); + ret = ytphy_soft_reset(phydev); + if (ret < 0) + return ret; + + return 0; +} + +int yt8614_soft_reset(struct phy_device *phydev) +{ + int ret; + + /* qsgmii */ + ytphy_write_ext(phydev, 0xa000, 2); + ret = ytphy_soft_reset(phydev); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; 
+ } + + /* sgmii */ + ytphy_write_ext(phydev, 0xa000, 3); + ret = ytphy_soft_reset(phydev); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + + /* utp */ + ytphy_write_ext(phydev, 0xa000, 0); + ret = ytphy_soft_reset(phydev); + if (ret < 0) + return ret; + + return 0; +} +#endif + +static int yt8618_config_init(struct phy_device *phydev) +{ + int ret; + int val; + unsigned int retries = 12; +#if (YTPHY_861X_ABC_VER) + int port = 0; +#endif + + phydev->irq = PHY_POLL; + +#if (YTPHY_861X_ABC_VER) + port = yt8614_get_port_from_phydev(phydev); +#endif + + ytphy_write_ext(phydev, 0xa000, 0); +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + /* for utp to optimize signal */ + ret = ytphy_write_ext(phydev, 0x41, 0x33); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x42, 0x66); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x43, 0xaa); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x44, 0xd0d); + if (ret < 0) + return ret; + +#if (YTPHY_861X_ABC_VER) + if ((port == 2) || (port == 5)) { + ret = ytphy_write_ext(phydev, 0x57, 0x2929); + if (ret < 0) + return ret; + } +#endif + + val = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, val | BMCR_RESET); + do { + msleep(50); + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + } while ((ret & BMCR_RESET) && --retries); + if (ret & BMCR_RESET) + return -ETIMEDOUT; + + /* for QSGMII optimization */ + ytphy_write_ext(phydev, 0xa000, 0x02); + + ret = ytphy_write_ext(phydev, 0x3, 0x4F80); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + ret = ytphy_write_ext(phydev, 0xe, 0x4F80); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", __func__, phydev->mdio.addr); +#endif + return ret; +} + +static int yt8614_hw_strap_polling(struct phy_device *phydev) +{ + int val = 0; + + val = ytphy_read_ext(phydev, 0xa007) & 0xf; + switch (val) { + case 8: //4'b1000, Fiber x4 + Copper x4 + case 12: //4'b1100, QSGMII x1 + Combo x4 mode; + case 13: //4'b1101, QSGMII x1 + Combo x4 mode; + return (YT_PHY_MODE_FIBER | YT_PHY_MODE_UTP); + case 14: //4'b1110, QSGMII x1 + SGMII(MAC) x4 mode; + case 11: //4'b1011, QSGMII x1 + Fiber x4 mode; + return YT_PHY_MODE_FIBER; + case 9: //4'b1001, Reserved. 
+ case 10: //4'b1010, QSGMII x1 + Copper x4 mode + case 15: //4'b1111, SGMII(PHY) x4 + Copper x4 mode + default: + return YT_PHY_MODE_UTP; + } +} + +#if (YTPHY_861X_ABC_VER) +static int yt8614_get_port_from_phydev(struct phy_device *phydev) +{ + int tmp = ytphy_read_ext(phydev, 0xa0ff); + int phy_addr = 0; + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + phy_addr = (unsigned int)phydev->addr; +#else + phy_addr = (unsigned int)phydev->mdio.addr; +#endif + + if ((phy_addr - tmp) < 0) { + ytphy_write_ext(phydev, 0xa0ff, phy_addr); + tmp = phy_addr; + } + + return (phy_addr - tmp); +} +#endif + +static int yt8614_config_init(struct phy_device *phydev) +{ + int ret = 0; + int val, hw_strap_mode; + unsigned int retries = 12; +#if (YTPHY_861X_ABC_VER) + int port = 0; +#endif + phydev->irq = PHY_POLL; + + /* NOTE: this function should not be called more than one for each chip. */ + hw_strap_mode = ytphy_read_ext(phydev, 0xa007) & 0xf; + +#if (YTPHY_861X_ABC_VER) + port = yt8614_get_port_from_phydev(phydev); +#endif + + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + /* for utp to optimize signal */ + ret = ytphy_write_ext(phydev, 0x41, 0x33); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x42, 0x66); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x43, 0xaa); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x44, 0xd0d); + if (ret < 0) + return ret; + +#if (YTPHY_861X_ABC_VER) + if (port == 2) { + ret = ytphy_write_ext(phydev, 0x57, 0x2929); + if (ret < 0) + return ret; + } +#endif + + /* soft reset to take config effect */ + val = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, val | BMCR_RESET); + do { + msleep(50); + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + } while ((ret & BMCR_RESET) && --retries); + if (ret & BMCR_RESET) + return -ETIMEDOUT; + + /* for QSGMII optimization */ + ytphy_write_ext(phydev, 0xa000, 0x02); + ret = ytphy_write_ext(phydev, 0x3, 0x4F80); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + ret = ytphy_write_ext(phydev, 0xe, 0x4F80); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + + /* for SGMII optimization */ + ytphy_write_ext(phydev, 0xa000, 0x03); + ret = ytphy_write_ext(phydev, 0x3, 0x2420); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + ret = ytphy_write_ext(phydev, 0xe, 0x24a0); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + + /* back up to utp*/ + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d, chip mode: %d\n", __func__, phydev->addr, hw_strap_mode); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d, chip mode: %d\n", __func__, phydev->mdio.addr, hw_strap_mode); +#endif + return ret; +} + +int yt8618_aneg_done(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + return genphy_aneg_done(phydev); +#else + return 1; +#endif +} + +int yt8614_aneg_done(struct phy_device *phydev) +{ + int link_fiber = 0, link_utp = 0; + + if (YT8614_PHY_MODE_CURR & YT_PHY_MODE_FIBER) { + /* reading Fiber */ + ytphy_write_ext(phydev, 0xa000, 3); + link_fiber = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & (BIT(YT8521_LINK_STATUS_BIT))); + } + + if 
(YT8614_PHY_MODE_CURR & YT_PHY_MODE_UTP) { + /* reading UTP */ + ytphy_write_ext(phydev, 0xa000, 0); + link_utp = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & (BIT(YT8521_LINK_STATUS_BIT))); + } + + return !!(link_fiber | link_utp); +} + +static int yt8614_read_status(struct phy_device *phydev) +{ + int ret; + int val, yt8614_fiber_latch_val, yt8614_fiber_curr_val; + int link; + int link_utp = 0, link_fiber = 0; + + if (YT8614_PHY_MODE_CURR & YT_PHY_MODE_UTP) { + /* switch to utp and reading regs */ + ret = ytphy_write_ext(phydev, 0xa000, 0); + if (ret < 0) + return ret; + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + link = val & (BIT(YT8521_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + // here is same as 8521 and re-use the function; + yt8521_adjust_status(phydev, val, 1); + } else { + link_utp = 0; + } + } + + if (YT8614_PHY_MODE_CURR & YT_PHY_MODE_FIBER) { + /* reading Fiber/sgmii */ + ret = ytphy_write_ext(phydev, 0xa000, 3); + if (ret < 0) + return ret; + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + /* for fiber, from 1000m to 100m, there is not link down from 0x11, + * and check reg 1 to identify such case + */ + yt8614_fiber_latch_val = phy_read(phydev, MII_BMSR); + yt8614_fiber_curr_val = phy_read(phydev, MII_BMSR); + link = val & (BIT(YT8521_LINK_STATUS_BIT)); + if (link && yt8614_fiber_latch_val != yt8614_fiber_curr_val) { + link = 0; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->addr, yt8614_fiber_latch_val, yt8614_fiber_curr_val); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->mdio.addr, yt8614_fiber_latch_val, yt8614_fiber_curr_val); +#endif + } + + if (link) { + link_fiber = 1; + yt8521_adjust_status(phydev, val, 0); + } else { + link_fiber = 0; + } + } + + if (link_utp || link_fiber) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link up, media %s\n", + __func__, phydev->addr, (link_utp && link_fiber) ? "both UTP and Fiber" : (link_utp ? "UTP" : "Fiber")); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link up, media %s\n", + __func__, phydev->mdio.addr, (link_utp && link_fiber) ? "both UTP and Fiber" : (link_utp ? 
"UTP" : "Fiber")); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link down\n", __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link down\n", __func__, phydev->mdio.addr); +#endif + phydev->link = 0; + } + + if (YT8614_PHY_MODE_CURR & YT_PHY_MODE_UTP) { + if (link_utp) + ytphy_write_ext(phydev, 0xa000, 0); + } + return 0; +} + +static int yt8618_read_status(struct phy_device *phydev) +{ + int ret; + /* maybe for 8614 yt8521_fiber_latch_val, yt8521_fiber_curr_val; */ + int val; + int link; + int link_utp = 0, link_fiber = 0; + + /* switch to utp and reading regs */ + ret = ytphy_write_ext(phydev, 0xa000, 0); + if (ret < 0) + return ret; + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + link = val & (BIT(YT8521_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + yt8521_adjust_status(phydev, val, 1); + } else { + link_utp = 0; + } + + if (link_utp || link_fiber) + phydev->link = 1; + else + phydev->link = 0; + + return 0; +} + +int yt8618_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int value; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock in 4.19 */ +#endif + + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +int yt8618_resume(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int value; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif + + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +int yt8614_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int value; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock in 4.19 */ +#endif + + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 3); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +int yt8614_resume(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int value; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif + + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 3); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + 
mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /* !(SYS_WAKEUP_BASED_ON_ETH_PKT) */ + + return 0; +} + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else +int yt8821_soft_reset(struct phy_device *phydev) +{ + int ret, val; + + val = ytphy_read_ext(phydev, 0xa001); + ytphy_write_ext(phydev, 0xa001, (val & ~0x8000)); + + ytphy_write_ext(phydev, 0xa000, 0); + ret = ytphy_soft_reset(phydev); + + return ret; +} +#endif + +static int yt8821_init(struct phy_device *phydev) +{ + int ret = 0; + int val = 0; + + /* sds pll cfg */ + ret = ytphy_write_ext(phydev, 0xa050, 0x1000); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa000, 0x2); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x23, 0x47a1); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xbd, 0x3547); + if (ret < 0) + return ret; + + /* wait 1s */ + msleep(1000); + + /* calibration dcc */ + ret = ytphy_write_ext(phydev, 0xbd, 0xa547); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x29, 0x3003); + if (ret < 0) + return ret; + + /* sds driver swing */ + ret = ytphy_write_ext(phydev, 0x25, 0x788); + if (ret < 0) + return ret; + + /* phy cfg */ + ret = ytphy_write_ext(phydev, 0xa000, 0x0); + if (ret < 0) + return ret; + + /* phy template cfg */ + ret = ytphy_write_ext(phydev, 0x471, 0x4545); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x476, 0x4848); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x477, 0x4848); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x478, 0x4848); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x479, 0x4848); + if (ret < 0) + return ret; + + /* calibrate phy lc pll */ + ret = ytphy_write_ext(phydev, 0x600, 0x2300); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x8, 0x8041); + if (ret < 0) + return ret; + + /* prm_small_lng/med */ + ret = ytphy_write_ext(phydev, 0x388, 0x90); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x387, 0x90); + if (ret < 0) + return ret; + + /* echo_delay_cfg */ + ret = ytphy_write_ext(phydev, 0x3, 0xa026); + if (ret < 0) + return ret; + + /* pbo setting */ + ret = ytphy_write_ext(phydev, 0x47e, 0x3535); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x47f, 0x3535); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x480, 0x3535); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x481, 0x3535); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x483, 0x2a2a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x484, 0x2a2a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x485, 0x2a2a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x486, 0x2a2a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x488, 0x2121); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x489, 0x2121); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x48a, 0x2121); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x48b, 0x2121); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x48d, 0x1a1a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x48e, 0x1a1a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x48f, 0x1a1a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x490, 0x1a1a); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x492, 0x1515); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0x493, 0x1515); + if (ret < 0) 
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x494, 0x1515);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x495, 0x1515);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x497, 0x1111);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x498, 0x1111);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x499, 0x1111);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x49a, 0x1111);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x49c, 0x0d0d);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x49d, 0x0d0d);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x49e, 0x0d0d);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0x49f, 0x0d0d);
+	if (ret < 0)
+		return ret;
+	ret = ytphy_write_ext(phydev, 0xa052, 0x7);
+	if (ret < 0)
+		return ret;
+
+	/* fast link down cfg */
+	ret = ytphy_write_ext(phydev, 0x355, 0x7d07);
+	if (ret < 0)
+		return ret;
+
+	/* soft reset */
+	val = phy_read(phydev, MII_BMCR);
+	if (val < 0)
+		return val;
+	ret = phy_write(phydev, MII_BMCR, val | BMCR_RESET);
+
+	return ret;
+}
+
+static int yt8821_config_init(struct phy_device *phydev)
+{
+	int ret;
+	int val, hw_strap_mode;
+
+	phydev->irq = PHY_POLL;
+
+	/* NOTE: this function should not be called more than once for each chip. */
+	hw_strap_mode = ytphy_read_ext(phydev, 0xa001) & 0x7;
+
+	ytphy_write_ext(phydev, 0xa000, 0);
+#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE)
+	ret = ytphy_config_init(phydev);
+#else
+	ret = genphy_config_init(phydev);
+#endif
+	if (ret < 0)
+		return ret;
+
+	ret = yt8821_init(phydev);
+	if (ret < 0)
+		return ret;
+
+	/* disable auto sleep */
+	val = ytphy_read_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1);
+	if (val < 0)
+		return val;
+
+	val &= (~BIT(YT8521_EN_SLEEP_SW_BIT));
+
+	ret = ytphy_write_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1, val);
+	if (ret < 0)
+		return ret;
+
+#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE)
+	netdev_info(phydev->attached_dev, "%s done, phy addr: %d, strap mode = %d\n", __func__, phydev->addr, hw_strap_mode);
+#else
+	netdev_info(phydev->attached_dev, "%s done, phy addr: %d, strap mode = %d\n", __func__, phydev->mdio.addr, hw_strap_mode);
+#endif
+
+	return ret;
+}
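+
+/*
+ * Example decode for yt8821_adjust_status() below (values assumed for
+ * illustration): bits {9,15,14} = 100 in REG_PHY_SPEC_STATUS gives
+ * speed_mode = (1 << 2) | 0 = 4, i.e. SPEED_2500, while 010 gives
+ * speed_mode = 2, i.e. SPEED_1000.
+ */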
+/* For fiber mode there is no 10M speed mode; this helper decodes
+ * speed/duplex from the status register with that in mind.
+ */
+static int yt8821_adjust_status(struct phy_device *phydev, int val, int is_utp)
+{
+	int speed_mode, duplex;
+	int speed_mode_bit15_14, speed_mode_bit9;
+#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE)
+	int speed = -1;
+#else
+	int speed = SPEED_UNKNOWN;
+#endif
+
+	if (is_utp)
+		duplex = (val & YT8512_DUPLEX) >> YT8521_DUPLEX_BIT;
+	else
+		duplex = 1;
+
+	/* Bit9-Bit15-Bit14 speed mode: 100 = 2.5G; 010 = 1000M; 001 = 100M; 000 = 10M */
+	speed_mode_bit15_14 = (val & YT8521_SPEED_MODE) >> YT8521_SPEED_MODE_BIT;
+	speed_mode_bit9 = (val & BIT(9)) >> 9;
+	speed_mode = (speed_mode_bit9 << 2) | speed_mode_bit15_14;
+	switch (speed_mode) {
+	case 0:
+		if (is_utp)
+			speed = SPEED_10;
+		break;
+	case 1:
+		speed = SPEED_100;
+		break;
+	case 2:
+		speed = SPEED_1000;
+		break;
+	case 4:
+		speed = SPEED_2500;
+		break;
+	default:
+#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE)
+		speed = -1;
+#else
+		speed = SPEED_UNKNOWN;
+#endif
+		break;
+	}
+
+	phydev->speed = speed;
+	phydev->duplex = duplex;
+
+	return 0;
+}
+
+static int yt8821_read_status(struct phy_device *phydev)
+{
+	int ret;
+	int val;
+	int yt8521_fiber_latch_val;
+	int yt8521_fiber_curr_val;
+	int link;
+	int link_utp = 0, link_fiber = 0;
+
+	if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER) {
+		/* reading UTP */
+		ret = ytphy_write_ext(phydev, 0xa000, 0);
+		if (ret < 0)
+			return ret;
+
+		val = phy_read(phydev, REG_PHY_SPEC_STATUS);
+		if (val < 0)
+			return val;
+
+		link = val & (BIT(YT8521_LINK_STATUS_BIT));
+		if (link) {
+			link_utp = 1;
+			yt8821_adjust_status(phydev, val, 1); /* speed (2500), duplex */
+		} else {
+			link_utp = 0;
+		}
+	} /* YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER */
+
+	if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_UTP) {
+		/* reading Fiber */
+		ret = ytphy_write_ext(phydev, 0xa000, 2);
+		if (ret < 0)
+			return ret;
+
+		val = phy_read(phydev, REG_PHY_SPEC_STATUS);
+		if (val < 0)
+			return val;
+
+		/* Note: the debug output below also helps when checking multiple PHY ports. */
+
+		/* For fiber, a change from 1000M to 100M does not report link
+		 * down in reg 0x11, so also check reg 1 (BMSR) to catch it;
+		 * a missed link-down event would confuse the kernel.
+		 */
+		yt8521_fiber_latch_val = phy_read(phydev, MII_BMSR);
+		yt8521_fiber_curr_val = phy_read(phydev, MII_BMSR);
+		link = val & (BIT(YT8521_LINK_STATUS_BIT));
+		if (link && yt8521_fiber_latch_val != yt8521_fiber_curr_val) {
+			link = 0;
+#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE)
+			netdev_info(phydev->attached_dev, "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n",
+				    __func__, phydev->addr, yt8521_fiber_latch_val, yt8521_fiber_curr_val);
+#else
+			netdev_info(phydev->attached_dev, "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n",
+				    __func__, phydev->mdio.addr, yt8521_fiber_latch_val, yt8521_fiber_curr_val);
+#endif
+		}
+
+		if (link) {
+			link_fiber = 1;
+			yt8821_adjust_status(phydev, val, 0);
+		} else {
+			link_fiber = 0;
+		}
+	} /* YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_UTP */
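+
+	/* The double MII_BMSR read above works because the BMSR link-status
+	 * bit is latched-low (IEEE 802.3): the first read returns the
+	 * latched value, the second the current one, so a mismatch means
+	 * the link dropped at some point since the last poll.
+	 */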
"UTP" : "Fiber"), (unsigned int)val); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link down\n", __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link down\n", __func__, phydev->mdio.addr); +#endif + + phydev->link = 0; + } + + if (YT8521_PHY_MODE_CURR != YT8521_PHY_MODE_FIBER) { + if (link_fiber) + ytphy_write_ext(phydev, 0xa000, 2); + if (link_utp) + ytphy_write_ext(phydev, 0xa000, 0); + } + return 0; +} + +#if (KERNEL_VERSION(5, 1, 21) < LINUX_VERSION_CODE) +static int yt8821_get_features(struct phy_device *phydev) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported, 1); + return genphy_read_abilities(phydev); +} +#endif + +static struct phy_driver ytphy_drvs[] = { + { + .phy_id = PHY_ID_YT8010, + .name = "YT8010 Automotive Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8010_soft_reset, +#endif + .config_aneg = yt8010_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8010_aneg_done, +#endif +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + .config_init = ytphy_config_init, +#else + .config_init = genphy_config_init, +#endif + .read_status = yt8010_read_status, + }, { + .phy_id = PHY_ID_YT8010AS, + .name = "YT8010AS Automotive Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8010AS_soft_reset, +#endif +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8010_aneg_done, +#endif + .config_init = yt8010AS_config_init, + .read_status = yt8010_read_status, + }, { + .phy_id = PHY_ID_YT8510, + .name = "YT8510 100/10Mb Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + .config_init = ytphy_config_init, +#else + .config_init = genphy_config_init, +#endif + .read_status = genphy_read_status, + }, { + .phy_id = PHY_ID_YT8511, + .name = "YT8511 Gigabit Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, +#if GMAC_CLOCK_INPUT_NEEDED + .config_init = yt8511_config_init, +#else +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + .config_init = ytphy_config_init, +#else + .config_init = genphy_config_init, +#endif +#endif + .read_status = genphy_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_YT8512, + .name = "YT8512 Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, + .config_init = yt8512_config_init, + .read_status = yt8512_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_YT8512B, + .name = "YT8512B Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, + .config_init = yt8512_config_init, + .read_status = yt8512_read_status, + .suspend = 
genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_YT8521, + .name = "YT8521 Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8521_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8521_aneg_done, +#endif + .config_init = yt8521_config_init, + .read_status = yt8521_read_status, + .suspend = yt8521_suspend, + .resume = yt8521_resume, +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, { + /* same as 8521 */ + .phy_id = PHY_ID_YT8531S, + .name = "YT8531S Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8521_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8521_aneg_done, +#endif + .config_init = yt8531S_config_init, + .read_status = yt8521_read_status, + .suspend = yt8521_suspend, + .resume = yt8521_resume, +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, { + /* same as 8511 */ + .phy_id = PHY_ID_YT8531, + .name = "YT8531 Gigabit Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, + + .config_init = yt8531_config_init, + .read_status = genphy_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, { + .phy_id = PHY_ID_YT8618, + .name = "YT8618 Ethernet", + .phy_id_mask = MOTORCOMM_MPHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8618_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8618_aneg_done, +#endif + .config_init = yt8618_config_init, + .read_status = yt8618_read_status, + .suspend = yt8618_suspend, + .resume = yt8618_resume, + }, + { + .phy_id = PHY_ID_YT8614, + .name = "YT8614 Ethernet", + .phy_id_mask = MOTORCOMM_MPHY_ID_MASK_8614, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8614_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8614_aneg_done, +#endif + .config_init = yt8614_config_init, + .read_status = yt8614_read_status, + .suspend = yt8614_suspend, + .resume = yt8614_resume, + }, + { + .phy_id = PHY_ID_YT8821, + .name = "YT8821 2.5Gb Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK_8821, +#if (KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE) + .features = PHY_GBIT_FEATURES, +#endif + .flags = PHY_POLL, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8821_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8521_aneg_done, +#endif +#if (KERNEL_VERSION(5, 1, 21) < LINUX_VERSION_CODE) + .get_features = yt8821_get_features, +#endif + .config_init = yt8821_config_init, + .read_status = yt8821_read_status, + .suspend = yt8521_suspend, + .resume = yt8521_resume, + 
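+		/*
+		 * The YT8821 reuses the YT8521 suspend/resume and aneg_done
+		 * callbacks; only init/config and 2.5G feature reporting
+		 * (yt8821_get_features) differ.
+		 */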
}, +}; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +static int ytphy_drivers_register(struct phy_driver *phy_drvs, int size) +{ + int i, j; + int ret; + + for (i = 0; i < size; i++) { + ret = phy_driver_register(&phy_drvs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (j = 0; j < i; j++) + phy_driver_unregister(&phy_drvs[j]); + + return ret; +} + +static void ytphy_drivers_unregister(struct phy_driver *phy_drvs, int size) +{ + int i; + + for (i = 0; i < size; i++) + phy_driver_unregister(&phy_drvs[i]); +} + +static int __init ytphy_init(void) +{ + return ytphy_drivers_register(ytphy_drvs, ARRAY_SIZE(ytphy_drvs)); +} + +static void __exit ytphy_exit(void) +{ + ytphy_drivers_unregister(ytphy_drvs, ARRAY_SIZE(ytphy_drvs)); +} + +module_init(ytphy_init); +module_exit(ytphy_exit); +#else +/* for linux 4.x */ +module_phy_driver(ytphy_drvs); +#endif + +MODULE_DESCRIPTION("Motorcomm PHY driver"); +MODULE_AUTHOR("Leilei Zhao"); +MODULE_LICENSE("GPL"); + +static struct mdio_device_id __maybe_unused motorcomm_tbl[] = { + { PHY_ID_YT8010, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8510, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8511, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8512, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8512B, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8521, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8531S, MOTORCOMM_PHY_ID_8531_MASK }, + { PHY_ID_YT8531, MOTORCOMM_PHY_ID_8531_MASK }, + { PHY_ID_YT8618, MOTORCOMM_MPHY_ID_MASK }, + { PHY_ID_YT8614, MOTORCOMM_MPHY_ID_MASK_8614 }, + { PHY_ID_YT8821, MOTORCOMM_PHY_ID_MASK_8821 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, motorcomm_tbl); + diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index a04d5984302285f3c585c29d6168e99f2117a6a2..e0046c35a0bdb5310384e2729a2983d7953895c0 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, while (!ctx->done.done && msecs--) udelay(1000); } else { - wait_event_interruptible(ctx->done.wait, - ctx->done.done); + swait_event_interruptible_exclusive(ctx->done.wait, + ctx->done.done); } break; default: diff --git a/drivers/of/base.c b/drivers/of/base.c index f0dbb7ad88cf68019581399178c7c59d2f49080a..c59b30bab0e0d4c9edcf12883bc96920f0b27a05 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -130,31 +130,34 @@ static u32 phandle_cache_mask; /* * Caller must hold devtree_lock. 
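+ * The free is split so that kfree() is never called under the raw
+ * devtree_lock: __of_free_phandle_cache() detaches and returns the cache
+ * pointer under the lock, and the caller frees it after unlocking.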
 */
-static void __of_free_phandle_cache(void)
+static struct device_node **__of_free_phandle_cache(void)
 {
 	u32 cache_entries = phandle_cache_mask + 1;
 	u32 k;
+	struct device_node **shadow;
 
 	if (!phandle_cache)
-		return;
+		return NULL;
 
 	for (k = 0; k < cache_entries; k++)
 		of_node_put(phandle_cache[k]);
 
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
+	return shadow;
 }
 
 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+	kfree(shadow);
 	return 0;
 }
 #if !defined(CONFIG_MODULES)
@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)
 	if (!phandles)
 		goto out;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
 	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 				GFP_ATOMIC);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	if (!phandle_cache)
 		goto out;
 
@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)
 
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 }
 
 void __init of_core_init(void)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 028b287466fbf9bf69130282c24e7dbe26e12a92..a1a024fe78e36ea9a2cf9c86f86d85629396235a 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -278,5 +278,15 @@ config VMD
 	  To compile this driver as a module, choose M here: the module
 	  will be called vmd.
 
+config PCIE_PHYTIUM_EP
+	tristate "Phytium PCIe endpoint controller"
+	depends on OF
+	depends on PCI_ENDPOINT
+	help
+	  Say Y here if you want to support the Phytium PCIe controller in
+	  endpoint mode on Phytium SoCs. The controller can act as a Root
+	  Port or an Endpoint depending on the Phytium firmware, but
+	  Endpoint mode supports only one physical function.
+
 source "drivers/pci/controller/dwc/Kconfig"
 endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index d56a507495c5edd902a6012420924b9b837203a5..45fbf1ff335477bdeb527b9afd40a3563b8735d3 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
 obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o
+obj-$(CONFIG_PCIE_PHYTIUM_EP) += pcie-phytium-ep.o
+
 # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
 obj-y += dwc/
diff --git a/drivers/pci/controller/pcie-phytium-ep.c b/drivers/pci/controller/pcie-phytium-ep.c
new file mode 100644
index 0000000000000000000000000000000000000000..7be67f0b258240c1497b585b50861dfd50ae4d51
--- /dev/null
+++ b/drivers/pci/controller/pcie-phytium-ep.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Phytium PCIe Endpoint controller driver
+ *
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+#include "pcie-phytium-ep.h"
+#include "pcie-phytium-register.h"
+
+#define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE	0x0
+#define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x1
+
+static int phytium_pcie_ep_write_header(struct pci_epc *epc, unsigned char fn,
+					struct pci_epf_header *hdr)
+{
+	struct phytium_pcie_ep *priv = epc_get_drvdata(epc);
+	u16 tmp = 0;
+
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_VENDOR_ID, hdr->vendorid);
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_DEVICE_ID, hdr->deviceid);
+	phytium_pcie_writeb(priv, fn, PHYTIUM_PCI_REVISION_ID, hdr->revid);
+	phytium_pcie_writeb(priv, fn, PHYTIUM_PCI_CLASS_PROG, hdr->progif_code);
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_CLASS_DEVICE,
+			    hdr->subclass_code | (hdr->baseclass_code << 8));
+
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_SUBSYS_VENDOR_ID,
+			    hdr->subsys_vendor_id);
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_SUBSYS_DEVICE_ID,
+			    hdr->subsys_id);
+
+	tmp = phytium_pcie_readw(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN);
+	tmp = ((tmp & (~INTERRUPT_PIN_MASK)) | hdr->interrupt_pin);
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN, tmp);
+
+	tmp = phytium_pcie_readw(priv, fn, PHYTIUM_PCI_MSIX_CAP);
+	phytium_pcie_writew(priv, fn, PHYTIUM_PCI_MSIX_CAP, MSIX_DISABLE);
+
+	return 0;
+}
+
+static int phytium_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+				   struct pci_epf_bar *epf_bar)
+{
+	struct phytium_pcie_ep *priv = epc_get_drvdata(epc);
+	u64 sz = 0, sz_mask, atr_size;
+	int flags = epf_bar->flags;
+	u32 setting, src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param;
+	enum pci_barno barno = epf_bar->barno;
+	struct pci_epc_mem *mem = epc->mem;
+
+	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (barno & 1)) {
+		dev_err(&epc->dev, "BAR %d does not support 64-bit memory\n", barno);
+		return -EINVAL;
+	}
+
+	if (barno & 1) {
+		dev_err(&epc->dev, "BARs 1/3/5 are not supported\n");
+		return -EINVAL;
+	}
+	dev_dbg(epc->dev.parent, "set bar%d mapping address 0x%pa size 0x%lx\n",
+		barno, &(epf_bar->phys_addr), epf_bar->size);
+
+	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+		setting = BAR_IO_TYPE;
+		sz = max_t(size_t, epf_bar->size, BAR_IO_MIN_APERTURE);
+		sz = 1 << fls64(sz - 1);
+		sz_mask = ~(sz - 1);
+		setting |= sz_mask;
+		trsl_param = TRSL_ID_IO;
+	} else {
+		setting = BAR_MEM_TYPE;
+		sz = max_t(size_t, epf_bar->size, BAR_MEM_MIN_APERTURE);
+		sz = 1 << fls64(sz - 1);
+		sz_mask = ~(sz - 1);
+		setting |= lower_32_bits(sz_mask);
+
+		if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+			setting |= BAR_MEM_64BIT;
+
+		if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
+			setting |= BAR_MEM_PREFETCHABLE;
+
+		trsl_param = TRSL_ID_MASTER;
+	}
+
+	phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno), setting);
+	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+		phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno + 1),
+				    upper_32_bits(sz_mask));
+	dev_dbg(epc->dev.parent, "set bar%d mapping address 0x%pa size 0x%llx 0x%x\n",
+		barno, &(epf_bar->phys_addr), sz, lower_32_bits(epf_bar->phys_addr));
+	sz = ALIGN(sz, mem->page_size);
+	atr_size = fls64(sz - 1) - 1;
+	src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT);
+	src_addr1 = 0;
+	trsl_addr0 = (lower_32_bits(epf_bar->phys_addr) & TRSL_ADDR_32_12_MASK);
+	trsl_addr1 = upper_32_bits(epf_bar->phys_addr);
+
+	phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR0(barno),
+			    src_addr0);
+	phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR1(barno),
+			    src_addr1);
+	phytium_pcie_writel(priv, fn,
PHYTIUM_PCI_WIN0_TRSL_ADDR0(barno), + trsl_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR1(barno), + trsl_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_PARAM(barno), + trsl_param); + + return 0; +} + +static void phytium_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, + struct pci_epf_bar *epf_bar) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + int flags = epf_bar->flags; + enum pci_barno barno = epf_bar->barno; + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno), 0); + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_BAR(barno + 1), 0); + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR0(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_SRC_ADDR1(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR0(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_ADDR1(barno), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_WIN0_TRSL_PARAM(barno), 0); +} + +static int phytium_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr, u64 pci_addr, + size_t size) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u32 src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param, atr_size; + u64 sz = 0; + u32 r; + struct pci_epc_mem *mem = epc->mem; + + r = find_first_zero_bit(&priv->ob_region_map, + sizeof(priv->ob_region_map) * BITS_PER_LONG); + if (r >= priv->max_regions) { + dev_err(&epc->dev, "no free outbound region\n"); + return -EINVAL; + } + + dev_dbg(epc->dev.parent, "set slave %d: mapping address 0x%pa to pci 0x%llx, size 0x%zx\n", + r, &addr, pci_addr, size); + + sz = ALIGN(size, mem->page_size); + atr_size = fls64(sz - 1) - 1; + src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); + src_addr0 |= (lower_32_bits(addr) & SRC_ADDR_32_12_MASK); + src_addr1 = upper_32_bits(addr); + trsl_addr0 = (lower_32_bits(pci_addr) & TRSL_ADDR_32_12_MASK); + trsl_addr1 = upper_32_bits(pci_addr); + trsl_param = TRSL_ID_PCIE_TR; + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(r), + src_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(r), + src_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(r), + trsl_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(r), + trsl_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(r), + trsl_param); + set_bit(r, &priv->ob_region_map); + priv->ob_addr[r] = addr; + + return 0; +} + +static void phytium_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, + phys_addr_t addr) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u32 r; + + for (r = 0; r < priv->max_regions; r++) + if (priv->ob_addr[r] == addr) + break; + + if (r == priv->max_regions) { + dev_err(&epc->dev, "used unmap addr 0x%pa\n", &addr); + return; + } + dev_dbg(epc->dev.parent, "set slave %d: unmapping address 0x%pa\n", r, &addr); + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(r), 0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(r), 0); + priv->ob_addr[r] = 0; + clear_bit(r, &priv->ob_region_map); +} + +static int phytium_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u16 flags = 0; + + flags = (mmc & MSI_NUM_MASK) << MSI_NUM_SHIFT; + flags &= 
~MSI_MASK_SUPPORT; + phytium_pcie_writew(priv, fn, PHYTIUM_PCI_INTERRUPT_PIN, flags); + + return 0; +} + +static int phytium_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + u16 flags, mme; + u32 cap = PHYTIUM_PCI_CF_MSI_BASE; + + flags = phytium_pcie_readw(priv, fn, cap + PCI_MSI_FLAGS); + if (!(flags & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + + return mme; +} + +static int phytium_pcie_ep_send_msi_irq(struct phytium_pcie_ep *priv, u8 fn, + u8 interrupt_num) +{ + u32 cap = PHYTIUM_PCI_CF_MSI_BASE; + u16 flags, mme, data_mask, data; + u8 msi_count; + u64 pci_addr, pci_addr_mask = IRQ_MAPPING_SIZE - 1; + u32 src_addr0, src_addr1, trsl_addr0, trsl_addr1, trsl_param, atr_size; + + flags = phytium_pcie_readw(priv, fn, cap + PCI_MSI_FLAGS); + if (!(flags & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; + msi_count = 1 << mme; + if (!interrupt_num || interrupt_num > msi_count) + return -EINVAL; + + data_mask = msi_count - 1; + data = phytium_pcie_readw(priv, fn, cap + PCI_MSI_DATA_64); + data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); + + /* Get the PCI address */ + pci_addr = phytium_pcie_readl(priv, fn, cap + PCI_MSI_ADDRESS_HI); + pci_addr <<= 32; + pci_addr |= phytium_pcie_readl(priv, fn, cap + PCI_MSI_ADDRESS_LO); + pci_addr &= GENMASK_ULL(63, 2); + + if (priv->irq_pci_addr != (pci_addr & ~pci_addr_mask) || (priv->irq_pci_fn != fn)) { + /* First region for IRQ writes. */ + atr_size = fls64(pci_addr_mask) - 1; + src_addr0 = ATR_IMPL | ((atr_size & ATR_SIZE_MASK) << ATR_SIZE_SHIFT); + src_addr0 |= (lower_32_bits(priv->irq_phys_addr) & SRC_ADDR_32_12_MASK); + src_addr1 = upper_32_bits(priv->irq_phys_addr); + trsl_addr0 = (lower_32_bits(pci_addr) & TRSL_ADDR_32_12_MASK); + trsl_addr1 = upper_32_bits(pci_addr); + trsl_param = TRSL_ID_PCIE_TR; + + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR0(0), + src_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_SRC_ADDR1(0), + src_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(0), + trsl_addr0); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(0), + trsl_addr1); + phytium_pcie_writel(priv, fn, PHYTIUM_PCI_SLAVE0_TRSL_PARAM(0), + trsl_param); + priv->irq_pci_addr = (pci_addr & ~pci_addr_mask); + priv->irq_pci_fn = fn; + } + + dev_dbg(priv->epc->dev.parent, "send event %d\n", data); + writew(data, priv->irq_cpu_addr + (pci_addr & pci_addr_mask)); + + return 0; +} + +static int phytium_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct phytium_pcie_ep *priv = epc_get_drvdata(epc); + + switch (type) { + case PCI_EPC_IRQ_MSI: + return phytium_pcie_ep_send_msi_irq(priv, fn, interrupt_num); + + default: + break; + } + + return -EINVAL; +} + +static int phytium_pcie_ep_start(struct pci_epc *epc) +{ + struct pci_epf *epf; + u32 cfg; + + cfg = BIT(0); + list_for_each_entry(epf, &epc->pci_epf, list) + cfg |= BIT(epf->func_no); + + list_for_each_entry(epf, &epc->pci_epf, list) + pci_epf_linkup(epf); + + return 0; +} + +static const struct pci_epc_ops phytium_pcie_epc_ops = { + .write_header = phytium_pcie_ep_write_header, + .set_bar = phytium_pcie_ep_set_bar, + .clear_bar = phytium_pcie_ep_clear_bar, + .map_addr = phytium_pcie_ep_map_addr, + .unmap_addr = phytium_pcie_ep_unmap_addr, + .set_msi = phytium_pcie_ep_set_msi, + .get_msi = phytium_pcie_ep_get_msi, + .raise_irq = phytium_pcie_ep_raise_irq, 
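+	/* only MSI is implemented in raise_irq; legacy INTx and MSI-X
+	 * requests return -EINVAL
+	 */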
+ .start = phytium_pcie_ep_start, +}; + + + +static int phytium_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_pcie_ep *priv = NULL; + struct resource *res; + struct device_node *np = dev->of_node; + struct pci_epc *epc; + int ret = 0, value; + + dev_dbg(dev, "enter %s\n", __func__); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + priv->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->reg_base)) { + dev_err(dev, "missing \"reg\"\n"); + return PTR_ERR(priv->reg_base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + if (!res) { + dev_err(dev, "missing \"mem\"\n"); + return -EINVAL; + } + priv->mem_res = res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hpb"); + priv->hpb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->hpb_base)) { + dev_err(dev, "missing \"hpb\"\n"); + return PTR_ERR(priv->hpb_base); + } + + ret = of_property_read_u32(np, "max-outbound-regions", &priv->max_regions); + if (ret < 0) { + dev_err(dev, "missing \"max-outbound-regions\"\n"); + return ret; + } + dev_info(dev, "%s max-outbound-regions %d\n", __func__, priv->max_regions); + + priv->ob_addr = devm_kcalloc(dev, priv->max_regions, + sizeof(*priv->ob_addr), GFP_KERNEL); + if (!priv->ob_addr) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + epc = devm_pci_epc_create(dev, &phytium_pcie_epc_ops); + if (IS_ERR(epc)) { + dev_err(dev, "failed to create epc device\n"); + return PTR_ERR(epc); + } + + priv->epc = epc; + epc_set_drvdata(epc, priv); + + if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) + epc->max_functions = 1; + dev_info(dev, "%s epc->max_functions %d\n", __func__, epc->max_functions); + + + ret = pci_epc_mem_init(epc, priv->mem_res->start, + resource_size(priv->mem_res)); + if (ret < 0) { + dev_err(dev, "failed to initialize the memory space\n"); + return ret; + } + + priv->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &priv->irq_phys_addr, + SZ_4K); + if (!priv->irq_cpu_addr) { + dev_err(dev, "failed to reserve memory space for MSI\n"); + ret = -ENOMEM; + goto err_alloc_irq_mem; + } + priv->irq_pci_addr = PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE; + /* Reserve region 0 for IRQS */ + set_bit(0, &priv->ob_region_map); + + value = ((lower_32_bits(priv->mem_res->start) >> C0_PREF_VALUE_SHIFT) + & C0_PREF_BASE_MASK) << C0_PREF_BASE_SHIFT; + value |= (((lower_32_bits(priv->mem_res->end) >> C0_PREF_VALUE_SHIFT) + & C0_PREF_LIMIT_MASK) << C0_PREF_LIMIT_SHIFT); + phytium_hpb_writel(priv, PHYTIUM_HPB_C0_PREF_BASE_LIMIT, value); + + value = ((upper_32_bits(priv->mem_res->start) >> C0_PREF_UP32_VALUE_SHIFT) + & C0_PREF_BASE_UP32_MASK) << C0_PREF_BASE_UP32_SHIFT; + value |= (((upper_32_bits(priv->mem_res->end) >> C0_PREF_UP32_VALUE_SHIFT) + & C0_PREF_LIMIT_UP32_MASK) << C0_PREF_LIMIT_UP32_SHIFT); + phytium_hpb_writel(priv, PHYTIUM_HPB_C0_PREF_BASE_LIMIT_UP32, value); + + dev_dbg(dev, "exit %s successful\n", __func__); + return 0; + +err_alloc_irq_mem: + pci_epc_mem_exit(epc); + return ret; +} + +static int phytium_pcie_ep_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_pcie_ep *priv = dev_get_drvdata(dev); + struct pci_epc *epc = priv->epc; + + pci_epc_mem_exit(epc); + + return 0; +} + +static const struct of_device_id phytium_pcie_ep_of_match[] = { + { .compatible = "phytium,pd2008-pcie-ep" }, + { }, +}; + +static struct 
platform_driver phytium_pcie_ep_driver = { + .driver = { + .name = "phytium-pcie-ep", + .of_match_table = phytium_pcie_ep_of_match, + }, + .probe = phytium_pcie_ep_probe, + .remove = phytium_pcie_ep_remove, +}; + +module_platform_driver(phytium_pcie_ep_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Pcie Controller Endpoint driver"); diff --git a/drivers/pci/controller/pcie-phytium-ep.h b/drivers/pci/controller/pcie-phytium-ep.h new file mode 100644 index 0000000000000000000000000000000000000000..bfdc062b165fad15d44bbdf40d1fb1ce6f63cc52 --- /dev/null +++ b/drivers/pci/controller/pcie-phytium-ep.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium pcie endpoint driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#ifndef __PCIE_PHYTIUM_EP_H__ +#define __PCIE_PHYTIUM_EP_H__ + +#include "pcie-phytium-register.h" + +#define IRQ_MAPPING_SIZE 0x1000 +struct phytium_pcie_ep { + void __iomem *reg_base; + struct resource *mem_res; + void __iomem *hpb_base; + unsigned int max_regions; + unsigned long ob_region_map; + phys_addr_t *ob_addr; + phys_addr_t irq_phys_addr; + void __iomem *irq_cpu_addr; + unsigned long irq_pci_addr; + u8 irq_pci_fn; + struct pci_epc *epc; +}; + +static inline void +phytium_pcie_writeb(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u8 value) +{ + pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + writeb(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); +} + +static inline unsigned char +phytium_pcie_readb(struct phytium_pcie_ep *priv, u8 fn, u32 reg) +{ + unsigned char value; + + value = readb(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); + pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + + return value; +} + +static inline void +phytium_pcie_writew(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u16 value) +{ + pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + writew(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); +} + +static inline unsigned short +phytium_pcie_readw(struct phytium_pcie_ep *priv, u8 fn, u32 reg) +{ + unsigned short value; + + value = readw(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); + pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + + return value; +} + +static inline void +phytium_pcie_writel(struct phytium_pcie_ep *priv, u8 fn, u32 reg, u32 value) +{ + pr_debug("Write 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + writel(value, priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); +} + +static inline unsigned int +phytium_pcie_readl(struct phytium_pcie_ep *priv, u8 fn, u32 reg) +{ + unsigned int value; + + value = readl(priv->reg_base + PHYTIUM_PCIE_FUNC_BASE(fn) + reg); + pr_debug("Read 32'h%08lx 32'h%08x\n", PHYTIUM_PCIE_FUNC_BASE(fn) + reg, value); + + return value; +} + +static inline void +phytium_hpb_writel(struct phytium_pcie_ep *priv, u32 reg, u32 value) +{ + pr_debug("Write 32'h%08x 32'h%08x\n", reg, value); + writel(value, priv->hpb_base + reg); +} +#endif diff --git a/drivers/pci/controller/pcie-phytium-register.h b/drivers/pci/controller/pcie-phytium-register.h new file mode 100644 index 0000000000000000000000000000000000000000..b6dfb3fab45b0d63f01eb51bf89d290bfe0b0b57 --- /dev/null +++ b/drivers/pci/controller/pcie-phytium-register.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium pcie endpoint driver + * + * Copyright (c) 2021-2023 Phytium Technology 
Co., Ltd.
+ */
+
+#ifndef __PCIE_PHYTIUM_REGISTER_H__
+#define __PCIE_PHYTIUM_REGISTER_H__
+
+#define PHYTIUM_PCIE_FUNC_BASE(fn)		(((fn) << 14) & GENMASK(16, 14))
+#define PHYTIUM_PCI_VENDOR_ID			0x98
+#define PHYTIUM_PCI_DEVICE_ID			0x9a
+#define PHYTIUM_PCI_REVISION_ID			0x9c
+#define PHYTIUM_PCI_CLASS_PROG			0x9d
+#define PHYTIUM_PCI_CLASS_DEVICE		0x9e
+#define PHYTIUM_PCI_SUBSYS_VENDOR_ID		0xa0
+#define PHYTIUM_PCI_SUBSYS_DEVICE_ID		0xa2
+#define PHYTIUM_PCI_INTERRUPT_PIN		0xa8
+#define INTERRUPT_PIN_MASK			0x7
+#define MSI_DISABLE				(1 << 3)
+#define MSI_NUM_MASK				(0x7)
+#define MSI_NUM_SHIFT				4
+#define MSI_MASK_SUPPORT			(1 << 7)
+#define PHYTIUM_PCI_MSIX_CAP			0xaa
+	#define MSIX_DISABLE			(0 << 15)
+
+#define PHYTIUM_PCI_BAR_0			0xe4
+#define PHYTIUM_PCI_BAR(bar_num)		(0xe4 + (bar_num) * 4)
+#define BAR_IO_TYPE				(1 << 0)
+#define BAR_MEM_TYPE				(0 << 0)
+#define BAR_MEM_64BIT				(1 << 2)
+#define BAR_MEM_PREFETCHABLE			(1 << 3)
+#define BAR_IO_MIN_APERTURE			4
+#define BAR_MEM_MIN_APERTURE			16
+
+#define PHYTIUM_PCI_WIN0_BASE			0x600
+#define PHYTIUM_PCI_WIN0_SRC_ADDR0(table)	(PHYTIUM_PCI_WIN0_BASE + 0x20 * (table) + 0x0)
+#define ATR_IMPL				0x1
+#define ATR_SIZE_MASK				0x3f
+#define ATR_SIZE_SHIFT				1
+#define ATR_SIZE_ALIGN				0x1000
+#define SRC_ADDR_32_12_MASK			0xfffff000
+
+#define PHYTIUM_PCI_WIN0_SRC_ADDR1(table)	(PHYTIUM_PCI_WIN0_BASE + 0x20 * (table) + 0x4)
+#define PHYTIUM_PCI_WIN0_TRSL_ADDR0(table)	(PHYTIUM_PCI_WIN0_BASE + 0x20 * (table) + 0x8)
+#define TRSL_ADDR_32_12_MASK			0xfffff000
+
+#define PHYTIUM_PCI_WIN0_TRSL_ADDR1(table)	(PHYTIUM_PCI_WIN0_BASE + 0x20 * (table) + 0xc)
+#define PHYTIUM_PCI_WIN0_TRSL_PARAM(table)	(PHYTIUM_PCI_WIN0_BASE + 0x20 * (table) + 0x10)
+#define TRSL_ID_IO				0x1
+#define TRSL_ID_MASTER				0x4
+#define TRSL_ID_PCIE_TR				0x0
+
+#define PHYTIUM_PCI_SLAVE0_BASE			0x800
+#define PHYTIUM_PCI_SLAVE0_SRC_ADDR0(table)	(PHYTIUM_PCI_SLAVE0_BASE + 0x20 * (table) + 0x0)
+#define PHYTIUM_PCI_SLAVE0_SRC_ADDR1(table)	(PHYTIUM_PCI_SLAVE0_BASE + 0x20 * (table) + 0x4)
+#define PHYTIUM_PCI_SLAVE0_TRSL_ADDR0(table)	(PHYTIUM_PCI_SLAVE0_BASE + 0x20 * (table) + 0x8)
+#define PHYTIUM_PCI_SLAVE0_TRSL_ADDR1(table)	(PHYTIUM_PCI_SLAVE0_BASE + 0x20 * (table) + 0xc)
+#define PHYTIUM_PCI_SLAVE0_TRSL_PARAM(table)	(PHYTIUM_PCI_SLAVE0_BASE + 0x20 * (table) + 0x10)
+
+#define PHYTIUM_PCI_CF_MSI_BASE			0x10e0
+#define PHYTIUM_PCI_CF_MSI_CONTROL		0x10e2
+
+#define PHYTIUM_HPB_C0_PREF_BASE_LIMIT		0xa30
+	#define C0_PREF_LIMIT_MASK		0xfff
+	#define C0_PREF_LIMIT_SHIFT		20
+	#define C0_PREF_BASE_MASK		0xfff
+	#define C0_PREF_BASE_SHIFT		4
+	#define C0_PREF_VALUE_SHIFT		20
+#define PHYTIUM_HPB_C0_PREF_BASE_LIMIT_UP32	0xa34
+	#define C0_PREF_LIMIT_UP32_MASK		0xff
+	#define C0_PREF_LIMIT_UP32_SHIFT	8
+	#define C0_PREF_BASE_UP32_MASK		0xff
+	#define C0_PREF_BASE_UP32_SHIFT		0
+	#define C0_PREF_UP32_VALUE_SHIFT	0
+#endif
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index ef60718070728c227ffd954f2d3777292e5ffd53..6a77382672303338dc99126444901373e55e59fe 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -129,6 +129,10 @@ struct controller {
 	unsigned int ist_running;
 	int request_result;
 	wait_queue_head_t requester;
+#ifdef CONFIG_ARCH_PHYTIUM
+	u32 buses;
+	u16 slot_ctrl_t;
+#endif
 };
 
 /**
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index c71964e29b0135537ad6815aa8bc2cfc49cac004..7917d2dae15849fb0dd9d7a7ac6d4da6697be21c 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -226,6 +226,10 @@ void pciehp_handle_disable_request(struct slot *slot)
 void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
 {
 	struct controller *ctrl = slot->ctrl;
+#ifdef CONFIG_ARCH_PHYTIUM
+	struct pci_dev *pdev = ctrl->pcie->port;
+	u16 slot_ctrl_val;
+#endif
 	bool link_active;
 	u8 present;
@@ -248,6 +252,20 @@ void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
 		ctrl_info(ctrl, "Slot(%s): Card not present\n",
 			  slot_name(slot));
 		pciehp_disable_slot(slot);
+#ifdef CONFIG_ARCH_PHYTIUM
+		if ((ctrl->buses > 0) && (ctrl->slot_ctrl > 0)) {
+			pci_write_config_dword(pdev, PCI_PRIMARY_BUS, ctrl->buses);
+			slot_ctrl_val = ctrl->slot_ctrl_t | PCI_EXP_SLTCTL_ABPE |
+				PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
+				PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_CCIE |
+				PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_ATTN_IND_BLINK |
+				PCI_EXP_SLTCTL_PWR_IND_ON | PCI_EXP_SLTCTL_PWR_OFF |
+				PCI_EXP_SLTCTL_DLLSCE;
+			pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl_val);
+			ctrl_info(ctrl, "Ctrl buses=0x%x, slot_ctrl=0x%x\n",
+				  ctrl->buses, slot_ctrl_val);
+		}
+#endif
 		break;
 	default:
 		mutex_unlock(&slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 2795445233b3b9117d44f35a12cea41628f3fc64..5d9feaa9c8b2243ec204ba63865674461825ac62 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -636,7 +636,13 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	struct slot *slot = ctrl->slot;
 	irqreturn_t ret;
+#ifdef CONFIG_ARCH_PHYTIUM
+	u32 events, buses;
+	u16 slot_ctrl;
+	bool link_active;
+#else
 	u32 events;
+#endif
 
 	ctrl->ist_running = true;
 	pci_config_pm_runtime_get(pdev);
@@ -656,6 +662,23 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
 		goto out;
 	}
 
+#ifdef CONFIG_ARCH_PHYTIUM
+	if (slot->state == ON_STATE) {
+		pci_read_config_dword(pdev, PCI_PRIMARY_BUS, &buses);
+		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+		ctrl->buses = buses;
+		ctrl->slot_ctrl_t = slot_ctrl;
+		ctrl_dbg(ctrl, "Ctrl buses=0x%x, slot_ctrl=0x%x\n",
+			 ctrl->buses, ctrl->slot_ctrl_t);
+	}
+
+	mdelay(1000);
+
+	link_active = pciehp_check_link_active(ctrl);
+	if (slot->state == ON_STATE && !link_active)
+		events |= PCI_EXP_SLTSTA_DLLSC;
+#endif
+
 	/* Check Attention Button Pressed */
 	if (events & PCI_EXP_SLTSTA_ABP) {
 		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2498b2d340095e53cf2191bdf992ab866139614a..b1ac9eca836409e3c08261eab416f22d2bbf547a 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -116,6 +116,18 @@ void pcie_port_bus_unregister(void);
 
 struct pci_dev;
 
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+extern bool pciehp_msi_disabled;
+
+static inline bool pciehp_no_msi(void)
+{
+	return pciehp_msi_disabled;
+}
+
+#else /* !CONFIG_HOTPLUG_PCI_PCIE */
+static inline bool pciehp_no_msi(void) { return false; }
+#endif /* !CONFIG_HOTPLUG_PCI_PCIE */
+
 #ifdef CONFIG_PCIE_PME
 extern bool pcie_pme_msi_disabled;
 
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 7c37d815229e9ee20e7732695dda58344fc77a20..f8d5a93ba21e5cc17b3d0fced4aeb2fb2fbb01fb 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -25,6 +25,17 @@ struct portdrv_service_data {
 	u32 service;
 };
 
+bool pciehp_msi_disabled;
+
+static int __init pciehp_setup(char *str)
+{
+	if (!strncmp(str, "nomsi", 5))
+		pciehp_msi_disabled = true;
+
+	return 1;
+}
+__setup("pcie_hp=", pciehp_setup);
+
 /**
  * release_pcie_device
- free PCI Express port service device structure * @dev: Port service device to release @@ -166,13 +177,16 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) irqs[i] = -1; /* - * If we support PME but can't use MSI/MSI-X for it, we have to - * fall back to INTx or other interrupts, e.g., a system shared - * interrupt. + * If we support PME or hotplug, but we can't use MSI/MSI-X for + * them, we have to fall back to INTx or other interrupts, e.g., a + * system shared interrupt. */ if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) goto legacy_irq; + if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi()) + goto legacy_irq; + /* Try to use MSI-X or MSI if supported */ if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0) return 0; diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 291c0074ad6f465d7aaa8d2f517aa7113dc0187b..25d1f96cddc1d8dee6868aefc6fd490be96a5869 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -43,10 +43,11 @@ struct switchtec_user { enum mrpc_state state; - struct completion comp; + wait_queue_head_t cmd_comp; struct kref kref; struct list_head list; + bool cmd_done; u32 cmd; u32 status; u32 return_code; @@ -68,7 +69,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev) stuser->stdev = stdev; kref_init(&stuser->kref); INIT_LIST_HEAD(&stuser->list); - init_completion(&stuser->comp); + init_waitqueue_head(&stuser->cmd_comp); stuser->event_cnt = atomic_read(&stdev->event_cnt); dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); @@ -147,7 +148,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) kref_get(&stuser->kref); stuser->read_len = sizeof(stuser->data); stuser_set_state(stuser, MRPC_QUEUED); - reinit_completion(&stuser->comp); + stuser->cmd_done = false; list_add_tail(&stuser->list, &stdev->mrpc_queue); mrpc_cmd_submit(stdev); @@ -184,7 +185,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) stuser->read_len); out: - complete_all(&stuser->comp); + stuser->cmd_done = true; + wake_up_interruptible(&stuser->cmd_comp); list_del_init(&stuser->list); stuser_put(stuser); stdev->mrpc_busy = 0; @@ -354,7 +356,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp) return PTR_ERR(stuser); filp->private_data = stuser; - nonseekable_open(inode, filp); + stream_open(inode, filp); dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); @@ -454,10 +456,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data, mutex_unlock(&stdev->mrpc_mutex); if (filp->f_flags & O_NONBLOCK) { - if (!try_wait_for_completion(&stuser->comp)) + if (!READ_ONCE(stuser->cmd_done)) return -EAGAIN; } else { - rc = wait_for_completion_interruptible(&stuser->comp); + rc = wait_event_interruptible(stuser->cmd_comp, + stuser->cmd_done); if (rc < 0) return rc; } @@ -505,7 +508,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) struct switchtec_dev *stdev = stuser->stdev; __poll_t ret = 0; - poll_wait(filp, &stuser->comp.wait, wait); + poll_wait(filp, &stuser->cmd_comp, wait); poll_wait(filp, &stdev->event_wq, wait); if (lock_mutex_and_test_alive(stdev)) @@ -513,7 +516,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) mutex_unlock(&stdev->mrpc_mutex); - if (try_wait_for_completion(&stuser->comp)) + if (READ_ONCE(stuser->cmd_done)) ret |= EPOLLIN | EPOLLRDNORM; if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) @@ -1037,7 +1040,8 @@ static void stdev_kill(struct switchtec_dev *stdev) 
/* Wake up and kill any users waiting on an MRPC request */ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { - complete_all(&stuser->comp); + stuser->cmd_done = true; + wake_up_interruptible(&stuser->cmd_comp); list_del_init(&stuser->list); stuser_put(stuser); } diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 504d252716f2e10db4914c8de651dff05106b82c..498081d89ae4c16c596a32eac9b2adc7b5c7102b 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -322,6 +322,16 @@ config PWM_PCA9685 To compile this driver as a module, choose M here: the module will be called pwm-pca9685. +config PWM_PHYTIUM + tristate "Phytium PWM support" + depends on ARCH_PHYTIUM + help + Generic PWM framework driver for the PWM controller found on + Phytium SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-phytium. + config PWM_PUV3 tristate "PKUnity NetBook-0916 PWM support" depends on ARCH_PUV3 diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 9c676a0dadf55e2b06fe81a6d620806393c98d48..7f5d9d6da1f43f909d7bffbd74ecb1a3936399b2 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o obj-$(CONFIG_PWM_MXS) += pwm-mxs.o obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o +obj-$(CONFIG_PWM_PHYTIUM) += pwm-phytium.o obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o obj-$(CONFIG_PWM_PXA) += pwm-pxa.o obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o diff --git a/drivers/pwm/pwm-phytium.c b/drivers/pwm/pwm-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..76ddf5d1816131d90b7742e2603b5a1ea96eb576 --- /dev/null +++ b/drivers/pwm/pwm-phytium.c @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium PWM driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+
+#define REG_TCNT		0x00
+#define REG_TCTRL		0x04
+#define REG_STAT		0x08
+
+#define REG_TPERIOD		0x0c
+#define REG_PWMCTRL		0x10
+#define REG_PWMCCR		0x14
+
+#define TCTRL_DIV_MASK		0x1ff8
+#define TCTRL_PWMMOD_MASK	0x4
+#define TCTRL_CAPMOD_MASK	0x3
+#define PWM_PERIOD_MASK		0xffff
+#define PWM_DUTY_MASK		0xffff
+#define PWM_MODE_MASK		0x4
+#define PWM_CTRL_INIT		0xc4
+
+#define PWM_NUM			2
+
+#define REG_DBCTRL		0x00
+#define REG_DBCLY		0x04
+#define PWM_UPDBCLY_MASK	0x3ff
+#define PWM_DWDBCLY_MASK	0xffc00
+#define PWM_DB_POLARITY_MASK	0xc
+
+#define PWM_N(x)		((0x400)*(x))
+#define MAX_PARAMETER		2
+
+struct phytium_pwm_state {
+	int rst;
+	int cntmod;
+	int dutymod;
+	unsigned int div;
+	int db_rst;
+	unsigned int updbcly;
+	unsigned int dwdbcly;
+	unsigned int dbpolarity;
+};
+
+struct phytium_pwm_param {
+	int cntmod;
+	int dutymod;
+	unsigned int div;
+	unsigned int updbcly;
+	unsigned int dwdbcly;
+	unsigned int dbpolarity;
+};
+
+struct phytium_pwm_variant {
+	u8 rst_mask;
+	u8 div;
+	int counter_mode;
+	int periodns;
+	int duty_ns;
+	int pwm_mode;
+	u8 duty_mode;
+	int updbcly;
+	int dwdbcly;
+};
+
+struct phytium_pwm_channel {
+	u32 period_ns;
+	u32 duty_ns;
+	u32 tin_ns;
+};
+
+struct phytium_pwm_chip {
+	struct pwm_chip chip;
+	struct pwm_state state_pm[PWM_NUM];
+	struct phytium_pwm_variant variant;
+	struct phytium_pwm_state state;
+	u8 inverter_mask;
+	u8 disabled_mask;
+	int db_init;
+	void __iomem *base;
+	void __iomem *base1;
+	struct phytium_pwm_param parameter[MAX_PARAMETER];
+	unsigned int num_parameters;
+
+	unsigned long clk_rate;
+	struct clk *base_clk;
+};
+
+static inline struct phytium_pwm_chip *to_phytium_pwm_chip(struct pwm_chip *chip)
+{
+	return container_of(chip, struct phytium_pwm_chip, chip);
+}
+
+static void pwm_phytium_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	devm_kfree(chip->dev, pwm_get_chip_data(pwm));
+	pwm_set_chip_data(pwm, NULL);
+}
+
+static int pwm_phytium_enable(struct pwm_chip *chip, struct pwm_device *pwm, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_TCTRL);
+	reg |= 0x2;
+	our_chip->state_pm[n].enabled = 1;
+	writel(reg, our_chip->base + PWM_N(n) + REG_TCTRL);
+
+	return 0;
+}
+
+static void pwm_phytium_disable(struct pwm_chip *chip, struct pwm_device *pwm, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_TCTRL);
+	reg &= 0xfffffffd;
+	our_chip->state_pm[n].enabled = 0;
+	writel(reg, our_chip->base + PWM_N(n) + REG_TCTRL);
+}
+
+static void pwm_phytium_dutymod(struct pwm_chip *chip, int dutymod, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_PWMCTRL);
+
+	if (dutymod == 0)
+		reg &= 0xfffffeff;
+	else if (dutymod == 1)
+		reg |= 0x100;
+
+	writel(reg, our_chip->base + PWM_N(n) + REG_PWMCTRL);
+}
+
+static void pwm_phytium_set_div(struct pwm_chip *chip, unsigned int div, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_TCTRL);
+	reg &= 0xffff;
+	reg |= (div << 16);
+	writel(reg, our_chip->base + PWM_N(n) + REG_TCTRL);
+}
+
+static void pwm_phytium_set_tmode(struct pwm_chip *chip, int tmode, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_TCTRL);
+	if (tmode == 0)
+		reg &= 0xfffffffb;
+	else if (tmode == 1)
+		reg |= 0x4;
+
+	writel(reg, our_chip->base + PWM_N(n) + REG_TCTRL);
+}
+
+static void pwm_phytium_set_periodns(struct pwm_chip *chip, unsigned int periodns, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+	int div = our_chip->state.div;
+	u64 cycles;
+
+	cycles = our_chip->clk_rate;
+	cycles *= (periodns / (div + 1));
+	do_div(cycles, NSEC_PER_SEC);
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_TPERIOD);
+	cycles = (cycles & PWM_PERIOD_MASK) - 0x1;
+	our_chip->state_pm[n].period = cycles;
+
+	writel(cycles, our_chip->base + PWM_N(n) + REG_TPERIOD);
+}
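+
+/*
+ * Worked example for the computation above (numbers assumed for
+ * illustration): with clk_rate = 50 MHz, div = 0 and periodns = 1000000,
+ * cycles = 50000000 * 1000000 / NSEC_PER_SEC = 50000, and 50000 - 1 =
+ * 49999 is written to REG_TPERIOD. Note that periodns / (div + 1) is
+ * integer division, so the nanosecond value truncates before scaling.
+ */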
+
+static void pwm_phytium_set_duty(struct pwm_chip *chip, unsigned int duty, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+	int div = our_chip->state.div;
+	u64 cycles;
+
+	cycles = our_chip->clk_rate;
+	cycles *= (duty / (div + 1));
+	do_div(cycles, NSEC_PER_SEC);
+
+	reg = readl(our_chip->base + PWM_N(n) + REG_PWMCCR);
+	cycles = (cycles & PWM_DUTY_MASK) - 0x1;
+	our_chip->state_pm[n].duty_cycle = cycles;
+
+	writel(cycles, our_chip->base + PWM_N(n) + REG_PWMCCR);
+}
+
+static int pwm_phytium_set_dbcly(struct pwm_chip *chip, unsigned int updbcly, unsigned int dwdbcly)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+	u64 dbcly, cycles, upcycles, dwcycles;
+
+	reg = readl(our_chip->base + REG_TPERIOD);
+	cycles = our_chip->clk_rate;
+	dbcly = 0;
+	if (updbcly) {
+		upcycles = cycles * updbcly;
+		do_div(upcycles, NSEC_PER_SEC);
+
+		if (upcycles < reg)
+			dbcly |= (upcycles & PWM_UPDBCLY_MASK);
+		else
+			return -EINVAL;
+	}
+
+	if (dwdbcly) {
+		dwcycles = cycles * dwdbcly;
+		do_div(dwcycles, NSEC_PER_SEC);
+
+		if (dwcycles < reg)
+			dbcly |= ((dwcycles << 10) & PWM_DWDBCLY_MASK);
+		else
+			return -EINVAL;
+	}
+
+	writel(dbcly, our_chip->base1 + REG_DBCLY);
+
+	reg = readl(our_chip->base1 + REG_DBCTRL);
+	reg |= 0x30;
+	writel(reg, our_chip->base1 + REG_DBCTRL);
+
+	return 0;
+}
+
+static void pwm_phytium_set_dbpolarity(struct pwm_chip *chip, unsigned int db_polarity)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 reg;
+
+	reg = readl(our_chip->base1 + REG_DBCTRL);
+	reg &= 0x33;
+	reg |= ((db_polarity << 2) & PWM_DB_POLARITY_MASK);
+	writel(reg, our_chip->base1 + REG_DBCTRL);
+}
+
+static int pwm_phytium_init(struct pwm_chip *chip, struct pwm_device *pwm, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+
+	writel(PWM_CTRL_INIT, our_chip->base + PWM_N(n) + REG_PWMCTRL);
+
+	pwm_phytium_dutymod(chip, our_chip->state.dutymod, n);
+	pwm_phytium_set_div(chip, our_chip->state.div, n);
+	pwm_phytium_set_tmode(chip, our_chip->state.cntmod, n);
+
+	return 0;
+}
+
+static int pwm_phytium_db_init(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+
+	pwm_phytium_set_dbcly(chip, our_chip->state.updbcly, our_chip->state.dwdbcly);
+	pwm_phytium_set_dbpolarity(chip, our_chip->state.dbpolarity);
+
+	return 0;
+}
+
+static int __pwm_phytium_config(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	pwm_phytium_init(chip, pwm, 0);
+	pwm_phytium_init(chip, pwm, 1);
+	return 0;
+}
+
+static int pwm_phytium_set_polarity(struct pwm_chip *chip, enum pwm_polarity polarity, int n)
+{
+	struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip);
+	u32 value;
+
+	value =
readl(our_chip->base + PWM_N(n) + REG_PWMCTRL); + + if (polarity == PWM_POLARITY_INVERSED) { + value &= 0xffffff0f; + value |= 0x30; + } else if (polarity == PWM_POLARITY_NORMAL) { + value &= 0xffffff0f; + value |= 0x40; + } + + our_chip->state_pm[n].polarity = polarity; + writel(value, our_chip->base + PWM_N(n) + REG_PWMCTRL); + + return 0; +} + +static int pwm_phytium_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct phytium_pwm_chip *phytium_pwm = to_phytium_pwm_chip(chip); + struct pwm_state cstate; + u32 reg; + int n; + + pwm_get_state(pwm, &cstate); + + n = pwm->hwpwm & BIT(0); + + if ((state->polarity != cstate.polarity) && !state->enabled) + pwm_phytium_set_polarity(chip, state->polarity, n); + + if (state->enabled && !cstate.enabled) + pwm_phytium_enable(chip, pwm, n); + + if (!state->enabled && cstate.enabled) + pwm_phytium_disable(chip, pwm, n); + + if (state->period != cstate.period) { + pwm_phytium_set_periodns(chip, state->period, n); + if ((phytium_pwm->db_init == 1) && (n == 0)) + pwm_phytium_db_init(chip, pwm); + } + + if (state->duty_cycle != cstate.duty_cycle) { + if (phytium_pwm->state.dutymod == true) { + reg = readl(phytium_pwm->base + PWM_N(n) + REG_STAT); + if ((reg & 0x8) != 0x8) + pwm_phytium_set_duty(chip, state->duty_cycle, n); + } else { + pwm_phytium_set_duty(chip, state->duty_cycle, n); + } + } + + return 0; +} + +static int pwm_phytium_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct phytium_pwm_chip *our_chip = to_phytium_pwm_chip(chip); + struct phytium_pwm_channel *our_chan; + + our_chan = devm_kzalloc(chip->dev, sizeof(*our_chan), GFP_KERNEL); + if (!our_chan) + return -ENOMEM; + + pwm_set_chip_data(pwm, our_chan); + __pwm_phytium_config(&our_chip->chip, our_chip->chip.pwms); + + return 0; +} + +static const struct pwm_ops pwm_phytium_ops = { + .request = pwm_phytium_request, + .free = pwm_phytium_free, + .apply = pwm_phytium_apply, + .owner = THIS_MODULE, +}; + +static int phytium_pwm_set_parameter(struct phytium_pwm_chip *priv) +{ + unsigned int i; + + for (i = 0; i < priv->num_parameters; i++) { + if (priv->parameter[i].updbcly > 0 || priv->parameter[i].dwdbcly > 0) { + priv->db_init = 1; + priv->state.db_rst = 1; + } + + priv->state.cntmod = priv->parameter[i].cntmod; + priv->state.dutymod = priv->parameter[i].dutymod; + priv->state.div = priv->parameter[i].div; + priv->state.updbcly = priv->parameter[i].updbcly; + priv->state.dwdbcly = priv->parameter[i].dwdbcly; + priv->state.dbpolarity = priv->parameter[i].dbpolarity; + } + priv->state.rst = 1; + + return 0; +} + +static int pwm_phytium_probe_parameter(struct phytium_pwm_chip *priv, + struct fwnode_handle *np) +{ + int nb, ret, array_size; + unsigned int i; + + array_size = fwnode_property_read_u32_array(np, "phytium,db", NULL, 0); + nb = array_size / (sizeof(struct phytium_pwm_param) / sizeof(u32)); + if (nb <= 0 || nb > MAX_PARAMETER) + return -EINVAL; + + priv->num_parameters = nb; + ret = fwnode_property_read_u32_array(np, "phytium,db", + (u32 *)priv->parameter, array_size); + if (ret) + return ret; + + for (i = 0; i < priv->num_parameters; i++) { + if (priv->parameter[i].cntmod > 1 || + priv->parameter[i].dutymod > 1 || + priv->parameter[i].div > 4096 || + priv->parameter[i].dbpolarity > 3) + return -EINVAL; + } + + return phytium_pwm_set_parameter(priv); +} +static int pwm_phytium_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct fwnode_handle *np = dev_fwnode(dev); + struct phytium_pwm_chip *chip; + 
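+	/*
+	 * pwm_phytium_probe_parameter() (defined above, called below) expects
+	 * the "phytium,db" property as a flat u32 array with six cells per
+	 * entry, in struct phytium_pwm_param order (cntmod, dutymod, div,
+	 * updbcly, dwdbcly, dbpolarity), at most MAX_PARAMETER entries.
+	 * A hypothetical devicetree fragment (node name and address are
+	 * illustrative, not taken from a shipped DT):
+	 *
+	 *	pwm0: pwm@2804a000 {
+	 *		compatible = "phytium,pwm";
+	 *		reg = <0x0 0x2804a000 0x0 0x1000>;
+	 *		phytium,db = <1 0 600 100 100 0>;
+	 *	};
+	 */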
struct resource *res; + int ret; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + + if (chip == NULL) + return -ENOMEM; + + chip->chip.dev = &pdev->dev; + chip->chip.ops = &pwm_phytium_ops; + chip->chip.base = -1; + chip->chip.npwm = PWM_NUM; + chip->inverter_mask = BIT(PWM_NUM) - 1; + + if (dev->of_node) { + chip->chip.of_xlate = of_pwm_xlate_with_flags; + chip->chip.of_pwm_n_cells = 3; + } + ret = pwm_phytium_probe_parameter(chip, np); + + if (ret) { + dev_err(dev, "failed to set parameter\n"); + return ret; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->base1 = devm_ioremap_resource(&pdev->dev, res); + chip->base = (chip->base1 + 0x400); + + if (IS_ERR(chip->base)) { + dev_err(dev, "failed to get base_addr\n"); + return PTR_ERR(chip->base); + } + + if (dev->of_node) { + chip->base_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(chip->base_clk)) { + dev_err(dev, "failed to get clk\n"); + return PTR_ERR(chip->base_clk); + } + + ret = clk_prepare_enable(chip->base_clk); + if (ret < 0) { + dev_err(dev, "failed to enable clk\n"); + return ret; + } + chip->clk_rate = clk_get_rate(chip->base_clk); + + } else if (has_acpi_companion(dev)){ + if(fwnode_property_read_u32(dev_fwnode(dev),"clock-frequency", (u32 *)&(chip->clk_rate) ) <0) + chip->clk_rate = 50000000; + } + + platform_set_drvdata(pdev, chip); + + ret = pwmchip_add(&chip->chip); + + if (ret < 0) { + dev_err(dev, "failed to register PWM chip\n"); + return ret; + } + + return 0; +} + +static int pwm_phytium_remove(struct platform_device *pdev) +{ + struct phytium_pwm_chip *chip = platform_get_drvdata(pdev); + + pwmchip_remove(&chip->chip); + + clk_disable_unprepare(chip->base_clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int pwm_phytium_pm_init(struct phytium_pwm_chip *priv) +{ + int i; + + __pwm_phytium_config(&priv->chip, priv->chip.pwms); + for (i = 0; i < priv->chip.npwm; i++) { + writel(priv->state_pm[i].period, priv->base + PWM_N(i) + REG_TPERIOD); + if ((priv->db_init == 1) && (i == 0)) + pwm_phytium_db_init(&priv->chip, priv->chip.pwms); + writel(priv->state_pm[i].duty_cycle, priv->base + PWM_N(i) + REG_PWMCTRL); + pwm_phytium_set_polarity(&priv->chip, priv->state_pm[i].polarity, i); + if (priv->state_pm[i].enabled) + pwm_phytium_enable(&priv->chip, priv->chip.pwms, i); + } + + return 0; +} + +static int pwm_phytium_suspend(struct device *dev) +{ + return 0; +} + +static int pwm_phytium_resume(struct device *dev) +{ + struct phytium_pwm_chip *priv = dev_get_drvdata(dev); + + pwm_phytium_pm_init(priv); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_pwm_dev_pm_ops, pwm_phytium_suspend, pwm_phytium_resume); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_pwm_acpi_ids[] = { + { "PHYT0029", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_pwm_acpi_ids); +#endif + +static const struct of_device_id phytium_pwm_matches[] = { + { .compatible = "phytium,pwm" }, + {}, +}; +MODULE_DEVICE_TABLE(of, phytium_pwm_matches); + +static struct platform_driver pwm_phytium_driver = { + .driver = { + .name = "phytium-pwm", + .pm = &phytium_pwm_dev_pm_ops, + .of_match_table = phytium_pwm_matches, + .acpi_match_table = ACPI_PTR(phytium_pwm_acpi_ids), + }, + .probe = pwm_phytium_probe, + .remove = pwm_phytium_remove, +}; +module_platform_driver(pwm_phytium_driver); + +MODULE_DESCRIPTION("Phytium SoC PWM driver"); +MODULE_AUTHOR("Yang Liu "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 
72bdda4ccebfd27d27a07205c6d4aa03299a567b..cc96cbca268a63bb2f42866ee575da5eb6d6db9b 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -267,7 +267,7 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) export->child.parent = parent; export->child.devt = MKDEV(0, 0); export->child.groups = pwm_groups; - dev_set_name(&export->child, "pwm%u", pwm->hwpwm); + dev_set_name(&export->child, "pwm%u", pwm->pwm); ret = device_register(&export->child); if (ret) { diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index e48069db17033bb79ca2ad68d6e3fd5ef6ca2633..3ead97058d07a6e8cb20bd9ff0aa76e0cea48e08 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -42,6 +42,7 @@ #include #include #include +#include #include "remoteproc_internal.h" @@ -257,6 +258,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) rsc = (void *)rproc->table_ptr + rvdev->rsc_offset; rsc->vring[i].da = dma; rsc->vring[i].notifyid = notifyid; + + __flush_dcache_area(rproc->table_ptr, rproc->table_sz); + return 0; } @@ -1778,6 +1782,27 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) } EXPORT_SYMBOL(rproc_report_crash); +void (*rproc_handle_arch_irq)(void); + +int rproc_set_handle_irq(void (handle_irq)(void)) +{ + if (rproc_handle_arch_irq) + return -EBUSY; + + rproc_handle_arch_irq = handle_irq; + return 0; +} +EXPORT_SYMBOL(rproc_set_handle_irq); + +void rproc_handle_ipi(int ipinr) +{ + if (rproc_handle_arch_irq) + rproc_handle_arch_irq(); + + return ; +} +EXPORT_SYMBOL(rproc_handle_ipi); + static int __init remoteproc_init(void) { rproc_init_sysfs(); diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index bbecd44df7e8de3b28ee10b7066c42913aec540a..8126935d0ac97e724db9d491789a82668c2f8941 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "remoteproc_internal.h" @@ -177,6 +178,7 @@ static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status) rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset; rsc->status = status; + __flush_dcache_area(rvdev->rproc->table_ptr, rvdev->rproc->table_sz); dev_dbg(&vdev->dev, "status: %d\n", status); } diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index d153fb1bf65f157a229b28d2a4cf00728b3a884f..8f0c4b0a3e73a7a4f48d9ff5c35fc295ec83b3b5 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -518,12 +518,18 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev) put_device(&ctrldev->dev); } +struct rpmsg_device_id rpmsg_char_id_table[] = { + {.name = "rpmsg-openamp-demo-channel"}, + {} +}; + static struct rpmsg_driver rpmsg_chrdev_driver = { .probe = rpmsg_chrdev_probe, .remove = rpmsg_chrdev_remove, .drv = { .name = "rpmsg_chrdev", }, + .id_table = rpmsg_char_id_table, }; static int rpmsg_char_init(void) diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig old mode 100644 new mode 100755 index b5845f16a3a2651108b97a057cffe8752064555d..f9efd766451f1389498e7bfe0c5061023eff3ec8 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -647,6 +647,26 @@ config RTC_DRV_S5M This driver can also be built as a module. If so, the module will be called rtc-s5m. +config RTC_DRV_SD3068 + tristate "ZXW Shenzhen whwave SD3068" + select REGMAP_I2C + help + If you say yes here you get support for the ZXW Shenzhen whwave + SD3068 RTC chips. 
+ + This driver can also be built as a module. If so, the module + will be called rtc-sd3068 + +config RTC_DRV_SD3078 + tristate "ZXW Shenzhen whwave SD3078" + select REGMAP_I2C + help + If you say yes here you get support for the ZXW Shenzhen whwave + SD3078 RTC chips. + + This driver can also be built as a module. If so, the module + will be called rtc-sd3078 + endif # I2C comment "SPI RTC drivers" diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile old mode 100644 new mode 100755 index 5ff2fc0c361a84bcabe19642c9232b42cf0c25dd..e3172c48bd459f3de418e92cf544a522ba7a38d4 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -176,3 +176,5 @@ obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o obj-$(CONFIG_RTC_DRV_XGENE) += rtc-xgene.o obj-$(CONFIG_RTC_DRV_ZYNQMP) += rtc-zynqmp.o obj-$(CONFIG_RTC_DRV_GOLDFISH) += rtc-goldfish.o +obj-$(CONFIG_RTC_DRV_SD3068) += rtc-sd3068.o +obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o diff --git a/drivers/rtc/rtc-sd3068.c b/drivers/rtc/rtc-sd3068.c new file mode 100755 index 0000000000000000000000000000000000000000..6abd0cd852f3aa70cfb1bd594236baf423f9fbd0 --- /dev/null +++ b/drivers/rtc/rtc-sd3068.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * rtc-sd3068.c - RTC driver for some mostly-compatible I2C chips. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SD3068_REG_SC 0x00 +#define SD3068_REG_MN 0x01 +#define SD3068_REG_HR 0x02 +#define SD3068_REG_DW 0x03 +#define SD3068_REG_DM 0x04 +#define SD3068_REG_MO 0x05 +#define SD3068_REG_YR 0x06 + +#define SD3068_REG_CTRL1 0x0f +#define SD3068_REG_CTRL2 0x10 +#define SD3068_REG_CTRL3 0x11 + +#define KEY_WRITE1 0x80 +#define KEY_WRITE2 0x04 +#define KEY_WRITE3 0x80 + +#define NUM_TIME_REGS (SD3068_REG_YR - SD3068_REG_SC + 1) + +/* + * The sd3068 has write protection + * and we can choose whether or not to use it. + * Write protection is turned off by default. + */ +#define WRITE_PROTECT_EN 1 + +struct sd3068 { + struct rtc_device *rtc; + struct regmap *regmap; +}; + +/* + * In order to prevent arbitrary modification of the time register, + * when modification of the register, + * the "write" bit needs to be written in a certain order. + * 1. set WRITE1 bit + * 2. set WRITE2 bit + * 3. set WRITE3 bit + */ +static void sd3068_enable_reg_write(struct sd3068 *sd3068) +{ + regmap_update_bits(sd3068->regmap, SD3068_REG_CTRL2, + KEY_WRITE1, KEY_WRITE1); + regmap_update_bits(sd3068->regmap, SD3068_REG_CTRL1, + KEY_WRITE2, KEY_WRITE2); + regmap_update_bits(sd3068->regmap, SD3068_REG_CTRL1, + KEY_WRITE3, KEY_WRITE3); +} + +#if WRITE_PROTECT_EN +/* + * In order to prevent arbitrary modification of the time register, + * we should disable the write function. + * when disable write, + * the "write" bit needs to be clear in a certain order. + * 1. clear WRITE2 bit + * 2. clear WRITE3 bit + * 3. 
clear WRITE1 bit + */ +static void sd3068_disable_reg_write(struct sd3068 *sd3068) +{ + regmap_update_bits(sd3068->regmap, SD3068_REG_CTRL1, + KEY_WRITE2, 0); + regmap_update_bits(sd3068->regmap, SD3068_REG_CTRL1, + KEY_WRITE3, 0); + regmap_update_bits(sd3068->regmap, SD3068_REG_CTRL2, + KEY_WRITE1, 0); +} +#endif + +static int sd3068_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char hour; + unsigned char rtc_data[NUM_TIME_REGS] = {0}; + struct i2c_client *client = to_i2c_client(dev); + struct sd3068 *sd3068 = i2c_get_clientdata(client); + int ret; + pr_debug("sd3068 read\n"); + + ret = regmap_bulk_read(sd3068->regmap, SD3068_REG_SC, rtc_data, + NUM_TIME_REGS); + if (ret < 0) { + dev_err(dev, "reading from RTC failed with err:%d\n", ret); + return ret; + } + + tm->tm_sec = bcd2bin(rtc_data[SD3068_REG_SC] & 0x7F); + tm->tm_min = bcd2bin(rtc_data[SD3068_REG_MN] & 0x7F); + + /* + * The sd3068 supports 12/24 hour mode. + * When getting time, + * we need to convert the 12 hour mode to the 24 hour mode. + */ + hour = rtc_data[SD3068_REG_HR]; + if (hour & 0x80) /* 24H MODE */ + tm->tm_hour = bcd2bin(rtc_data[SD3068_REG_HR] & 0x3F); + else if (hour & 0x20) /* 12H MODE PM */ + tm->tm_hour = bcd2bin(rtc_data[SD3068_REG_HR] & 0x1F) + 12; + else /* 12H MODE AM */ + tm->tm_hour = bcd2bin(rtc_data[SD3068_REG_HR] & 0x1F); + + tm->tm_mday = bcd2bin(rtc_data[SD3068_REG_DM] & 0x3F); + tm->tm_wday = rtc_data[SD3068_REG_DW] & 0x07; + tm->tm_mon = bcd2bin(rtc_data[SD3068_REG_MO] & 0x1F) - 1; + tm->tm_year = bcd2bin(rtc_data[SD3068_REG_YR]) + 100; + + return 0; +} + +static int sd3068_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char rtc_data[NUM_TIME_REGS]; + struct i2c_client *client = to_i2c_client(dev); + struct sd3068 *sd3068 = i2c_get_clientdata(client); + int ret; + pr_debug("sd3068 set\n"); + + rtc_data[SD3068_REG_SC] = bin2bcd(tm->tm_sec); + rtc_data[SD3068_REG_MN] = bin2bcd(tm->tm_min); + rtc_data[SD3068_REG_HR] = bin2bcd(tm->tm_hour) | 0x80; + rtc_data[SD3068_REG_DM] = bin2bcd(tm->tm_mday); + rtc_data[SD3068_REG_DW] = tm->tm_wday & 0x07; + rtc_data[SD3068_REG_MO] = bin2bcd(tm->tm_mon) + 1; + rtc_data[SD3068_REG_YR] = bin2bcd(tm->tm_year - 100); + +#if WRITE_PROTECT_EN + sd3068_enable_reg_write(sd3068); +#endif + + ret = regmap_bulk_write(sd3068->regmap, SD3068_REG_SC, rtc_data, + NUM_TIME_REGS); + if (ret < 0) { + dev_err(dev, "writing to RTC failed with err:%d\n", ret); + return ret; + } + +#if WRITE_PROTECT_EN + sd3068_disable_reg_write(sd3068); +#endif + + return 0; +} + +static const struct rtc_class_ops sd3068_rtc_ops = { + .read_time = sd3068_rtc_read_time, + .set_time = sd3068_rtc_set_time, +}; + +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x11, +}; + +static int sd3068_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct sd3068 *sd3068; + unsigned char rtc_data[NUM_TIME_REGS] = {0}; + pr_debug("probed\n"); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + sd3068 = devm_kzalloc(&client->dev, sizeof(*sd3068), GFP_KERNEL); + if (!sd3068) + return -ENOMEM; + + sd3068->regmap = devm_regmap_init_i2c(client, ®map_config); + if (IS_ERR(sd3068->regmap)) { + dev_err(&client->dev, "regmap allocation failed\n"); + return PTR_ERR(sd3068->regmap); + } + + i2c_set_clientdata(client, sd3068); + + sd3068->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(sd3068->rtc)) + return PTR_ERR(sd3068->rtc); + + sd3068->rtc->ops = 
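+	/*
+	 * Note: the bulk read below is a probe-time sanity check; registration
+	 * is aborted if the chip does not answer on the bus. Register writes
+	 * are re-enabled at the very end of probe, and sd3068_rtc_set_time()
+	 * then toggles write protection around each update when
+	 * WRITE_PROTECT_EN is set.
+	 */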
&sd3068_rtc_ops; + sd3068->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + sd3068->rtc->range_max = RTC_TIMESTAMP_END_2099; + + ret = regmap_bulk_read(sd3068->regmap, SD3068_REG_SC, rtc_data, + NUM_TIME_REGS); + if (ret < 0) { + dev_info(&client->dev, "can not read time data when probe\n"); + return ret; + } + + ret = rtc_register_device(sd3068->rtc); + if (ret) + return ret; + + sd3068_enable_reg_write(sd3068); + + return 0; +} + + +static const struct acpi_device_id ds1307_acpi_ids[] = { + { .id = "DS1339", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids); + +static const struct i2c_device_id sd3068_id[] = { + { "sd3068", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c,sd3068_id); + +static const struct of_device_id sd3068_of_match[] = { + { .compatible = "wave,sd3068" }, + { } +}; + +static struct i2c_driver sd3068_driver = { + .driver = { + .name = "rtc-sd3068", + .of_match_table = of_match_ptr(sd3068_of_match), + .acpi_match_table = ACPI_PTR(ds1307_acpi_ids), + }, + .probe = sd3068_probe, + .id_table = sd3068_id, +}; + +module_i2c_driver(sd3068_driver); +MODULE_DEVICE_TABLE(of, sd3068_of_match); + +MODULE_DESCRIPTION("RTC driver for SD3068 and similar chips"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c new file mode 100644 index 0000000000000000000000000000000000000000..dd315f023963b50f52cde656a9bc6fc34a9c064e --- /dev/null +++ b/drivers/rtc/rtc-sd3078.c @@ -0,0 +1,1000 @@ +/* + * drivers sd3078 + * + * Copyright (C) 2022 shenzhen wave + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for sd3078 RTC + * + * Converted to the generic RTC susbsystem by zlh (2022) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "rtc-sd3078.h" +/*time reg*/ +#define SD3078_REG_SEC 0x00 +#define SD3078_REG_MIN 0x01 +#define SD3078_REG_HOUR 0x02 +#define SD3078_REG_WDAY 0x03 +#define SD3078_REG_MDAY 0x04 +#define SD3078_REG_MON 0x05 +#define SD3078_REG_YEAR 0x06 +/*alarm reg*/ +#define SD3078_REG_ALARM_SEC 0x07 +#define SD3078_REG_ALARM_MIN 0x08 +#define SD3078_REG_ALARM_HOUR 0x09 +#define SD3078_REG_ALARM_WEEK 0x0A +#define SD3078_REG_ALARM_DAY 0x0B +#define SD3078_REG_ALARM_MONTH 0x0C +#define SD3078_REG_ALARM_YEAR 0x0D +#define SD3078_REG_ALARM_OE 0x0E +/* control reg*/ +#define SD3078_REG_CTR1 0x0F +#define SD3078_REG_CTR2 0x10 +#define SD3078_REG_CTR3 0x11 +#define SD3078_REG_TTF 0x12 +#define SD3078_REG_TD0 0x13 +#define SD3078_REG_TD1 0x14 +#define SD3078_REG_TD2 0x15 +/*temperature reg*/ +#define SD3078_REG_TEMP 0x16 +/*i2c control reg*/ +#define SD3078_REG_I2C_CTRL 0x17 +/*charge reg*/ +#define SD3078_REG_CHARGE 0x18 +/*extend control reg*/ +#define SD3078_REG_CTR4 0x19 +#define SD3078_REG_CTR5 0x1A +/*battery voltage reg*/ +#define SD3078_REG_BAT_VAL 0x1B +/*temperature low/hign alarm reg*/ +#define SD3078_REG_TEMP_AL 0x1C +#define SD3078_REG_TEMP_AH 0x1D +/*history max/min temperature reg*/ +#define SD3078_REG_HIS_L 0x1E +#define SD3078_REG_HIS_H 0x1F +/*history temperature lowest time ram*/ +#define SD3078_REG_HIS_L_MIN 0x20 +#define SD3078_REG_HIS_L_HOUR 0x21 +#define SD3078_REG_HIS_L_WEEK 0x22 +#define SD3078_REG_HIS_L_DAY 0x23 +#define SD3078_REG_HIS_L_MON 0x24 +#define SD3078_REG_HIS_L_YEAR 0x25 +/*history temperature highest time ram*/ +#define SD3078_REG_HIS_H_MIN 0x26 +#define SD3078_REG_HIS_H_HOUR 0x27 +#define SD3078_REG_HIS_H_WEEK 0x28 
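+/*
+ * Overview of the register map laid out in this block: calendar 0x00-0x06,
+ * alarm 0x07-0x0E, control and count-down timer 0x0F-0x15, temperature,
+ * I2C, charge and battery registers 0x16-0x1D, min/max temperature history
+ * with their time stamps 0x1E-0x2B, 70 bytes of user RAM at 0x2C-0x71 and
+ * an 8-byte device ID at 0x72.
+ */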
+#define SD3078_REG_HIS_H_DAY 0x29
+#define SD3078_REG_HIS_H_MON 0x2A
+#define SD3078_REG_HIS_H_YEAR 0x2B
+/*user ram reg 2CH-71H*/
+#define SD3078_REG_USER_RAM_START 0x2C
+#define SD3078_REG_USER_RAM_END 0x71
+/*device id 72H-79H*/
+#define SD3078_REG_DEVICE_ID 0x72
+
+/*reg bit define*/
+#define SD3078_WRTC1 BIT(7)
+#define SD3078_WRTC2 BIT(2)
+#define SD3078_WRTC3 BIT(7)
+
+#define SD3078_TEMP_AL_OE BIT(2) /*temperature lowest alarm enable*/
+#define SD3078_TEMP_AH_OE BIT(3) /*temperature highest alarm enable*/
+#define SD3078_IM BIT(6)
+#define SD3078_INTS1 BIT(5)
+#define SD3078_INTS0 BIT(4)
+#define SD3078_ALARM_EN BIT(7)
+#define SD3078_INT_AF BIT(5)
+#define SD3078_INTFE BIT(0)
+#define SD3078_INTAE BIT(1)
+#define SD3078_INTDE BIT(2)
+#define SD3078_TDS1 BIT(5)
+#define SD3078_TDS0 BIT(4)
+
+struct sd3078_data {
+	struct i2c_client *client;
+	struct rtc_device *rtc;
+};
+
+static int sd3078_set_alarm_ext(struct i2c_client *client, struct sd3078_alarm_ext *alarm_ext);
+
+/**
+ * @brief sd3078 read a block of registers
+ */
+static int sd3078_read_block_data(struct i2c_client *client, unsigned char reg,
+			unsigned char length, unsigned char *buf)
+{
+	int ret;
+	struct i2c_msg msgs[] = {
+		{	/* setup read ptr */
+			.addr = client->addr,
+			.len = 1,
+			.buf = &reg,
+		},
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+			.buf = buf
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret != 2) {
+		dev_err(&client->dev, "%s: read error=%d\n", __func__, ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * @brief sd3078 write a block of registers
+ */
+static int sd3078_write_block_data(struct i2c_client *client,
+		unsigned char reg, unsigned char length,
+		unsigned char *buf)
+{
+	unsigned char temp[0x80];
+	int ret;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = client->addr,
+			.len = length + 1,
+			.buf = temp,
+		}
+	};
+
+	/* one byte of register address plus the payload must fit in temp[] */
+	if (length >= 0x80)
+		return -EIO;
+	temp[0] = reg;
+	memcpy(&temp[1], buf, length);
+	ret = i2c_transfer(client->adapter, msgs, 1);
+	if (ret != 1) {
+		dev_err(&client->dev, "%s: write error\n", __func__);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * @brief sd3078 enable register writes (lift the write protection)
+ */
+static int sd3078_write_enable(struct i2c_client *client)
+{
+	unsigned char buf[2];
+	int ret;
+
+	ret = sd3078_read_block_data(client, SD3078_REG_CTR1, 2, buf);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+
+	buf[1] |= SD3078_WRTC1;
+	ret = sd3078_write_block_data(client, SD3078_REG_CTR2, 1, &buf[1]);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+	buf[0] |= SD3078_WRTC2;
+	buf[0] |= SD3078_WRTC3;
+	ret = sd3078_write_block_data(client, SD3078_REG_CTR1, 1, &buf[0]);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+	return 0;
+}
+
+/**
+ * @brief sd3078 restore the write protection
+ */
+static int sd3078_write_disable(struct i2c_client *client)
+{
+	unsigned char buf[2];
+	int ret;
+
+	ret = sd3078_read_block_data(client, SD3078_REG_CTR1, 2, buf);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+
+	buf[0] &= ~SD3078_WRTC2;
+	buf[0] &= ~SD3078_WRTC3;
+	buf[1] &= ~SD3078_WRTC1;
+
+	ret = sd3078_write_block_data(client, SD3078_REG_CTR1, 2, buf);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+	return 0;
+}
+
+/**
+ * @brief sd3078 get time
+ */
+static int sd3078_get_time(struct device *dev, struct rtc_time *dt)
+{
+	struct sd3078_data *sd3078 = dev_get_drvdata(dev);
+	int ret;
+	unsigned char date[7];
+
+	ret = sd3078_read_block_data(sd3078->client, SD3078_REG_SEC, 7, date);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+
+	dt->tm_sec = bcd2bin(date[SD3078_REG_SEC - SD3078_REG_SEC] & 0x7f);
+	dt->tm_min = bcd2bin(date[SD3078_REG_MIN - SD3078_REG_SEC] & 0x7f);
+	dt->tm_hour = bcd2bin(date[SD3078_REG_HOUR - SD3078_REG_SEC] & 0x3f);
+	/* the week register is three bits wide (0-6) */
+	dt->tm_wday = bcd2bin(date[SD3078_REG_WDAY - SD3078_REG_SEC] & 0x07);
+	dt->tm_mday = bcd2bin(date[SD3078_REG_MDAY - SD3078_REG_SEC] & 0x3f);
+	/* the chip stores months as 1-12, struct rtc_time uses 0-11 */
+	dt->tm_mon = bcd2bin(date[SD3078_REG_MON - SD3078_REG_SEC] & 0x1f) - 1;
+	dt->tm_year = bcd2bin(date[SD3078_REG_YEAR - SD3078_REG_SEC]) + 100;
+
+	return 0;
+}
+
+/**
+ * @brief sd3078 set time
+ */
+static int sd3078_set_time(struct device *dev, struct rtc_time *dt)
+{
+	struct sd3078_data *sd3078 = dev_get_drvdata(dev);
+	unsigned char date[7];
+	int ret;
+
+	/* the two-digit year register covers 2000-2099 */
+	if ((dt->tm_year < 100) || (dt->tm_year > 199))
+		return -EINVAL;
+	date[SD3078_REG_SEC - SD3078_REG_SEC] = bin2bcd(dt->tm_sec);
+	date[SD3078_REG_MIN - SD3078_REG_SEC] = bin2bcd(dt->tm_min);
+	date[SD3078_REG_HOUR - SD3078_REG_SEC] = bin2bcd(dt->tm_hour);
+	date[SD3078_REG_WDAY - SD3078_REG_SEC] = bin2bcd(dt->tm_wday);
+	date[SD3078_REG_MDAY - SD3078_REG_SEC] = bin2bcd(dt->tm_mday);
+	date[SD3078_REG_MON - SD3078_REG_SEC] = bin2bcd(dt->tm_mon + 1);
+	date[SD3078_REG_YEAR - SD3078_REG_SEC] = bin2bcd(dt->tm_year - 100);
+
+	sd3078_write_enable(sd3078->client);
+	ret = sd3078_write_block_data(sd3078->client, SD3078_REG_SEC, 7, date);
+	sd3078_write_disable(sd3078->client);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+
+	return 0;
+}
+
+static int sd3078_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+	unsigned char buf[8];
+	int ret;
+	struct sd3078_data *sd3078 = dev_get_drvdata(dev);
+
+	ret = sd3078_read_block_data(sd3078->client, SD3078_REG_ALARM_SEC, 8, buf);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+
+	/* alarm registers are BCD; decode them symmetrically to sd3078_set_alarm() */
+	tm->time.tm_sec = bcd2bin(buf[0]);
+	tm->time.tm_min = bcd2bin(buf[1]);
+	tm->time.tm_hour = bcd2bin(buf[2]);
+	tm->time.tm_wday = bcd2bin(buf[3]);
+	tm->time.tm_mday = bcd2bin(buf[4]);
+	tm->time.tm_mon = bcd2bin(buf[5]) - 1;
+	tm->time.tm_year = bcd2bin(buf[6]) + 100;
+
+	tm->enabled = (buf[7] & 0x7f) != 0;
+
+	ret = sd3078_read_block_data(sd3078->client, SD3078_REG_CTR1, 1, &buf[0]);
+	if (ret != 0)
+		return ret < 0 ? ret : -EIO;
+	tm->pending = (buf[0] & SD3078_INT_AF) != 0;
+	return 0;
+}
+
+static int sd3078_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+	int ret;
+	struct sd3078_data *sd3078 = dev_get_drvdata(dev);
+	struct sd3078_alarm_ext alarm_ext;
+
+	/*
+	 * The alarm registers mirror the calendar registers, so map the
+	 * rtc_time fields the same way sd3078_set_time() does: day of month,
+	 * 1-based month and a two-digit year (bin2bcd() only covers 0-99).
+	 */
+	alarm_ext.sec_a = tm->time.tm_sec;
+	alarm_ext.min_a = tm->time.tm_min;
+	alarm_ext.hour_a = tm->time.tm_hour;
+	alarm_ext.week_a = tm->time.tm_wday;
+	alarm_ext.day_a = tm->time.tm_mday;
+	alarm_ext.mon_a = tm->time.tm_mon + 1;
+	alarm_ext.year_a = tm->time.tm_year - 100;
+	if (tm->enabled == 1)
+		alarm_ext.oe_a = 0x7f;
+	else
+		alarm_ext.oe_a = 0;
+	alarm_ext.ie_a = 1;
+	alarm_ext.int_period = 0;
+	ret = sd3078_set_alarm_ext(sd3078->client, &alarm_ext);
+	return ret;
+}
+
+/**
+ * @brief get sd3078 chip temperature
+ */
+static int sd3078_get_temperature(struct i2c_client *client, int *temp, unsigned int cmd)
+{
+	int ret;
+	unsigned int reg;
+	unsigned char buf;
+
+	if (cmd == RTC_SD3078_RD_TEMP)
+		reg = SD3078_REG_TEMP;
+	else if (cmd == RTC_SD3078_TEMP_HIS_L)
+		reg = SD3078_REG_HIS_L;
+	else if (cmd == RTC_SD3078_TEMP_HIS_H)
+		reg = SD3078_REG_HIS_H;
+	else
+		return -EIO;
+
+	ret = sd3078_read_block_data(client, reg, 1, &buf);
+	if (ret != 0)
+		return ret < 0 ?
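+	/*
+	 * The three temperature registers hold a signed 8-bit value (degrees
+	 * Celsius); the (signed char) cast below performs the sign extension.
+	 */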
ret : -EIO; + *temp = (signed char)buf; + return 0; +} + +/** + * @brief sd3078 set when power supply by battery i2c work state, + * en:0,battery supply i2c is not work,1, battery supply i2c is work + * + */ +static int sd3078_battery_i2c_ctrl(struct i2c_client *client, int en) +{ + int ret; + unsigned char buf; + + if(en !=0 &&en !=1) + return -EIO; + if(en) + buf = 0x80; + else + buf =0x00; + sd3078_write_enable(client); + ret = sd3078_write_block_data(client, SD3078_REG_I2C_CTRL, 1, &buf); + sd3078_write_disable(client); + if (ret != 0) + return ret < 0 ? ret : -EIO; + return 0; +} + +/** + * @brief sd3078 read battery voltage + * + */ +static int sd3078_read_battery_voltage( struct i2c_client *client, unsigned int *vol) +{ + int ret; + unsigned char buf[2]; + + ret = sd3078_read_block_data(client, SD3078_REG_CTR5, 2, buf); + if (ret != 0) + return ret < 0 ? ret : -EIO; + *vol = (buf[0] >>7) | buf[1] ; + return 0; +} + +/** + * @brief sd3078 read battery charge control + * + */ +static int sd3078_battery_charge_ctrl(struct i2c_client *client, struct sd3078_charge *ctrl) +{ + unsigned char buf; + int ret; + + if (ctrl->chage_en > 1) + return -EIO; + if (ctrl->resistance > 3) + return -EIO; + buf = (ctrl->chage_en <<7) | ctrl->resistance; + sd3078_write_enable(client); + ret = sd3078_write_block_data(client, SD3078_REG_CHARGE, 1, &buf); + sd3078_write_disable(client); + if (ret != 0) + return ret < 0 ? ret : -EIO; + return 0; +} + +/** + * @brief sd3078 set temperature alarm value; + * cmd:0:set lowest temperature alarm;1:set highest temperature alarm + * + */ +static int sd3078_set_alarm_temp(struct i2c_client *client, struct sd3078_temp_alarm *temp, unsigned int cmd) +{ + unsigned char buf; + int ret; + unsigned char reg; + unsigned char oe_bit; + if(cmd == RTC_SD3078_TEMP_AL) + { + reg = SD3078_REG_TEMP_AL; + oe_bit = SD3078_TEMP_AL_OE; + } + + else if(cmd == RTC_SD3078_TEMP_AH) + { + reg = SD3078_REG_TEMP_AH; + oe_bit = SD3078_TEMP_AH_OE; + } + else + return -EIO; + + + sd3078_write_enable(client); + buf = (unsigned char)temp->temp; + sd3078_write_block_data(client, reg, 1, &buf); + + ret = sd3078_read_block_data(client, SD3078_REG_CTR4, 1, &buf); + if (ret != 0) + return ret < 0 ? ret : -EIO; + /*enable temperature alarm*/ + if (temp->oe == 1) + { + buf |= oe_bit; + } + else + { + buf &= (~oe_bit); + } + ret = sd3078_write_block_data(client, SD3078_REG_CTR4, 1, &buf); + sd3078_write_disable(client); + if (ret != 0) + return ret < 0 ? ret : -EIO; + return 0; +} + +/** + * @brief sd3078 historical minimum / maximum temperature occurrence time + * + */ +static int sd3078_get_his_temp_time(struct i2c_client *client, struct rtc_time *dt, unsigned int cmd) +{ + int ret; + unsigned char date[6]; + unsigned char reg; + + if(cmd == RTC_SD3078_TEMP_HIS_L_T) + reg = SD3078_REG_HIS_L_MIN; + else if(cmd == RTC_SD3078_TEMP_HIS_H_T) + reg = SD3078_REG_HIS_H_MIN; + else + return -EIO; + ret = sd3078_read_block_data(client, reg, 6, date); + if (ret != 0) + return ret < 0 ? 
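+	/*
+	 * The history time stamps start at the minutes register, so second
+	 * resolution is not available; tm_sec is reported as zero below.
+	 */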
ret : -EIO; + dt->tm_sec = 0; + dt->tm_min = bcd2bin(date[SD3078_REG_MIN - SD3078_REG_MIN] & 0x7f); + dt->tm_hour = bcd2bin(date[SD3078_REG_HOUR - SD3078_REG_MIN] & 0x1f); + dt->tm_wday = bcd2bin(date[SD3078_REG_WDAY - SD3078_REG_MIN] & 0x03); + dt->tm_mday = bcd2bin(date[SD3078_REG_MDAY - SD3078_REG_MIN] & 0x3f); + dt->tm_mon = bcd2bin(date[SD3078_REG_MON - SD3078_REG_MIN] & 0x1f) ; + dt->tm_year = bcd2bin(date[SD3078_REG_YEAR - SD3078_REG_MIN]) + 100; + return 0; +} + +/** + * @brief sd3078 read user ram; + * + */ +static int sd3078_read_ram(struct i2c_client *client, unsigned char *buf, struct sd3078_ram *ram) +{ + int ret; + + if(ram->st_addr < SD3078_REG_USER_RAM_START || + ram->end_addr > SD3078_REG_USER_RAM_END || + ram->end_addr < ram->st_addr) + return -EIO; + + ret = sd3078_read_block_data(client, ram->st_addr, ram->end_addr - ram->st_addr + 1, buf); + if (ret != 0) + { + return ret ; + } + return 0; +} + +/** + * @brief sd3078 write user ram; + * + */ +static int sd3078_write_ram(struct i2c_client *client, unsigned char *buf, struct sd3078_ram *ram) +{ + int ret; + + if(ram->st_addr < SD3078_REG_USER_RAM_START || + ram->end_addr > SD3078_REG_USER_RAM_END || + ram->end_addr < ram->st_addr) + return -EIO; + sd3078_write_enable(client); + ret = sd3078_write_block_data(client, ram->st_addr, ram->end_addr - ram->st_addr + 1, buf); + sd3078_write_disable(client); + if (ret != 0) + return ret < 0 ? ret : -EIO; + return 0; +} + +/** + * @brief sd3078 read device id; + * + */ +static int sd3078_read_device_id(struct i2c_client *client, unsigned char *buf) +{ + int ret; + +ret = i2c_smbus_read_i2c_block_data(client, SD3078_REG_DEVICE_ID, + 8, buf); + if (ret != 8) + return ret < 0 ? ret : -EIO; + /*ret = sd3078_read_block_data(client, SD3078_REG_DEVICE_ID, 8, buf); + if (ret != 0) + return ret < 0 ? ret : -EIO;*/ + return 0; +} + +/** + * @brief sd3078 set alarm extend; + * + */ +static int sd3078_set_alarm_ext(struct i2c_client *client, struct sd3078_alarm_ext *alarm_ext) +{ + int ret; + unsigned char buf[8]; + + buf[0] = bin2bcd(alarm_ext->sec_a); + buf[1] = bin2bcd(alarm_ext->min_a); + buf[2] = bin2bcd(alarm_ext->hour_a); + buf[3] = bin2bcd(alarm_ext->week_a); + buf[4] = bin2bcd(alarm_ext->day_a); + buf[5] = bin2bcd(alarm_ext->mon_a); + buf[6] = bin2bcd(alarm_ext->year_a); + buf[7] = alarm_ext->oe_a; + ret = sd3078_write_enable(client); + if (ret != 0) + { + return ret < 0 ? ret : -EIO; + } + ret = sd3078_write_block_data(client, SD3078_REG_ALARM_SEC, 8, buf); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? ret : -EIO; + } + + /*if enable interrupt out?*/ + ret = sd3078_read_block_data(client, SD3078_REG_CTR2,1, &buf[0]); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? ret : -EIO; + } + buf[0] = buf[0]; + buf[0] &= ~SD3078_INTS1; + buf[0] |= SD3078_INTS0; + if(alarm_ext->ie_a == 1) + { + buf[0] |= SD3078_INTAE; + } + else + { + buf[0] &= ~SD3078_INTAE; + } + if(alarm_ext->int_period == 1) + { + buf[0] |= SD3078_IM; + } + else + { + buf[0] &= ~SD3078_IM; + } + ret = sd3078_write_block_data(client, SD3078_REG_CTR2, 1, &buf[0]); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? 
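+	/*
+	 * INTS1:INTS0 in CTR2 select the INT pin source: the alarm path here
+	 * programs 01b, while sd3078_clk_out() below selects 10b for the
+	 * frequency output.
+	 */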
ret : -EIO; + } + sd3078_write_disable(client); + return 0; +} + +/** + * @brief sd3078 clock out + * + */ +static int sd3078_clk_out(struct i2c_client *client, struct sd3078_clk_out *clk) +{ + int ret; + unsigned char buf[2]; + + if(clk->freq > RTC_3078_CLK_OUT_1_SEC) + return -EIO; + + ret = sd3078_write_enable(client); + if (ret != 0) + { + return ret < 0 ? ret : -EIO; + } + ret = sd3078_read_block_data(client, SD3078_REG_CTR2, 2, buf); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? ret : -EIO; + } + + buf[1] &= 0xf0; + buf[1] |= clk->freq; + if(clk->oe == 1) + { + buf[0] |= SD3078_INTFE; + } + else + { + buf[0] &= ~SD3078_INTFE; + } + buf[0] |= SD3078_INTS1; + buf[0] &= ~SD3078_INTS0; + + ret = sd3078_write_block_data(client, SD3078_REG_CTR2, 2, buf); + sd3078_write_disable(client); + if (ret != 0) + { + return ret < 0 ? ret : -EIO; + } + return 0; +} + +/** + * @brief sd3078 count down + * + */ +static int sd3078_count_down(struct i2c_client *client, struct sd3078_count_down *count_down) +{ + int ret; + unsigned char buf[3]; + + if(count_down->count > 0xffffff) + return -EIO; + if(count_down->source > RTC_3078_COUNT_DOWN_1_60) + return -EIO; + + + ret = sd3078_write_enable(client); + if (ret != 0) + { + return ret < 0 ? ret : -EIO; + } + ret = sd3078_read_block_data(client, SD3078_REG_CTR2, 2, buf); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? ret : -EIO; + } + + /*set INTDF =0*/ + buf[0] &= ~SD3078_INTDE; + ret = sd3078_write_block_data(client, SD3078_REG_CTR2, 1, &buf[0]); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? ret : -EIO; + } + + if (count_down->ie == 1) + { + buf[0] |= SD3078_INTDE; + buf[0] |= SD3078_INTS1; + buf[0] |= SD3078_INTS0; + } + + if (count_down->int_period == 1) + { + buf[0] |= SD3078_IM; + } + else + { + buf[0] &= ~SD3078_IM; + } + buf[1] &= ~(0x03 << 4); + buf[1] |= (count_down->source << 4); + ret = sd3078_write_block_data(client, SD3078_REG_CTR2, 2, buf); + if (ret != 0) + { + sd3078_write_disable(client); + return ret < 0 ? ret : -EIO; + } + buf[0] = count_down->count & 0xff; + buf[1] = (count_down->count >> 8 )& 0xff; + buf[2] = (count_down->count >> 16 )& 0xff; + ret = sd3078_write_block_data(client, SD3078_REG_TD0, 3, buf); + sd3078_write_disable(client); + if (ret != 0) + { + return ret < 0 ? 
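+	/*
+	 * TD0..TD2 form a 24-bit down counter, which is why counts greater
+	 * than 0xffffff were rejected at the top of this function.
+	 */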
ret : -EIO; + } + return 0; +} +/** + * @brief sd3078 io ctrl; + * + */ +static int sd3078_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct sd3078_data *sd3078 = dev_get_drvdata(dev); + int ret; + + //printk(KERN_EMERG"sd3078_ioctl=%d,arg=%ld,%d\n",cmd,arg, RTC_SD3078_TEMP_AL); + switch (cmd) { + case RTC_SD3078_RD_TEMP: //read chip temperature + case RTC_SD3078_TEMP_HIS_L: //history lowest temperature value + case RTC_SD3078_TEMP_HIS_H : //history highest temperature value + { + int temp; + + ret = sd3078_get_temperature(sd3078->client, &temp, cmd); + if (ret != 0) + return ret; + if (copy_to_user((void __user *)arg, &temp, sizeof(int))) + return -EFAULT; + } + break; + case RTC_SD3078_BAT_I2C: //battery i2c ctrl + { + ret = sd3078_battery_i2c_ctrl(sd3078->client, arg); + if (ret != 0) + { + return ret; + } + } + break; + case RTC_SD3078_BAT_VOL: //battery voltage read + { + unsigned int vol; + + if(sd3078_read_battery_voltage(sd3078->client, &vol) == 0) + { + if (copy_to_user((void __user *)arg, &vol, sizeof(vol))) + return -EFAULT; + } + else + { + return -EFAULT; + } + } + break; + case RTC_SD3078_EN_CHARGE: //battery charge control + { + struct sd3078_charge ctrl; + + if (copy_from_user(&ctrl, (void __user *)arg, sizeof(ctrl ))) + return -EFAULT; + if (sd3078_battery_charge_ctrl(sd3078->client, &ctrl) != 0) + return -EFAULT; + } + break; + case RTC_SD3078_TEMP_AL : //lowest temperature alarm + case RTC_SD3078_TEMP_AH : //highest temperature alarm + { + struct sd3078_temp_alarm alarm; + + if (copy_from_user(&alarm, (void __user *)arg, sizeof(alarm))) + return -EFAULT; + if (sd3078_set_alarm_temp(sd3078->client, &alarm, cmd) != 0) + return -EFAULT; + } + break; + case RTC_SD3078_TEMP_HIS_L_T : //the occurrence time of the lowest temperature in history + case RTC_SD3078_TEMP_HIS_H_T : //the occurrence time of the highest temperature in history + { + struct rtc_time dt; + + if(sd3078_get_his_temp_time(sd3078->client, &dt, cmd) == 0) + { + if (copy_to_user((void __user *)arg, &dt, sizeof(dt))) + return -EFAULT; + } + else + return -EFAULT; + + } + break; + case RTC_SD3078_RAM_RD: //user ram read + { + struct sd3078_ram ram_r; + unsigned char buf_r[SD3078_REG_USER_RAM_END - SD3078_REG_USER_RAM_START + 1]; + + if (copy_from_user(&ram_r, (void __user *)arg, sizeof(ram_r ))) + return -EFAULT; + if (sd3078_read_ram(sd3078->client, buf_r, &ram_r) == 0) + { + if (copy_to_user((void __user *)ram_r.buf, buf_r, ram_r.end_addr - ram_r.st_addr +1)) + return -EFAULT; + } + else + { + return -EFAULT; + } + } + break; + case RTC_SD3078_RAM_WR: //user ram write + { + struct sd3078_ram ram_w; + unsigned char buf_w[SD3078_REG_USER_RAM_END - SD3078_REG_USER_RAM_START + 1]; + + if (copy_from_user(&ram_w, (void __user *)arg, sizeof(ram_w ))) + return -EFAULT; + if (copy_from_user(buf_w, ram_w.buf, ram_w.end_addr - ram_w.st_addr +1)) + return -EFAULT; + if(sd3078_write_ram(sd3078->client, buf_w, &ram_w) != 0) + return -EFAULT; + } + break; + case RTC_SD3078_DEVICE_ID: //read device id + { + unsigned char device_id[8]; + + if (sd3078_read_device_id(sd3078->client, device_id) == 0) + { + if (copy_to_user((void __user *)arg, device_id, sizeof(device_id))) + return -EFAULT; + } + else + { + return -EFAULT; + } + + } + break; + case RTC_SD3078_ALARM_EXT_SET: /*alarm extend set*/ + { + struct sd3078_alarm_ext alarm_ext; + + if (copy_from_user(&alarm_ext, (void __user *)arg, sizeof(alarm_ext ))) + return -EFAULT; + if (sd3078_set_alarm_ext(sd3078->client, &alarm_ext) != 0) + return -EFAULT; + 
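+		/*
+		 * Illustrative (hypothetical) user-space call sequence for the
+		 * vendor-specific ioctls handled here, assuming the chip shows
+		 * up as /dev/rtc1:
+		 *
+		 *	int fd = open("/dev/rtc1", O_RDWR);
+		 *	int temp;
+		 *
+		 *	if (fd >= 0 && ioctl(fd, RTC_SD3078_RD_TEMP, &temp) == 0)
+		 *		printf("chip temperature: %d C\n", temp);
+		 */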
} + break; + case RTC_SD3078_CLK_OUT: /*freq output ctrl*/ + { + struct sd3078_clk_out clk; + + if (copy_from_user(&clk, (void __user *)arg, sizeof(clk ))) + return -EFAULT; + if (sd3078_clk_out(sd3078->client, &clk) != 0) + return -EFAULT; + } + break; + case RTC_SD3078_COUNT_DOWN_SET: + { + struct sd3078_count_down count_down; + + if (copy_from_user(&count_down, (void __user *)arg, sizeof(count_down ))) + return -EFAULT; + if (sd3078_count_down(sd3078->client, &count_down) != 0) + return -EFAULT; + } + break; + default: + return -ENOIOCTLCMD; + } + return 0; +} + +static struct rtc_class_ops sd3078_rtc_ops = { + .read_time = sd3078_get_time, + .set_time = sd3078_set_time, + .set_alarm = sd3078_set_alarm, + .read_alarm = sd3078_read_alarm, + .ioctl = sd3078_ioctl, +}; + +static irqreturn_t sd3078_irq_1_handler(int irq, void *dev_id) +{ + struct i2c_client *client = dev_id; + struct sd3078_data *sd3078 = i2c_get_clientdata(client); + + mutex_lock(&sd3078->rtc->ops_lock); + /*user code*/ + mutex_unlock(&sd3078->rtc->ops_lock); + return IRQ_HANDLED; +} + +static int sd3078_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct sd3078_data *sd3078; + int err = 0; + + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_I2C_BLOCK)) { + dev_err(&adapter->dev, "doesn't support required functionality\n"); + return -EIO; + } + + sd3078 = devm_kzalloc(&client->dev, sizeof(struct sd3078_data), + GFP_KERNEL); + if (!sd3078) + return -ENOMEM; + + sd3078->client = client; + i2c_set_clientdata(client, sd3078); + sd3078->rtc = devm_rtc_device_register(&client->dev, client->name, + &sd3078_rtc_ops, THIS_MODULE); + if (IS_ERR(sd3078->rtc)) { + dev_err(&client->dev, "unable to register the class device\n"); + return PTR_ERR(sd3078->rtc); + } + if (client->irq > 0) { + dev_info(&client->dev, "IRQ %d supplied\n", client->irq); + err = devm_request_threaded_irq(&client->dev, client->irq, NULL, + sd3078_irq_1_handler, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "sd3078", client); + + if (err) { + dev_err(&client->dev, "unable to request IRQ\n"); + client->irq = 0; + } + } + sd3078->rtc->max_user_freq = 1; + sd3078->rtc->uie_unsupported = 1; + printk(KERN_EMERG "sd3078 probe!,version=V1.0.0\n"); + return err; +} +static const struct i2c_device_id sd3078_id[] = { + { "sd3078", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c,sd3078_id); + +static const struct of_device_id sd3078_of_match[] = { + { .compatible = "wave,sd3078" }, + { } +}; + +static struct i2c_driver sd3078_driver = { + .driver = { + .name = "rtc-sd3078", + .of_match_table = of_match_ptr(sd3078_of_match), + }, + .probe = sd3078_probe, + .id_table = sd3078_id, +}; + +MODULE_DEVICE_TABLE(of, sd3078_of_match); + +module_i2c_driver(sd3078_driver); + +MODULE_AUTHOR("support@whwave.com.cn"); +MODULE_DESCRIPTION("wave sd3078 rtc driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-sd3078.h b/drivers/rtc/rtc-sd3078.h new file mode 100644 index 0000000000000000000000000000000000000000..0c51a6ad07981605002971031ee84af5b9d202ca --- /dev/null +++ b/drivers/rtc/rtc-sd3078.h @@ -0,0 +1,95 @@ +#ifndef __RTC_SD3078__H +#define __RTC_SD3078__H +#include +/*clk output frequency define*/ +#define RTC_3078_CLK_OUT_4096 0x02 +#define RTC_3078_CLK_OUT_1024 0x03 +#define RTC_3078_CLK_OUT_64 0x04 +#define RTC_3078_CLK_OUT_32 0x05 +#define RTC_3078_CLK_OUT_16 0x06 +#define RTC_3078_CLK_OUT_8 0x07 +#define RTC_3078_CLK_OUT_4 0x08 +#define 
RTC_3078_CLK_OUT_2 0x09
+#define RTC_3078_CLK_OUT_1 0x0a
+#define RTC_3078_CLK_OUT_1_2 0x0b
+#define RTC_3078_CLK_OUT_1_4 0x0c
+#define RTC_3078_CLK_OUT_1_8 0x0d
+#define RTC_3078_CLK_OUT_1_16 0x0e
+#define RTC_3078_CLK_OUT_1_SEC 0x0f
+
+/*count-down frequency source define*/
+#define RTC_3078_COUNT_DOWN_4096 0x00
+#define RTC_3078_COUNT_DOWN_1024 0x01
+#define RTC_3078_COUNT_DOWN_1 0x02
+#define RTC_3078_COUNT_DOWN_1_60 0x03
+
+/*charge control struct*/
+struct sd3078_charge {
+	unsigned char chage_en;		/*0:disable, 1:enable*/
+	unsigned char resistance;	/*00:10k; 01:5k; 10:2k; 11:open*/
+};
+
+/*user ram write/read struct*/
+struct sd3078_ram {
+	unsigned char st_addr;
+	unsigned char end_addr;
+	unsigned char *buf;
+};
+
+/*temperature lowest/highest alarm set*/
+struct sd3078_temp_alarm {
+	unsigned char type;	/*0:lowest alarm, 1:highest alarm*/
+	unsigned char oe;	/*temperature alarm enable, 0:disable, 1:enable*/
+	unsigned char ie;	/*interrupt enable; when ie=1 and the alarm fires, the INT pin outputs 0*/
+	int temp;
+};
+
+/*time alarm extend struct*/
+struct sd3078_alarm_ext {
+	unsigned char sec_a;
+	unsigned char min_a;
+	unsigned char hour_a;
+	unsigned char week_a;
+	unsigned char day_a;
+	unsigned char mon_a;
+	unsigned char year_a;
+
+	unsigned char ie_a;		/*interrupt enable, 0:disable, 1:enable*/
+	unsigned char int_period;	/*0:INT pin outputs level 0, 1:INT pin outputs a pulse*/
+	unsigned char oe_a;		/*alarm enable; bit[6]:year, bit[5]:month, bit[4]:day, bit[3]:week, bit[2]:hour, bit[1]:minute, bit[0]:second*/
+};
+
+/*frequency output struct*/
+struct sd3078_clk_out {
+	unsigned char freq;
+	unsigned char oe;	/*clock output enable, 0:disable, 1:enable*/
+};
+
+/*count down struct*/
+struct sd3078_count_down {
+	unsigned int count;
+	unsigned char source;
+	unsigned char ie;		/*interrupt enable, 0:disable, 1:enable*/
+	unsigned char int_period;	/*0:INT pin outputs level 0, 1:INT pin outputs a pulse*/
+};
+
+/*SD3078 ioctl commands; the numbering continues after the standard RTC_* ioctls in rtc.h*/
+#define RTC_SD3078_RD_TEMP _IOR('p', 0x15, int) /*read chip temperature*/
+#define RTC_SD3078_BAT_I2C _IOW('p', 0x16, unsigned char) /*battery i2c ctrl*/
+#define RTC_SD3078_BAT_VOL _IOR('p', 0x17, unsigned int) /*battery voltage read*/
+#define RTC_SD3078_EN_CHARGE _IOW('p', 0x18, struct sd3078_charge) /*battery charge control*/
+#define RTC_SD3078_TEMP_AL _IOW('p', 0x19, struct sd3078_temp_alarm) /*set lowest temperature alarm value*/
+#define RTC_SD3078_TEMP_AH _IOW('p', 0x1a, struct sd3078_temp_alarm) /*set highest temperature alarm value*/
+#define RTC_SD3078_TEMP_HIS_L _IOR('p', 0x1b, signed char) /*history lowest temperature value*/
+#define RTC_SD3078_TEMP_HIS_H _IOR('p', 0x1c, signed char) /*history highest temperature value*/
+#define RTC_SD3078_TEMP_HIS_L_T _IOR('p', 0x1d, struct rtc_time) /*the occurrence time of the lowest temperature in history*/
+#define RTC_SD3078_TEMP_HIS_H_T _IOR('p', 0x1e, struct rtc_time) /*the occurrence time of the highest temperature in history*/
+#define RTC_SD3078_RAM_RD _IOW('p', 0x1f, struct sd3078_ram) /*user ram read*/
+#define RTC_SD3078_RAM_WR _IOW('p', 0x20, struct sd3078_ram) /*user ram write*/
+#define RTC_SD3078_DEVICE_ID _IOR('p', 0x21, int) /*read device id*/
+#define RTC_SD3078_ALARM_EXT_SET _IOW('p', 0x22, struct sd3078_alarm_ext) /*alarm extend set*/
+#define RTC_SD3078_CLK_OUT _IOW('p', 0x23, signed char) /*freq output ctrl*/
+#define RTC_SD3078_COUNT_DOWN_SET _IOW('p', 0x24, signed char) /*count down set*/
+#endif
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index
6768b2e8148a2db0100ff92d0b4067dc58be770a..c20f51af6bdf6bab8571cfd50ad82a7bf1d3c0d6 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1459,11 +1459,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; - int rc; + int rc, cpu = get_cpu_light(); - fps = &get_cpu_var(fcoe_percpu); + fps = &per_cpu(fcoe_percpu, cpu); rc = fcoe_get_paged_crc_eof(skb, tlen, fps); - put_cpu_var(fcoe_percpu); + put_cpu_light(); return rc; } @@ -1650,11 +1650,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, return 0; } - stats = per_cpu_ptr(lport->stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu_light()); stats->InvalidCRCCount++; if (stats->InvalidCRCCount < 5) printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); - put_cpu(); + put_cpu_light(); return -EINVAL; } @@ -1697,7 +1697,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) */ hp = (struct fcoe_hdr *) skb_network_header(skb); - stats = per_cpu_ptr(lport->stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu_light()); if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { if (stats->ErrorFrames < 5) printk(KERN_WARNING "fcoe: FCoE version " @@ -1729,13 +1729,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) goto drop; if (!fcoe_filter_frames(lport, fp)) { - put_cpu(); + put_cpu_light(); fc_exch_recv(lport, fp); return; } drop: stats->ErrorFrames++; - put_cpu(); + put_cpu_light(); kfree_skb(skb); } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 658c0726581f930b6153a5b6edbd732864af480c..bceab74eecf73526f9961b1be95480708ce64c0c 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) INIT_LIST_HEAD(&del_list); - stats = per_cpu_ptr(fip->lp->stats, get_cpu()); + stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; @@ -874,7 +874,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) sel_time = fcf->time; } } - put_cpu(); + put_cpu_light(); list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 9fa0aa235cb4cef42b3ef5303f6ef891000ee913..ed538164320a9aa4218ee4fe1268da02450fc0eb 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, } memset(ep, 0, sizeof(*ep)); - cpu = get_cpu(); + cpu = get_cpu_light(); pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); - put_cpu(); + put_cpu_light(); /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0a7fd56c1ed9d15480d9c23a57a5fdea5657853b..c9d9f49526c1218e1970b367cc7d5848e2f18227 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -454,6 +454,31 @@ config SPI_ORION This enables using the SPI master controller on the Orion and MVEBU chips. +config SPI_PHYTIUM + tristate + depends on ARCH_PHYTIUM || COMPILE_TEST + +config SPI_PHYTIUM_PLAT + tristate "Phytium SPI controller platform support" + select SPI_PHYTIUM + help + This selects a platform driver for Phytium SPI controller. 
+ + If you say yes to this option, support will be included for + Phytium Soc families of SPI controller. + +config SPI_PHYTIUM_PCI + tristate "Phytium SPI controller PCI support" + depends on PCI + select SPI_PHYTIUM + help + This selects a PCI driver for Phytium SPI controller. + + If you say yes to this option, support will be included for + Phytium PCIe chipset of SPI controller. + + If unsure, say N. + config SPI_PIC32 tristate "Microchip PIC32 series SPI" depends on MACH_PIC32 || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index a90d55970036536d4bedeccfe533864ddb02622e..01d74f2a1afefaaced47cf3a1585adeecf2c5922 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -35,6 +35,9 @@ obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o +obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium.o +obj-$(CONFIG_SPI_PHYTIUM_PLAT) += spi-phytium-plat.o +obj-$(CONFIG_SPI_PHYTIUM_PCI) += spi-phytium-pci.o obj-$(CONFIG_SPI_EFM32) += spi-efm32.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-phytium-pci.c b/drivers/spi/spi-phytium-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..a6ed0bb4492cad93733f3b9278197b51bc197c7b --- /dev/null +++ b/drivers/spi/spi-phytium-pci.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller PCI driver. + * + * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. + * + * Derived from drivers/spi/spi-dw-pci.c + * Copyright (c) 2009, 2014 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-phytium.h" + +#define DRIVER_NAME "phytium_spi_pci" + +static int phytium_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_spi *fts; + int pci_bar = 0; + int ret; + + fts = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi), + GFP_KERNEL); + if (!fts) + return -ENOMEM; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "pci iomap failed?\n"); + return ret; + } + + fts->regs = pcim_iomap_table(pdev)[pci_bar]; + if (IS_ERR(fts->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(fts->regs); + } + + fts->irq = pdev->irq; + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + fts->bus_num = -1; + + fts->max_freq = 48000000; + + fts->num_cs = 4; + + fts->global_cs = 1; + + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + return ret; + + pci_set_drvdata(pdev, fts); + return 0; +} + +static void phytium_spi_pci_remove(struct pci_dev *pdev) +{ + struct phytium_spi *fts = pci_get_drvdata(pdev); + + phytium_spi_remove_host(fts); +} + + +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_suspend_host(fts); +} + +static int spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct phytium_spi *fts = spi_master_get_devdata(master); + + return phytium_spi_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); + +static const struct 
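+/*
+ * Unlike the platform variant, the PCI probe above hard-codes a 48 MHz input
+ * clock, four chip selects and global-CS mode; only the Phytium device
+ * function 0xdc2c below is claimed.
+ */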
pci_device_id phytium_device_pci_tbl[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2c) }, + {}, +}; + +static struct pci_driver phytium_spi_pci_driver = { + .name = DRIVER_NAME, + .id_table = phytium_device_pci_tbl, + .probe = phytium_spi_pci_probe, + .remove = phytium_spi_pci_remove, + .driver = { + .pm = &phytium_spi_pm_ops, + } +}; + +module_pci_driver(phytium_spi_pci_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("PCI Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c new file mode 100644 index 0000000000000000000000000000000000000000..3acd3c657781a4612aeb4f237a6143cd6f233601 --- /dev/null +++ b/drivers/spi/spi-phytium-plat.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller platform driver. + * + * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. + * + * Derived from drivers/spi/spi-dw-pci.c + * Copyright (c) 2009, 2014 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-phytium.h" + +#define DRIVER_NAME "phytium_spi" + +static int phytium_spi_probe(struct platform_device *pdev) +{ + struct phytium_spi *fts; + struct resource *mem; + int ret; + int num_cs; + int cs_gpio; + int global_cs; + int i; + + fts = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi), + GFP_KERNEL); + if (!fts) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -EINVAL; + } + + fts->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(fts->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(fts->regs); + } + + fts->irq = platform_get_irq(pdev, 0); + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + if (pdev->dev.of_node) { + fts->clk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(fts->clk)) + return PTR_ERR(fts->clk); + ret = clk_prepare_enable(fts->clk); + if (ret) + return ret; + + fts->max_freq = clk_get_rate(fts->clk); + } else if (has_acpi_companion(&pdev->dev)) { + fts->max_freq = 48000000; + } + + fts->bus_num = pdev->id; + device_property_read_u32(&pdev->dev, "reg-io-width", &fts->reg_io_width); + + num_cs = 4; + + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + + fts->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < fts->num_cs; i++) { + cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } else if(has_acpi_companion(&pdev->dev)) { + int n; + int *cs; + struct gpio_desc *gpiod; + + n = gpiod_count(&pdev->dev, "cs"); + + cs = devm_kcalloc(&pdev->dev, n, sizeof(int), GFP_KERNEL); + fts->cs = cs; + + for (i = 0; i < n; i++) { + gpiod = devm_gpiod_get_index_optional(&pdev->dev, "cs", i, + GPIOD_OUT_LOW); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out; + } + + cs_gpio = desc_to_gpio(gpiod); + cs[i] = cs_gpio; + } + } + + device_property_read_u32(&pdev->dev, "global-cs", &global_cs); + fts->global_cs = global_cs; + + ret = phytium_spi_add_host(&pdev->dev, fts); + if (ret) + goto out; + + platform_set_drvdata(pdev, fts); + return 0; + +out: + 
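+	/*
+	 * fts->clk is only obtained on the devicetree path and
+	 * clk_disable_unprepare() is a no-op for a NULL clock, so this shared
+	 * error label is safe for ACPI probing as well.
+	 */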
clk_disable_unprepare(fts->clk); + return ret; +} + +static int phytium_spi_remove(struct platform_device *pdev) +{ + struct phytium_spi *fts = platform_get_drvdata(pdev); + + phytium_spi_remove_host(fts); + clk_disable_unprepare(fts->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) +{ + struct phytium_spi *fts = dev_get_drvdata(dev); + + return phytium_spi_suspend_host(fts); +} + +static int spi_resume(struct device *dev) +{ + struct phytium_spi *fts = dev_get_drvdata(dev); + + return phytium_spi_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_spi_pm_ops, spi_suspend, spi_resume); + +static const struct of_device_id phytium_spi_of_match[] = { + { .compatible = "phytium,spi", .data = (void *)0 }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, phytium_spi_of_match); + +static const struct acpi_device_id phytium_spi_acpi_match[] = { + {"PHYT000E", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytium_spi_acpi_match); + +static struct platform_driver phytium_spi_driver = { + .probe = phytium_spi_probe, + .remove = phytium_spi_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(phytium_spi_of_match), + .acpi_match_table = ACPI_PTR(phytium_spi_acpi_match), + .pm = &phytium_spi_pm_ops, + }, +}; +module_platform_driver(phytium_spi_driver); + +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_DESCRIPTION("Platform Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..6e29cf8f0bad7f2899665d1da98636a8c6eafa57 --- /dev/null +++ b/drivers/spi/spi-phytium.c @@ -0,0 +1,534 @@ +/* + * Phytium SPI core controller driver. + * + * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. + * + * Derived from drivers/spi/spi-dw-pci.c + * Copyright (c) 2009, 2014 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "spi-phytium.h" + +static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) +{ + return __raw_readl(fts->regs + offset); +} + +static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) +{ + return __raw_readw(fts->regs + offset); +} + +static inline void phytium_writel(struct phytium_spi *fts, u32 offset, u32 val) +{ + __raw_writel(val, fts->regs + offset); +} + +static inline void phytium_writew(struct phytium_spi *fts, u32 offset, u16 val) +{ + __raw_writew(val, fts->regs + offset); +} + +static inline u32 phytium_read_io_reg(struct phytium_spi *fts, u32 offset) +{ + switch (fts->reg_io_width) { + case 2: + return phytium_readw(fts, offset); + case 4: + default: + return phytium_readl(fts, offset); + } +} + +static inline void phytium_write_io_reg(struct phytium_spi *fts, u32 offset, u32 val) +{ + switch (fts->reg_io_width) { + case 2: + phytium_writew(fts, offset, val); + break; + case 4: + default: + phytium_writel(fts, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct phytium_spi *fts, int enable) +{ + phytium_writel(fts, SSIENR, (enable ? 
1 : 0)); +} + +static inline void spi_set_clk(struct phytium_spi *fts, u16 div) +{ + phytium_writel(fts, BAUDR, div); +} + +static inline void spi_mask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) & ~mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_umask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) | mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_global_cs(struct phytium_spi *fts) +{ + u32 global_cs_en, mask, setmask; + + mask = GENMASK(fts->num_cs-1, 0) << fts->num_cs; + setmask = ~GENMASK(fts->num_cs-1, 0); + global_cs_en = (phytium_readl(fts, GCSR) | mask) & setmask; + + phytium_writel(fts, GCSR, global_cs_en); +} + +static inline void spi_reset_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + if (fts->global_cs) + spi_global_cs(fts); + spi_mask_intr(fts, 0xff); + spi_enable_chip(fts, 1); +} + +static inline void spi_shutdown_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + spi_set_clk(fts, 0); +} + +struct phytium_spi_chip { + u8 poll_mode; + u8 type; + void (*cs_control)(u32 command); +}; + +struct chip_data { + u8 cs; + u8 tmode; + u8 type; + + u8 poll_mode; + + u16 clk_div; + u32 speed_hz; + void (*cs_control)(u32 command); +}; + +static void phytium_spi_set_cs(struct spi_device *spi, bool enable) +{ + struct phytium_spi *fts = spi_master_get_devdata(spi->master); + struct chip_data *chip = spi_get_ctldata(spi); + u32 origin; + + if (chip && chip->cs_control) + chip->cs_control(!enable); + + if (!enable) { + phytium_writel(fts, SER, BIT(spi->chip_select)); + if (fts->global_cs) { + origin = phytium_readl(fts, GCSR); + phytium_writel(fts, GCSR, origin | (1 << spi->chip_select)); + } + } else { + if (fts->global_cs) { + origin = phytium_readl(fts, GCSR); + phytium_writel(fts, GCSR, origin & ~(1 << spi->chip_select)); + } + } +} + +static inline u32 tx_max(struct phytium_spi *fts) +{ + u32 tx_left, tx_room, rxtx_gap; + + tx_left = (fts->tx_end - fts->tx) / fts->n_bytes; + tx_room = fts->fifo_len - phytium_readl(fts, TXFLR); + + rxtx_gap = ((fts->rx_end - fts->rx) - (fts->tx_end - fts->tx)) + / fts->n_bytes; + + return min3(tx_left, tx_room, (u32) (fts->fifo_len - rxtx_gap)); +} + +static inline u32 rx_max(struct phytium_spi *fts) +{ + u32 rx_left = (fts->rx_end - fts->rx) / fts->n_bytes; + + return min_t(u32, rx_left, phytium_readl(fts, RXFLR)); +} + +static void phytium_writer(struct phytium_spi *fts) +{ + u32 max = tx_max(fts); + u16 txw = 0; + + while (max--) { + if (fts->tx_end - fts->len) { + if (fts->n_bytes == 1) + txw = *(u8 *)(fts->tx); + else + txw = *(u16 *)(fts->tx); + } + phytium_write_io_reg(fts, DR, txw); + fts->tx += fts->n_bytes; + } +} + +static void phytium_reader(struct phytium_spi *fts) +{ + u32 max = rx_max(fts); + u16 rxw; + + while (max--) { + rxw = phytium_read_io_reg(fts, DR); + if (fts->rx_end - fts->len) { + if (fts->n_bytes == 1) + *(u8 *)(fts->rx) = rxw; + else + *(u16 *)(fts->rx) = rxw; + } + fts->rx += fts->n_bytes; + } +} + +static void int_error_stop(struct phytium_spi *fts, const char *msg) +{ + spi_reset_chip(fts); + + dev_err(&fts->master->dev, "%s\n", msg); + fts->master->cur_msg->status = -EIO; + spi_finalize_current_transfer(fts->master); +} + +static irqreturn_t interrupt_transfer(struct phytium_spi *fts) +{ + u16 irq_status = phytium_readl(fts, ISR); + + if (irq_status & (INT_TXOI | INT_RXOI | INT_RXUI)) { + phytium_readl(fts, ICR); + int_error_stop(fts, 
"interrupt_transfer: fifo overrun/underrun"); + return IRQ_HANDLED; + } + + phytium_reader(fts); + if (fts->rx_end == fts->rx) { + spi_mask_intr(fts, INT_TXEI); + spi_finalize_current_transfer(fts->master); + return IRQ_HANDLED; + } + if (irq_status & INT_TXEI) { + spi_mask_intr(fts, INT_TXEI); + phytium_writer(fts); + spi_umask_intr(fts, INT_TXEI); + } + + return IRQ_HANDLED; +} + +static irqreturn_t phytium_spi_irq(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct phytium_spi *fts = spi_master_get_devdata(master); + u16 irq_status = phytium_readl(fts, ISR) & 0x3f; + + if (!irq_status) + return IRQ_NONE; + + if (!master->cur_msg) { + spi_mask_intr(fts, INT_TXEI); + return IRQ_HANDLED; + } + + if (fts->transfer_handler) + return fts->transfer_handler(fts); + else + return IRQ_HANDLED; +} + +static int poll_transfer(struct phytium_spi *fts) +{ + do { + phytium_writer(fts); + phytium_reader(fts); + cpu_relax(); + } while (fts->rx_end > fts->rx); + + return 0; +} + +static int phytium_spi_transfer_one(struct spi_master *master, + struct spi_device *spi, struct spi_transfer *transfer) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + struct chip_data *chip = spi_get_ctldata(spi); + u8 imask = 0; + u16 txlevel = 0; + u16 clk_div; + u32 cr0; + + fts->tx = (void *)transfer->tx_buf; + fts->tx_end = fts->tx + transfer->len; + fts->rx = transfer->rx_buf; + fts->rx_end = fts->rx + transfer->len; + fts->len = transfer->len; + + spi_enable_chip(fts, 0); + + if (transfer->speed_hz != chip->speed_hz) { + clk_div = (fts->max_freq / transfer->speed_hz + 1) & 0xfffe; + + chip->speed_hz = transfer->speed_hz; + chip->clk_div = clk_div; + + spi_set_clk(fts, chip->clk_div); + } + + if (transfer->bits_per_word == 8) { + fts->n_bytes = 1; + } else if (transfer->bits_per_word == 16) { + fts->n_bytes = 2; + } else { + return -EINVAL; + } + + cr0 = (transfer->bits_per_word - 1) + | (chip->type << FRF_OFFSET) + | (spi->mode << MODE_OFFSET) + | (chip->tmode << TMOD_OFFSET); + + if (chip->cs_control) { + if (fts->rx && fts->tx) + chip->tmode = TMOD_TR; + else if (fts->rx) + chip->tmode = TMOD_RO; + else + chip->tmode = TMOD_TO; + + cr0 &= ~TMOD_MASK; + cr0 |= (chip->tmode << TMOD_OFFSET); + } + + phytium_writel(fts, CTRL0, cr0); + + spi_mask_intr(fts, 0xff); + + if (!chip->poll_mode) { + txlevel = min_t(u16, fts->fifo_len / 2, fts->len / fts->n_bytes); + phytium_writel(fts, TXFLTR, txlevel); + + imask |= INT_TXEI | INT_TXOI | + INT_RXUI | INT_RXOI; + spi_umask_intr(fts, imask); + + fts->transfer_handler = interrupt_transfer; + } + + spi_enable_chip(fts, 1); + + if (chip->poll_mode) + return poll_transfer(fts); + + return 1; +} + +static void phytium_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + + spi_reset_chip(fts); +} + +static int phytium_spi_setup(struct spi_device *spi) +{ + struct phytium_spi_chip *chip_info = NULL; + struct chip_data *chip; + struct spi_master *master = spi->master; + struct phytium_spi *fts = spi_master_get_devdata(master); + int ret; + u32 cr0; + + spi_enable_chip(fts, 0); + + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + chip_info = spi->controller_data; + + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } + + chip->tmode = 0; + 
+ cr0 = (spi->bits_per_word - 1) | (chip->type << FRF_OFFSET) | + (spi->mode << MODE_OFFSET) | (chip->tmode << TMOD_OFFSET); + + phytium_writel(fts, CTRL0, cr0); + + if (gpio_is_valid(spi->cs_gpio)) { + ret = gpio_direction_output(spi->cs_gpio, + !(spi->mode & SPI_CS_HIGH)); + if (ret) + return ret; + } + + spi_enable_chip(fts, 1); + + return 0; +} + +static void phytium_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +static void spi_hw_init(struct device *dev, struct phytium_spi *fts) +{ + spi_reset_chip(fts); + + if (!fts->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + phytium_writel(fts, TXFLTR, fifo); + if (fifo != phytium_readl(fts, TXFLTR)) + break; + } + phytium_writel(fts, TXFLTR, 0); + + fts->fifo_len = (fifo == 1) ? 0 : fifo; + dev_dbg(dev, "Detected FIFO size: %u bytes\n", fts->fifo_len); + } +} + +int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) +{ + struct spi_master *master; + int ret; + + BUG_ON(fts == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + fts->master = master; + snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); + + ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, fts->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); + goto err_free_master; + } + + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = fts->bus_num; + master->num_chipselect = fts->num_cs; + master->setup = phytium_spi_setup; + master->cleanup = phytium_spi_cleanup; + master->set_cs = phytium_spi_set_cs; + master->transfer_one = phytium_spi_transfer_one; + master->handle_err = phytium_spi_handle_err; + master->max_speed_hz = fts->max_freq; + master->dev.of_node = dev->of_node; + master->dev.fwnode = dev->fwnode; + master->flags = SPI_MASTER_GPIO_SS; + master->cs_gpios = fts->cs; + + spi_hw_init(dev, fts); + + spi_master_set_devdata(master, fts); + ret = devm_spi_register_master(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_exit; + } + + return 0; + +err_exit: + spi_enable_chip(fts, 0); + free_irq(fts->irq, master); +err_free_master: + spi_master_put(master); + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_add_host); + +void phytium_spi_remove_host(struct phytium_spi *fts) +{ + spi_shutdown_chip(fts); + + free_irq(fts->irq, fts->master); +} +EXPORT_SYMBOL_GPL(phytium_spi_remove_host); + +int phytium_spi_suspend_host(struct phytium_spi *fts) +{ + int ret; + + ret = spi_controller_suspend(fts->master); + if (ret) + return ret; + + spi_shutdown_chip(fts); + return 0; +} +EXPORT_SYMBOL_GPL(phytium_spi_suspend_host); + +int phytium_spi_resume_host(struct phytium_spi *fts) +{ + int ret; + + spi_hw_init(&fts->master->dev, fts); + ret = spi_controller_resume(fts->master); + if (ret) + dev_err(&fts->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_resume_host); + +MODULE_AUTHOR("Zhu Mingshuai "); +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-phytium.h b/drivers/spi/spi-phytium.h new file mode 100644 index 0000000000000000000000000000000000000000..d75e8ba7daa40ad772027f53564b6578a337a081 --- /dev/null +++ b/drivers/spi/spi-phytium.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium SPI 
controller driver.
+ *
+ * Copyright (c) 2019-2023 Phytium Technology Co., Ltd.
+ */
+#ifndef PHYTIUM_SPI_HEADER_H
+#define PHYTIUM_SPI_HEADER_H
+
+#include
+#include
+#include
+
+#define CTRL0 0x00
+#define SSIENR 0x08
+#define SER 0x10
+#define BAUDR 0x14
+#define TXFLTR 0x18
+#define TXFLR 0x20
+#define RXFLR 0x24
+#define IMR 0x2c
+#define ISR 0x30
+#define ICR 0x48
+#define DR 0x60
+#define GCSR 0x100
+
+#define FRF_OFFSET 4
+#define MODE_OFFSET 6
+#define TMOD_OFFSET 8
+
+#define TMOD_MASK (0x3 << TMOD_OFFSET)
+#define TMOD_TR 0x0
+#define TMOD_TO 0x1
+#define TMOD_RO 0x2
+
+#define INT_TXEI (1 << 0)
+#define INT_TXOI (1 << 1)
+#define INT_RXUI (1 << 2)
+#define INT_RXOI (1 << 3)
+
+struct phytium_spi {
+	struct spi_master *master;
+	char name[16];
+
+	void __iomem *regs;
+	bool global_cs;
+	unsigned long paddr;
+	int irq;
+	u32 fifo_len;
+	u32 max_freq;
+
+	u32 reg_io_width;
+	u16 bus_num;
+	u16 num_cs;
+	int *cs;
+
+	size_t len;
+	void *tx;
+	void *tx_end;
+	void *rx;
+	void *rx_end;
+	u8 n_bytes;
+	struct clk *clk;
+	irqreturn_t (*transfer_handler)(struct phytium_spi *fts);
+};
+
+extern int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts);
+extern void phytium_spi_remove_host(struct phytium_spi *fts);
+extern int phytium_spi_suspend_host(struct phytium_spi *fts);
+extern int phytium_spi_resume_host(struct phytium_spi *fts);
+
+#endif /* PHYTIUM_SPI_HEADER_H */
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 034d86869772ef5bad01829e2764f9bce61820c1..d089b2cb5dd7defd6025edffbb4acd4d252c9a5a 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -438,12 +438,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
 			return -EINVAL;
 		wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
-		hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
+		hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC,
+					      HRTIMER_MODE_ABS, current);
 		hrtimer_set_expires_range_ns(&to->timer, wake_time,
 					     current->timer_slack_ns);
-
-		hrtimer_init_sleeper(to, current);
 	}
 
 	while (1) {
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index a6df12d88f90cd097ab88df8d151970243aea26b..f4cf1ba4a6c4e254e1f26369ba9eb047f6d2e243 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -2,6 +2,7 @@
 config TEE
 	tristate "Trusted Execution Environment support"
 	depends on HAVE_ARM_SMCCC || COMPILE_TEST
+	select CRYPTO_SHA1
 	select DMA_SHARED_BUFFER
 	select GENERIC_ALLOCATOR
 	help
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 3c1ec4e9ed29aeeb692bb3eb184a2cdf75dd5742..f3be66ef85b79d654dd5cbc8c65af89eafd6efa7 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -14,3 +14,33 @@ config OPTEE_SHM_NUM_PRIV_PAGES
 	help
 	  This sets the number of private shared memory pages to be
 	  used by OP-TEE TEE driver.
+
+if OPTEE
+
+choice
+	prompt "Default conduit method"
+	default OPTEE_DEFAULT_METHOD_NONE
+	help
+	  This option sets the default conduit method for OP-TEE in case
+	  the firmware omits the "method" property. If in doubt, select
+	  "none", which relies on the firmware to provide the value.
+
+config OPTEE_DEFAULT_METHOD_NONE
+	bool "none"
+	help
+	  There is no default conduit method used by the driver. Require
+	  the firmware to provide the method explicitly.
+
+config OPTEE_DEFAULT_METHOD_HVC
+	bool "hvc"
+	help
+	  Use "hvc" as the default conduit method.
+
+config OPTEE_DEFAULT_METHOD_SMC
+	bool "smc"
+	help
+	  Use "smc" as the default conduit method.
+
+endchoice
+
+endif
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 4f4a7e2b122bfd46a15ac31f553f1b615d1686ae..37d021fe99ebe3d8f3ace671b28353de64472db9 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -241,9 +241,13 @@ int optee_open_session(struct tee_context *ctx,
 	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
 				  OPTEE_MSG_ATTR_META;
 	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
-	memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid));
 	msg_arg->params[1].u.value.c = arg->clnt_login;
 
+	rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
+					  arg->clnt_login, arg->clnt_uuid);
+	if (rc)
+		goto out;
+
 	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
 	if (rc)
 		goto out;
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 473981e3ad70b257c7c94859087d26a5fb46b6db..dc47c5493ae280e12662db9294873b089cdba88f 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -14,6 +14,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include
 #include
 #include
 #include
@@ -519,15 +520,23 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
 	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
 }
 
-static optee_invoke_fn *get_invoke_func(struct device_node *np)
+#if defined(CONFIG_OPTEE_DEFAULT_METHOD_HVC)
+#define DEFAULT_CONDUIT_METHOD optee_smccc_hvc
+#elif defined(CONFIG_OPTEE_DEFAULT_METHOD_SMC)
+#define DEFAULT_CONDUIT_METHOD optee_smccc_smc
+#else
+#define DEFAULT_CONDUIT_METHOD ERR_PTR(-ENXIO)
+#endif
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
 {
 	const char *method;
 
-	pr_info("probing for conduit method from DT.\n");
+	pr_info("probing for conduit method.\n");
 
-	if (of_property_read_string(np, "method", &method)) {
+	if (device_property_read_string(dev, "method", &method)) {
 		pr_warn("missing \"method\" property\n");
-		return ERR_PTR(-ENXIO);
+		return DEFAULT_CONDUIT_METHOD;
 	}
 
 	if (!strcmp("hvc", method))
@@ -539,7 +548,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np)
 	return ERR_PTR(-EINVAL);
 }
 
-static struct optee *optee_probe(struct device_node *np)
+static int optee_remove(struct platform_device *pdev)
+{
+	struct optee *optee = platform_get_drvdata(pdev);
+
+	/*
+	 * Ask OP-TEE to free all cached shared memory objects to decrease
+	 * reference counters and also avoid wild pointers in secure world
+	 * into the old shared memory range.
+	 */
+	optee_disable_shm_cache(optee);
+
+	/*
+	 * The two devices have to be unregistered before we can free the
+	 * other resources.
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); + + return 0; +} + +static int optee_probe(struct platform_device *pdev) { optee_invoke_fn *invoke_fn; struct tee_shm_pool *pool; @@ -549,25 +588,25 @@ static struct optee *optee_probe(struct device_node *np) u32 sec_caps; int rc; - invoke_fn = get_invoke_func(np); + invoke_fn = get_invoke_func(&pdev->dev); if (IS_ERR(invoke_fn)) - return (void *)invoke_fn; + return PTR_ERR(invoke_fn); if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { pr_warn("api uid mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } optee_msg_get_os_revision(invoke_fn); if (!optee_msg_api_revision_is_compatible(invoke_fn)) { pr_warn("api revision mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { pr_warn("capabilities mismatch\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } /* @@ -575,11 +614,11 @@ static struct optee *optee_probe(struct device_node *np) * doesn't have any reserved memory we can use we can't continue. */ if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) - return ERR_PTR(-EINVAL); + return -EINVAL; pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps); if (IS_ERR(pool)) - return (void *)pool; + return PTR_ERR(pool); optee = kzalloc(sizeof(*optee), GFP_KERNEL); if (!optee) { @@ -630,8 +669,10 @@ static struct optee *optee_probe(struct device_node *np) optee_enable_shm_cache(optee); + platform_set_drvdata(pdev, optee); + pr_info("initialized driver\n"); - return optee; + return 0; err: if (optee) { /* @@ -647,83 +688,37 @@ static struct optee *optee_probe(struct device_node *np) tee_shm_pool_free(pool); if (memremaped_shm) memunmap(memremaped_shm); - return ERR_PTR(rc); -} - -static void optee_remove(struct optee *optee) -{ - /* - * Ask OP-TEE to free all cached shared memory objects to decrease - * reference counters and also avoid wild pointers in secure world - * into the old shared memory range. - */ - optee_disable_shm_cache(optee); - - /* - * The two devices has to be unregistered before we can free the - * other resources. 
- */ - tee_device_unregister(optee->supp_teedev); - tee_device_unregister(optee->teedev); - - tee_shm_pool_free(optee->pool); - if (optee->memremaped_shm) - memunmap(optee->memremaped_shm); - optee_wait_queue_exit(&optee->wait_queue); - optee_supp_uninit(&optee->supp); - mutex_destroy(&optee->call_queue.mutex); - - kfree(optee); + return rc; } -static const struct of_device_id optee_match[] = { +static const struct of_device_id optee_dt_match[] = { { .compatible = "linaro,optee-tz" }, {}, }; +MODULE_DEVICE_TABLE(of, optee_dt_match); -static struct optee *optee_svc; - -static int __init optee_driver_init(void) -{ - struct device_node *fw_np; - struct device_node *np; - struct optee *optee; - - /* Node is supposed to be below /firmware */ - fw_np = of_find_node_by_name(NULL, "firmware"); - if (!fw_np) - return -ENODEV; - - np = of_find_matching_node(fw_np, optee_match); - if (!np || !of_device_is_available(np)) { - of_node_put(np); - return -ENODEV; - } - - optee = optee_probe(np); - of_node_put(np); - - if (IS_ERR(optee)) - return PTR_ERR(optee); - - optee_svc = optee; - - return 0; -} -module_init(optee_driver_init); - -static void __exit optee_driver_exit(void) -{ - struct optee *optee = optee_svc; - - optee_svc = NULL; - if (optee) - optee_remove(optee); -} -module_exit(optee_driver_exit); +#ifdef CONFIG_ACPI +static const struct acpi_device_id optee_acpi_match[] = { + { "PHYT8003" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, optee_acpi_match); +#endif + +static struct platform_driver optee_driver = { + .probe = optee_probe, + .remove = optee_remove, + .driver = { + .name = "optee", + .of_match_table = optee_dt_match, + .acpi_match_table = ACPI_PTR(optee_acpi_match), + }, +}; +module_platform_driver(optee_driver); MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("OP-TEE driver"); MODULE_SUPPORTED_DEVICE(""); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:optee"); diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index d42fc2ae8592ef62a32c036534b5da7621524583..03e3b2ea8c1a61abc9e8fb0c1bfc8b91306f37cc 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ #include +#include #include #include #include @@ -22,12 +23,27 @@ #include #include #include +#include +#include #include "tee_private.h" #define TEE_NUM_DEVICES 32 #define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) +#define TEE_UUID_NS_NAME_SIZE 128 + +/* + * TEE Client UUID name space identifier (UUIDv4) + * + * Value here is random UUID that is allocated as name space identifier for + * forming Client UUID's for TEE environment using UUIDv5 scheme. + */ +static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683, + 0xa1, 0xb8, 0xec, 0x4b, + 0xc0, 0x8e, 0x01, 0xb6); + + /* * Unprivileged devices in the lower half range and privileged devices in * the upper half range. @@ -108,6 +124,143 @@ static int tee_release(struct inode *inode, struct file *filp) return 0; } +/** + * uuid_v5() - Calculate UUIDv5 + * @uuid: Resulting UUID + * @ns: Name space ID for UUIDv5 function + * @name: Name for UUIDv5 function + * @size: Size of name + * + * UUIDv5 is specific in RFC 4122. + * + * This implements section (for SHA-1): + * 4.3. 
Algorithm for Creating a Name-Based UUID + */ +static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name, + size_t size) +{ + unsigned char hash[SHA1_DIGEST_SIZE]; + struct crypto_shash *shash = NULL; + struct shash_desc *desc = NULL; + int rc; + + shash = crypto_alloc_shash("sha1", 0, 0); + if (IS_ERR(shash)) { + rc = PTR_ERR(shash); + pr_err("shash(sha1) allocation failed\n"); + return rc; + } + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash), + GFP_KERNEL); + if (!desc) { + rc = -ENOMEM; + goto out_free_shash; + } + + desc->tfm = shash; + + rc = crypto_shash_init(desc); + if (rc < 0) + goto out_free_desc; + + rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns)); + if (rc < 0) + goto out_free_desc; + + rc = crypto_shash_update(desc, (const u8 *)name, size); + if (rc < 0) + goto out_free_desc; + + rc = crypto_shash_final(desc, hash); + if (rc < 0) + goto out_free_desc; + + memcpy(uuid->b, hash, UUID_SIZE); + + /* Tag for version 5 */ + uuid->b[6] = (hash[6] & 0x0F) | 0x50; + uuid->b[8] = (hash[8] & 0x3F) | 0x80; + +out_free_desc: + kfree(desc); + +out_free_shash: + crypto_free_shash(shash); + return rc; +} + +int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + const u8 connection_data[TEE_IOCTL_UUID_LEN]) +{ + gid_t ns_grp = (gid_t)-1; + kgid_t grp = INVALID_GID; + char *name = NULL; + int name_len; + int rc; + + if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) { + /* Nil UUID to be passed to TEE environment */ + uuid_copy(uuid, &uuid_null); + return 0; + } + + /* + * In Linux environment client UUID is based on UUIDv5. + * + * Determine client UUID with following semantics for 'name': + * + * For TEEC_LOGIN_USER: + * uid= + * + * For TEEC_LOGIN_GROUP: + * gid= + * + */ + + name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL); + if (!name) + return -ENOMEM; + + switch (connection_method) { + case TEE_IOCTL_LOGIN_USER: + name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x", + current_euid().val); + if (name_len >= TEE_UUID_NS_NAME_SIZE) { + rc = -E2BIG; + goto out_free_name; + } + break; + + case TEE_IOCTL_LOGIN_GROUP: + memcpy(&ns_grp, connection_data, sizeof(gid_t)); + grp = make_kgid(current_user_ns(), ns_grp); + if (!gid_valid(grp) || !in_egroup_p(grp)) { + rc = -EPERM; + goto out_free_name; + } + + name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x", + grp.val); + if (name_len >= TEE_UUID_NS_NAME_SIZE) { + rc = -E2BIG; + goto out_free_name; + } + break; + + default: + rc = -EINVAL; + goto out_free_name; + } + + rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len); +out_free_name: + kfree(name); + + return rc; +} +EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid); + static int tee_ioctl_version(struct tee_context *ctx, struct tee_ioctl_version_data __user *uvers) { diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 1ef937d799e4f3d200dbfe9fb5a3dc2b08fd1d21..540becb78a0f3e724d4421604aadebd7b10b1cb7 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -75,7 +75,7 @@ static int max_packages __read_mostly; /* Array of package pointers */ static struct pkg_device **packages; /* Serializes interrupt notification, work and hotplug */ -static DEFINE_SPINLOCK(pkg_temp_lock); +static DEFINE_RAW_SPINLOCK(pkg_temp_lock); /* Protects zone operation in the work function against hotplug removal */ static DEFINE_MUTEX(thermal_zone_mutex); @@ -291,12 +291,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) u64 msr_val, 
wr_val; mutex_lock(&thermal_zone_mutex); - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); ++pkg_work_cnt; pkgdev = pkg_temp_thermal_get_dev(cpu); if (!pkgdev) { - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); mutex_unlock(&thermal_zone_mutex); return; } @@ -310,7 +310,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) } enable_pkg_thres_interrupt(); - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); /* * If tzone is not NULL, then thermal_zone_mutex will prevent the @@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val) struct pkg_device *pkgdev; unsigned long flags; - spin_lock_irqsave(&pkg_temp_lock, flags); + raw_spin_lock_irqsave(&pkg_temp_lock, flags); ++pkg_interrupt_cnt; disable_pkg_thres_interrupt(); @@ -347,7 +347,7 @@ static int pkg_thermal_notify(u64 msr_val) pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work); } - spin_unlock_irqrestore(&pkg_temp_lock, flags); + raw_spin_unlock_irqrestore(&pkg_temp_lock, flags); return 0; } @@ -393,9 +393,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) pkgdev->msr_pkg_therm_high); cpumask_set_cpu(cpu, &pkgdev->cpumask); - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); packages[pkgid] = pkgdev; - spin_unlock_irq(&pkg_temp_lock); + raw_spin_unlock_irq(&pkg_temp_lock); return 0; } @@ -432,7 +432,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) } /* Protect against work and interrupts */ - spin_lock_irq(&pkg_temp_lock); + raw_spin_lock_irq(&pkg_temp_lock); /* * Check whether this cpu was the current target and store the new @@ -464,9 +464,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) * To cancel the work we need to drop the lock, otherwise * we might deadlock if the work needs to be flushed. 
 	 */
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	cancel_delayed_work_sync(&pkgdev->work);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	/*
 	 * If this is not the last cpu in the package and the work
 	 * did not run after we dropped the lock above, then we
@@ -477,7 +477,7 @@
 		pkg_thermal_schedule_work(target, &pkgdev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index d2df7d71d6667e74b4ec01fcdfe874e432fcffe9..57fd29b2ddf5ed90bbee0ad1b651ebe95cc8a2ad 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -54,7 +54,16 @@ static struct uart_driver serial8250_reg;
 
 static unsigned int skip_txen_test; /* force skip of txen test at init time */
 
-#define PASS_LIMIT 512
+/*
+ * On -rt we can have more delays, and legitimately
+ * so - so don't drop work spuriously and spam the
+ * syslog:
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define PASS_LIMIT 1000000
+#else
+# define PASS_LIMIT 512
+#endif
 
 #include
 
 /*
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 34687c354f5e97a98fa1b08967d08b5d1997a58f..418b9c818b2e16854c90a8c39f54e0f99b9efc17 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -3294,9 +3295,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
 
 	serial8250_rpm_get(up);
 
-	if (port->sysrq)
+	if (port->sysrq || oops_in_progress)
 		locked = 0;
-	else if (oops_in_progress)
+	else if (in_kdb_printk())
 		locked = spin_trylock_irqsave(&port->lock, flags);
 	else
 		spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index cd13065095bc32fac477b0047286c755238eef7d..a11e6c87fe32b63f8573e9832b2142acca6212d7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -74,6 +74,17 @@ config SERIAL_AMBA_PL011_CONSOLE
 	  your boot loader (lilo or loadlin) about how to pass options to the
 	  kernel at boot time.)
 
+config SERIAL_PHYTIUM_PCI
+	tristate "Phytium PCI serial port support"
+	depends on PCI
+	select SERIAL_CORE
+	help
+	  This driver supports the Phytium UART controller on PCI/PCIe adapters.
+	  If you want to compile this driver into the kernel, say Y here. To
+	  compile this driver as a module, choose M here.
+
+	  If unsure, say N.
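+
+	  If built as a module, the module will be called phytium-uart.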
+ config SERIAL_EARLYCON_ARM_SEMIHOST bool "Early console using ARM semihosting" depends on ARM64 || ARM diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index daac675612dff4804869460e7946210dd1eb7466..6d4cf6bc06ca20b50411ddc573ff169cebce8fdc 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250/ obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o +obj-$(CONFIG_SERIAL_PHYTIUM_PCI) += phytium-uart.o obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 5edc3813a9b99341cf3d4124a55af40b8630954d..9666a20f23ecd6ed0ad039f2f14404f925add9f8 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2191,18 +2191,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int old_cr = 0, new_cr; - unsigned long flags; + unsigned long flags = 0; int locked = 1; clk_enable(uap->clk); - local_irq_save(flags); + /* + * local_irq_save(flags); + * + * This local_irq_save() is nonsense. If we come in via sysrq + * handling then interrupts are already disabled. Aside of + * that the port.sysrq check is racy on SMP regardless. + */ if (uap->port.sysrq) locked = 0; else if (oops_in_progress) - locked = spin_trylock(&uap->port.lock); + locked = spin_trylock_irqsave(&uap->port.lock, flags); else - spin_lock(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* * First save the CR then disable the interrupts @@ -2228,8 +2234,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) pl011_write(old_cr, uap, REG_CR); if (locked) - spin_unlock(&uap->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&uap->port.lock, flags); clk_disable(uap->clk); } diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 6420ae581a802fe3c614591dce7ded443e0610a6..0f4f41ed9ffa555332bb895ccbecd51f7823301e 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1307,13 +1307,10 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_get_sync(up->dev); - local_irq_save(flags); - if (up->port.sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); else - spin_lock(&up->port.lock); + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -1342,8 +1339,7 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init diff --git a/drivers/tty/serial/phytium-uart.c b/drivers/tty/serial/phytium-uart.c new file mode 100644 index 0000000000000000000000000000000000000000..a076668a32b9dde06c1c4683873cfb53d826fb27 --- /dev/null +++ b/drivers/tty/serial/phytium-uart.c @@ -0,0 +1,927 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Phytium PCI UART controller + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. 
+ * + * Derived from drivers/tty/serial/amba-pl011.c + * Copyright 1999 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * Copyright (C) 2010 ST-Ericsson SA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "phytium_uart" + +#define REG_DR 0x00 +#define REG_FR 0x18 +#define REG_IBRD 0x24 +#define REG_FBRD 0x28 +#define REG_LCRH_RX 0x2c +#define REG_LCRH_TX 0x2c +#define REG_CR 0x30 +#define REG_IFLS 0x34 +#define REG_IMSC 0x38 +#define REG_RIS 0x3c +#define REG_MIS 0x40 +#define REG_ICR 0x44 + +#define REG_DR_OE (1 << 11) +#define REG_DR_BE (1 << 10) +#define REG_DR_PE (1 << 9) +#define REG_DR_FE (1 << 8) + +#define REG_LCRH_SPS 0x80 +#define REG_LCRH_WLEN_8 0x60 +#define REG_LCRH_WLEN_7 0x40 +#define REG_LCRH_WLEN_6 0x20 +#define REG_LCRH_WLEN_5 0x00 +#define REG_LCRH_FEN 0x10 +#define REG_LCRH_STP2 0x08 +#define REG_LCRH_EPS 0x04 +#define REG_LCRH_PEN 0x02 +#define REG_LCRH_BRK 0x01 + +#define REG_FR_RI 0x100 +#define REG_FR_TXFE 0x080 +#define REG_FR_RXFF 0x040 +#define REG_FR_TXFF 0x020 +#define REG_FR_RXFE 0x010 +#define REG_FR_BUSY 0x008 +#define REG_FR_DCD 0x004 +#define REG_FR_DSR 0x002 +#define REG_FR_CTS 0x001 +#define REG_FR_TMSK (REG_FR_TXFF + REG_FR_BUSY) + +#define REG_CR_CTSEN 0x8000 /* CTS hardware flow control */ +#define REG_CR_RTSEN 0x4000 /* RTS hardware flow control */ +#define REG_CR_OUT2 0x2000 /* OUT2 */ +#define REG_CR_OUT1 0x1000 /* OUT1 */ +#define REG_CR_RTS 0x0800 /* RTS */ +#define REG_CR_DTR 0x0400 /* DTR */ +#define REG_CR_RXE 0x0200 /* receive enable */ +#define REG_CR_TXE 0x0100 /* transmit enable */ +#define REG_CR_LBE 0x0080 /* loopback enable */ +#define REG_CR_RTIE 0x0040 +#define REG_CR_TIE 0x0020 +#define REG_CR_RIE 0x0010 +#define REG_CR_MSIE 0x0008 +#define REG_CR_IIRLP 0x0004 /* SIR low power mode */ +#define REG_CR_SIREN 0x0002 /* SIR enable */ +#define REG_CR_UARTEN 0x0001 /* UART enable */ + +#define REG_IFLS_RX1_8 (0 << 3) +#define REG_IFLS_RX2_8 (1 << 3) +#define REG_IFLS_RX4_8 (2 << 3) +#define REG_IFLS_RX6_8 (3 << 3) +#define REG_IFLS_RX7_8 (4 << 3) +#define REG_IFLS_TX1_8 (0 << 0) +#define REG_IFLS_TX2_8 (1 << 0) +#define REG_IFLS_TX4_8 (2 << 0) +#define REG_IFLS_TX6_8 (3 << 0) + +#define REG_IMSC_OEIM (1 << 10) /* overrun error interrupt mask */ +#define REG_IMSC_BEIM (1 << 9) /* break error interrupt mask */ +#define REG_IMSC_PEIM (1 << 8) /* parity error interrupt mask */ +#define REG_IMSC_FEIM (1 << 7) /* framing error interrupt mask */ +#define REG_IMSC_RTIM (1 << 6) /* receive timeout interrupt mask */ +#define REG_IMSC_TXIM (1 << 5) /* transmit interrupt mask */ +#define REG_IMSC_RXIM (1 << 4) /* receive interrupt mask */ +#define REG_IMSC_DSRMIM (1 << 3) /* DSR interrupt mask */ +#define REG_IMSC_DCDMIM (1 << 2) /* DCD interrupt mask */ +#define REG_IMSC_CTSMIM (1 << 1) /* CTS interrupt mask */ +#define REG_IMSC_RIMIM (1 << 0) /* RI interrupt mask */ + +#define REG_ICR_OEIS (1 << 10) /* overrun error interrupt status */ +#define REG_ICR_BEIS (1 << 9) /* break error interrupt status */ +#define REG_ICR_PEIS (1 << 8) /* parity error interrupt status */ +#define REG_ICR_FEIS (1 << 7) /* framing error interrupt status */ +#define REG_ICR_RTIS (1 << 6) /* receive timeout interrupt status */ +#define REG_ICR_TXIS (1 << 5) /* transmit interrupt status */ +#define REG_ICR_RXIS (1 << 4) /* receive interrupt status */ +#define REG_ICR_DSRMIS (1 << 3) /* DSR interrupt status */ +#define REG_ICR_DCDMIS (1 << 2) /* DCD interrupt status */ 
+#define REG_ICR_CTSMIS (1 << 1) /* CTS interrupt status */ +#define REG_ICR_RIMIS (1 << 0) /* RI interrupt status */ + +#define UART_NR 12 + +#define UART_DR_ERROR (REG_DR_OE|REG_DR_BE|REG_DR_PE|REG_DR_FE) +#define UART_DUMMY_DR_RX (1 << 16) + +#define DEFAULT_UARTCLK 48000000 /* 48 MHz */ + +/* + * We wrap our port structure around the generic uart_port. + */ +struct phytium_uart_port { + struct uart_port port; + unsigned int im; /* interrupt mask */ + unsigned int old_status; + unsigned int old_cr; /* state during shutdown */ + char type[12]; +}; + +static unsigned int phytium_uart_read(const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + return readl_relaxed(addr); +} + +static void phytium_uart_write(unsigned int val, const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + writel_relaxed(val, addr); +} + +static int phytium_fifo_to_tty(struct phytium_uart_port *pup) +{ + u16 status; + unsigned int ch, flag, fifotaken; + + for (fifotaken = 0; fifotaken < 256; fifotaken++) { + status = phytium_uart_read(pup, REG_FR); + if (status & REG_FR_RXFE) + break; + + /* Take chars from the FIFO and update status */ + ch = phytium_uart_read(pup, REG_DR) | UART_DUMMY_DR_RX; + flag = TTY_NORMAL; + pup->port.icount.rx++; + + if (unlikely(ch & UART_DR_ERROR)) { + if (ch & REG_DR_BE) { + ch &= ~(REG_DR_FE | REG_DR_PE); + pup->port.icount.brk++; + if (uart_handle_break(&pup->port)) + continue; + } else if (ch & REG_DR_PE) + pup->port.icount.parity++; + else if (ch & REG_DR_FE) + pup->port.icount.frame++; + if (ch & REG_DR_OE) + pup->port.icount.overrun++; + + ch &= pup->port.read_status_mask; + + if (ch & REG_DR_BE) + flag = TTY_BREAK; + else if (ch & REG_DR_PE) + flag = TTY_PARITY; + else if (ch & REG_DR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&pup->port, ch & 255)) + continue; + + uart_insert_char(&pup->port, ch, REG_DR_OE, ch, flag); + } + + return fifotaken; +} + +static void phytium_rx_chars(struct phytium_uart_port *pup) +__releases(&pup->port.lock) +__acquires(&pup->port.lock) +{ + phytium_fifo_to_tty(pup); + + spin_unlock(&pup->port.lock); + tty_flip_buffer_push(&pup->port.state->port); + spin_lock(&pup->port.lock); +} + +static void phytium_stop_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im &= ~REG_IMSC_TXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static bool phytium_tx_char(struct phytium_uart_port *pup, unsigned char c, + bool from_irq) +{ + + if (unlikely(!from_irq) && + phytium_uart_read(pup, REG_FR) & REG_FR_TXFF) + return false; /* unable to transmit character */ + + phytium_uart_write(c, pup, REG_DR); + pup->port.icount.tx++; + + return true; +} + +static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) +{ + struct circ_buf *xmit = &pup->port.state->xmit; + int count = pup->port.fifosize >> 1; + + if (pup->port.x_char) { + if (!phytium_tx_char(pup, pup->port.x_char, from_irq)) + return true; + pup->port.x_char = 0; + --count; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&pup->port)) { + phytium_stop_tx(&pup->port); + return false; + } + + do { + if (likely(from_irq) && count-- == 0) + break; + + if (!phytium_tx_char(pup, xmit->buf[xmit->tail], from_irq)) + break; + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } while (!uart_circ_empty(xmit)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + 
uart_write_wakeup(&pup->port); + + if (uart_circ_empty(xmit)) { + phytium_stop_tx(&pup->port); + return false; + } + return true; +} + +static void phytium_modem_status(struct phytium_uart_port *pup) +{ + unsigned int status, delta; + + status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS); + + delta = status ^ pup->old_status; + pup->old_status = status; + + if (!delta) + return; + + if (delta & REG_FR_DCD) + uart_handle_dcd_change(&pup->port, status & REG_FR_DCD); + + if (delta & REG_FR_DSR) + pup->port.icount.dsr++; + + if (delta & REG_FR_CTS) + uart_handle_cts_change(&pup->port, status & REG_FR_CTS); + + wake_up_interruptible(&pup->port.state->port.delta_msr_wait); +} + +static irqreturn_t phytium_uart_interrupt(int irq, void *dev_id) +{ + struct phytium_uart_port *pup = dev_id; + unsigned long flags; + unsigned int status, pass_counter = 256; + int handled = 0; + + spin_lock_irqsave(&pup->port.lock, flags); + status = phytium_uart_read(pup, REG_RIS) & pup->im; + if (status) { + do { + phytium_uart_write(status & ~(REG_ICR_TXIS|REG_ICR_RTIS|REG_ICR_RXIS), + pup, REG_ICR); + + if (status & (REG_ICR_RTIS|REG_ICR_RXIS)) + phytium_rx_chars(pup); + + if (status & (REG_ICR_DSRMIS|REG_ICR_DCDMIS| + REG_ICR_CTSMIS|REG_ICR_RIMIS)) + phytium_modem_status(pup); + if (status & REG_ICR_TXIS) + phytium_tx_chars(pup, true); + + if (pass_counter-- == 0) + break; + + status = phytium_uart_read(pup, REG_RIS) & pup->im; + } while (status != 0); + handled = 1; + } + spin_unlock_irqrestore(&pup->port.lock, flags); + + return IRQ_RETVAL(handled); +} + +static unsigned int phytium_tx_empty(struct uart_port *port) +{ + unsigned int status; + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + status = phytium_uart_read(pup, REG_FR) & (REG_FR_BUSY | REG_FR_TXFF); + + return status ? 
0 : TIOCSER_TEMT; +} + +static void phytium_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr; + + cr = phytium_uart_read(pup, REG_CR); + +#define TIOCMBIT(tiocmbit, uartbit) \ + do { \ + if (mctrl & tiocmbit) \ + cr |= uartbit; \ + else \ + cr &= ~uartbit; \ + } while (0) + + TIOCMBIT(TIOCM_RTS, REG_CR_RTS); + TIOCMBIT(TIOCM_DTR, REG_CR_DTR); + TIOCMBIT(TIOCM_OUT1, REG_CR_OUT1); + TIOCMBIT(TIOCM_OUT2, REG_CR_OUT2); + TIOCMBIT(TIOCM_LOOP, REG_CR_LBE); + + if (port->status & UPSTAT_AUTORTS) { + /* We need to disable auto-RTS if we want to turn RTS off */ + TIOCMBIT(TIOCM_RTS, REG_CR_RTSEN); + } +#undef TIOCMBIT + + phytium_uart_write(cr, pup, REG_CR); +} + +static unsigned int phytium_get_mctrl(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr = 0; + unsigned int status = phytium_uart_read(pup, REG_FR); + +#define TIOCMBIT(uartbit, tiocmbit) \ + do { \ + if (status & uartbit) \ + cr |= tiocmbit; \ + } while (0) + + TIOCMBIT(REG_FR_DCD, TIOCM_CAR); + TIOCMBIT(REG_FR_DSR, TIOCM_DSR); + TIOCMBIT(REG_FR_CTS, TIOCM_CTS); + TIOCMBIT(REG_FR_RI, TIOCM_RNG); +#undef TIOCMBIT + return cr; +} + +static void phytium_start_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + if (phytium_tx_chars(pup, false)) { + pup->im |= REG_IMSC_TXIM; + phytium_uart_write(pup->im, pup, REG_IMSC); + } +} + +static void phytium_stop_rx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im &= ~(REG_IMSC_RXIM|REG_IMSC_RTIM|REG_IMSC_FEIM| + REG_IMSC_PEIM|REG_IMSC_BEIM|REG_IMSC_OEIM); + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static void phytium_enable_ms(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + pup->im |= REG_IMSC_RIMIM|REG_IMSC_CTSMIM|REG_IMSC_DCDMIM|REG_IMSC_DSRMIM; + phytium_uart_write(pup->im, pup, REG_IMSC); +} + +static void phytium_break_ctl(struct uart_port *port, int break_state) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned long flags; + unsigned int lcr_h; + + spin_lock_irqsave(&pup->port.lock, flags); + lcr_h = phytium_uart_read(pup, REG_LCRH_TX); + if (break_state == -1) + lcr_h |= REG_LCRH_BRK; + else + lcr_h &= ~REG_LCRH_BRK; + phytium_uart_write(lcr_h, pup, REG_LCRH_TX); + spin_unlock_irqrestore(&pup->port.lock, flags); +} + +static int phytium_hwinit(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + /* XXX: more configurable setup method in future */ + pup->port.uartclk = DEFAULT_UARTCLK; + + /* Clear pending error and receive interrupts */ + phytium_uart_write(REG_ICR_OEIS | REG_ICR_BEIS | REG_ICR_PEIS | + REG_ICR_FEIS | REG_ICR_RTIS | REG_ICR_RXIS, + pup, REG_ICR); + + /* + * Save interrupts enable mask, and enable RX interrupts in case if + * the interrupt is used for NMI entry. 
+	 */
+	pup->im = phytium_uart_read(pup, REG_IMSC);
+	phytium_uart_write(REG_IMSC_RTIM | REG_IMSC_RXIM, pup, REG_IMSC);
+
+	return 0;
+}
+
+static int phytium_uart_allocate_irq(struct phytium_uart_port *pup)
+{
+	phytium_uart_write(pup->im, pup, REG_IMSC);
+
+	return request_irq(pup->port.irq, phytium_uart_interrupt, IRQF_SHARED, DRV_NAME, pup);
+}
+
+static void phytium_enable_interrupts(struct phytium_uart_port *pup)
+{
+	unsigned int i;
+
+	spin_lock_irq(&pup->port.lock);
+
+	/* Clear out any spuriously appearing RX interrupts */
+	phytium_uart_write(REG_ICR_RTIS | REG_ICR_RXIS, pup, REG_ICR);
+
+	/*
+	 * RXIS is asserted only when the RX FIFO transitions from below
+	 * to above the trigger threshold. If the RX FIFO is already
+	 * full to the threshold this can't happen and RXIS will now be
+	 * stuck off. Drain the RX FIFO explicitly to fix this:
+	 */
+	for (i = 0; i < pup->port.fifosize * 2; i++) {
+		if (phytium_uart_read(pup, REG_FR) & REG_FR_RXFE)
+			break;
+
+		phytium_uart_read(pup, REG_DR);
+	}
+
+	pup->im = REG_IMSC_RTIM | REG_IMSC_RXIM;
+	phytium_uart_write(pup->im, pup, REG_IMSC);
+	spin_unlock_irq(&pup->port.lock);
+}
+
+static int phytium_startup(struct uart_port *port)
+{
+	struct phytium_uart_port *pup =
+		container_of(port, struct phytium_uart_port, port);
+	unsigned int cr;
+	int ret = 0;
+
+	ret = phytium_hwinit(port);
+	if (ret)
+		goto out;
+
+	ret = phytium_uart_allocate_irq(pup);
+	if (ret)
+		goto out;
+
+	phytium_uart_write(REG_IFLS_RX4_8|REG_IFLS_TX4_8, pup, REG_IFLS);
+
+	spin_lock_irq(&pup->port.lock);
+
+	/* restore RTS and DTR */
+	cr = pup->old_cr & (REG_CR_RTS | REG_CR_DTR);
+	cr |= REG_CR_UARTEN | REG_CR_RXE | REG_CR_TXE;
+	phytium_uart_write(cr, pup, REG_CR);
+
+	spin_unlock_irq(&pup->port.lock);
+
+	/* initialise the old status of the modem signals */
+	pup->old_status = phytium_uart_read(pup, REG_FR) & (REG_FR_DCD|REG_FR_DSR|REG_FR_CTS);
+
+	phytium_enable_interrupts(pup);
+
+out:
+	return ret;
+}
+
+static void phytium_shutdown_channel(struct phytium_uart_port *pup,
+				     unsigned int lcrh)
+{
+	unsigned long val;
+
+	val = phytium_uart_read(pup, lcrh);
+	val &= ~(REG_LCRH_BRK | REG_LCRH_FEN);
+	phytium_uart_write(val, pup, lcrh);
+}
+
+static void phytium_disable_uart(struct phytium_uart_port *pup)
+{
+	unsigned int cr;
+
+	pup->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
+	spin_lock_irq(&pup->port.lock);
+	cr = phytium_uart_read(pup, REG_CR);
+	pup->old_cr = cr;
+	cr &= REG_CR_RTS | REG_CR_DTR;
+	cr |= REG_CR_UARTEN | REG_CR_TXE;
+	phytium_uart_write(cr, pup, REG_CR);
+	spin_unlock_irq(&pup->port.lock);
+
+	/*
+	 * disable break condition and fifos
+	 */
+	phytium_shutdown_channel(pup, REG_LCRH_RX);
+}
+
+static void phytium_disable_interrupts(struct phytium_uart_port *pup)
+{
+	spin_lock_irq(&pup->port.lock);
+
+	/* mask all interrupts and clear all pending ones */
+	pup->im = 0;
+	phytium_uart_write(pup->im, pup, REG_IMSC);
+	phytium_uart_write(0xffff, pup, REG_ICR);
+
+	spin_unlock_irq(&pup->port.lock);
+}
+
+static void phytium_shutdown(struct uart_port *port)
+{
+	struct phytium_uart_port *pup =
+		container_of(port, struct phytium_uart_port, port);
+
+	phytium_disable_interrupts(pup);
+
+	free_irq(pup->port.irq, pup);
+
+	phytium_disable_uart(pup);
+
+	if (pup->port.ops->flush_buffer)
+		pup->port.ops->flush_buffer(port);
+}
+
+static void
+phytium_setup_status_masks(struct uart_port *port, struct ktermios *termios)
+{
+	port->read_status_mask = REG_DR_OE | 255;
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |= REG_DR_FE | REG_DR_PE;
+	if 
(termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= REG_DR_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= REG_DR_FE | REG_DR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= REG_DR_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= REG_DR_OE; + } + + /* + * Ignore all characters if CREAD is not set. + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_DR_RX; +} + +static void +phytium_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int lcr_h, old_cr; + unsigned long flags; + unsigned int baud, quot; + + /* Ask the core to calculate the divisor for us. */ + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); + + if (baud > port->uartclk/16) + quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); + else + quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr_h = REG_LCRH_WLEN_5; + break; + case CS6: + lcr_h = REG_LCRH_WLEN_6; + break; + case CS7: + lcr_h = REG_LCRH_WLEN_7; + break; + default: /* CS8 */ + lcr_h = REG_LCRH_WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + lcr_h |= REG_LCRH_STP2; + if (termios->c_cflag & PARENB) { + lcr_h |= REG_LCRH_PEN; + if (!(termios->c_cflag & PARODD)) + lcr_h |= REG_LCRH_EPS; + if (termios->c_cflag & CMSPAR) + lcr_h |= REG_LCRH_SPS; + } + if (pup->port.fifosize > 1) + lcr_h |= REG_LCRH_FEN; + + spin_lock_irqsave(&port->lock, flags); + + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + + phytium_setup_status_masks(port, termios); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + phytium_enable_ms(port); + + /* first, disable everything */ + old_cr = phytium_uart_read(pup, REG_CR); + phytium_uart_write(0, pup, REG_CR); + + if (termios->c_cflag & CRTSCTS) { + if (old_cr & REG_CR_RTS) + old_cr |= REG_CR_RTSEN; + + old_cr |= REG_CR_CTSEN; + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + } else { + old_cr &= ~(REG_CR_CTSEN | REG_CR_RTSEN); + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + } + + /* Set baud rate */ + phytium_uart_write(quot & 0x3f, pup, REG_FBRD); + phytium_uart_write(quot >> 6, pup, REG_IBRD); + + phytium_uart_write(lcr_h, pup, REG_LCRH_RX); + phytium_uart_write(old_cr, pup, REG_CR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *phytium_type(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + return pup->port.type == PORT_PHYTIUM ? pup->type : NULL; +} + +static void phytium_release_port(struct uart_port *port) +{ + /* Nothing to release ... 
*/ +} + +static int phytium_request_port(struct uart_port *port) +{ + /* UARTs always present */ + return 0; +} + +static void phytium_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_PHYTIUM; + phytium_request_port(port); + } +} + +static int phytium_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_PHYTIUM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= nr_irqs) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + + return ret; +} + +static const struct uart_ops phytium_uart_ops = { + .tx_empty = phytium_tx_empty, + .set_mctrl = phytium_set_mctrl, + .get_mctrl = phytium_get_mctrl, + .stop_tx = phytium_stop_tx, + .start_tx = phytium_start_tx, + .stop_rx = phytium_stop_rx, + .enable_ms = phytium_enable_ms, + .break_ctl = phytium_break_ctl, + .startup = phytium_startup, + .shutdown = phytium_shutdown, + .set_termios = phytium_set_termios, + .type = phytium_type, + .release_port = phytium_release_port, + .request_port = phytium_request_port, + .config_port = phytium_config_port, + .verify_port = phytium_verify_port, +}; + +static struct phytium_uart_port *uart_ports[UART_NR]; + +static struct uart_driver phytium_uart = { + .owner = THIS_MODULE, + .driver_name = DRV_NAME, + .dev_name = "ttyS", + .nr = UART_NR, +}; + +void phytium_unregister_port(struct phytium_uart_port *pup) +{ + int i; + bool busy = false; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) { + if (uart_ports[i] == pup) + uart_ports[i] = NULL; + else if (uart_ports[i]) + busy = true; + } + + if (!busy) + uart_unregister_driver(&phytium_uart); +} + +static int phytium_find_free_port(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) + if (uart_ports[i] == NULL) + return i; + + return -EBUSY; +} + +static int phytium_register_port(struct phytium_uart_port *pup) +{ + int rc; + + /* Ensure interrupts from this UART are masked and cleared */ + phytium_uart_write(0, pup, REG_IMSC); + phytium_uart_write(0xffff, pup, REG_ICR); + + if (!phytium_uart.state) { + rc = uart_register_driver(&phytium_uart); + if (rc < 0) { + dev_err(pup->port.dev, + "Failed to register Phytium PCI UART driver\n"); + return rc; + } + } + + rc = uart_add_one_port(&phytium_uart, &pup->port); + if (rc) + phytium_unregister_port(pup); + + return rc; +} + +static int phytium_uart_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct phytium_uart_port *pup; + int portnr, rc; + + portnr = phytium_find_free_port(); + if (portnr < 0) + return portnr; + + pup = devm_kzalloc(&pdev->dev, sizeof(struct phytium_uart_port), + GFP_KERNEL); + if (!pup) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pcim_iomap_regions_request_all(pdev, 0x01, pci_name(pdev)); + if (rc) + return rc; + + pup->port.iotype = UPIO_MEM32; + pup->port.irq = pdev->irq; + pup->port.mapbase = pci_resource_start(pdev, 0); + pup->port.membase = pcim_iomap_table(pdev)[0]; + pup->port.ops = &phytium_uart_ops; + pup->port.dev = &pdev->dev; + pup->port.fifosize = 32; + pup->port.flags = UPF_BOOT_AUTOCONF; + pup->port.line = portnr; + + uart_ports[portnr] = pup; + + pup->old_cr = 0; + snprintf(pup->type, sizeof(pup->type), "pci-uart"); + + pci_set_drvdata(pdev, pup); + + return phytium_register_port(pup); +} + +static void phytium_uart_remove(struct pci_dev *pdev) +{ + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + uart_remove_one_port(&phytium_uart, &pup->port); + 
phytium_unregister_port(pup); +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_uart_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + if (pup) + uart_suspend_port(&phytium_uart, &pup->port); + + return 0; +} + +static int phytium_uart_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytium_uart_port *pup = pci_get_drvdata(pdev); + + if (pup) + uart_resume_port(&phytium_uart, &pup->port); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_dev_pm_ops, phytium_uart_suspend, phytium_uart_resume); + +static const struct pci_device_id pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc2e) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver phytium_uart_pci_driver = { + .name = DRV_NAME, + .probe = phytium_uart_probe, + .remove = phytium_uart_remove, + .driver = { + .pm = &phytium_dev_pm_ops, + }, + .id_table = pci_ids, +}; + +static int __init phytium_uart_init(void) +{ + pr_info("Serial: Phytium PCI UART driver\n"); + + return pci_register_driver(&phytium_uart_pci_driver); +} + +static void __exit phytium_uart_exit(void) +{ + pci_unregister_driver(&phytium_uart_pci_driver); +} + +module_init(phytium_uart_init); +module_exit(phytium_uart_exit); + +MODULE_AUTHOR("Chen Baozi "); +MODULE_DESCRIPTION("Phytium PCI serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index cee0274806c551f9971abc250c226606e8df59c2..f785ca55fb85f1bab5bd5eb327dff43da3367c3a 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -215,7 +215,7 @@ static struct sysrq_key_op sysrq_showlocks_op = { #endif #ifdef CONFIG_SMP -static DEFINE_SPINLOCK(show_lock); +static DEFINE_RAW_SPINLOCK(show_lock); static void showacpu(void *dummy) { @@ -225,10 +225,10 @@ static void showacpu(void *dummy) if (idle_cpu(smp_processor_id())) return; - spin_lock_irqsave(&show_lock, flags); + raw_spin_lock_irqsave(&show_lock, flags); pr_info("CPU%d:\n", smp_processor_id()); show_stack(NULL, NULL); - spin_unlock_irqrestore(&show_lock, flags); + raw_spin_unlock_irqrestore(&show_lock, flags); } static void sysrq_showregs_othercpus(struct work_struct *dummy) diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index a9f12a52f72651617c8257985652e10cebacb9a4..5df4d07679cb92b8afd19890b0a7e59833e09070 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -124,6 +124,8 @@ source "drivers/usb/chipidea/Kconfig" source "drivers/usb/isp1760/Kconfig" +source "drivers/usb/phytium/Kconfig" + comment "USB port drivers" if USB diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 7d1b8c82b208917e83ada929ef6902fed878e12e..22d79202ac93623e416bb0442c78aa89ff017a2a 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -12,6 +12,8 @@ obj-$(CONFIG_USB_DWC3) += dwc3/ obj-$(CONFIG_USB_DWC2) += dwc2/ obj-$(CONFIG_USB_ISP1760) += isp1760/ +obj-$(CONFIG_USB_PHYTIUM) += phytium/ + obj-$(CONFIG_USB_MON) += mon/ obj-$(CONFIG_USB_MTU3) += mtu3/ diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index df661460e9f960916e21bc9ab1f6dbde8bfc069e..1a1487d312d08a7cfb933b0b595d390ae35bef80 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1752,7 +1752,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); struct usb_anchor *anchor = urb->anchor; int status = urb->unlinked; - unsigned long flags; urb->hcpriv = NULL; if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && @@ -1780,9 
+1779,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb) * and no one may trigger the above deadlock situation when * running complete() in tasklet. */ - local_irq_save(flags); urb->complete(urb); - local_irq_restore(flags); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 49eb4e3c760f4282d88d500aabc7998b8e1d227b..49fc75368aa22642afac230517d4779c8003c2bd 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1626,7 +1626,7 @@ static void ffs_data_put(struct ffs_data *ffs) ffs_data_clear(ffs); ffs_release_dev(ffs->private_data); BUG_ON(waitqueue_active(&ffs->ev.waitq) || - waitqueue_active(&ffs->ep0req_completion.wait) || + swait_active(&ffs->ep0req_completion.wait) || waitqueue_active(&ffs->wait)); destroy_workqueue(ffs->io_completion_wq); kfree(ffs->dev_name); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 3ebcbd199a79190666c50732e2a11cdbda270fb7..a2240aa9f4abd5278b607e2a1560f142934e3081 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) spin_unlock_irq (&epdata->dev->lock); if (likely (value == 0)) { - value = wait_event_interruptible (done.wait, done.done); + value = swait_event_interruptible_exclusive(done.wait, done.done); if (value != 0) { spin_lock_irq (&epdata->dev->lock); if (likely (epdata->ep != NULL)) { @@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) usb_ep_dequeue (epdata->ep, epdata->req); spin_unlock_irq (&epdata->dev->lock); - wait_event (done.wait, done.done); + swait_event_exclusive(done.wait, done.done); if (epdata->status == -ECONNRESET) epdata->status = -EINTR; } else { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index e6bbb919555465f1db997be58c514d143f8ae17f..a4119fa3f8bcb3c1c6979e07ecc836af198ea9e5 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -45,6 +45,7 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 #define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af +#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba @@ -236,6 +237,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM || + pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) + xhci->quirks |= XHCI_RESET_ON_RESUME; /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index adc437ca83b8804dcc88bb835c5e6935b9d70e90..ccb1b1dc92f464b1c7c96525202327096b000d39 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -65,12 +65,14 @@ static int xhci_priv_resume_quirk(struct usb_hcd *hcd) static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) { + struct xhci_plat_priv *priv = xhci_to_priv(xhci); + /* * As of now platform drivers don't provide MSI support so we ensure * here that the generic code does not try to make a pci_dev from our * dev struct in order to setup MSI */ - xhci->quirks |= XHCI_PLAT; + xhci->quirks |= XHCI_PLAT | priv->quirks; } /* called during probe() after chip reset completes */ @@ 
-111,6 +113,10 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
 	.resume_quirk = xhci_rcar_resume_quirk,
 };
 
+static const struct xhci_plat_priv xhci_plat_phytium_pe220x = {
+	.quirks = XHCI_RESET_ON_RESUME,
+};
+
 static const struct of_device_id usb_xhci_of_match[] = {
 	{
 		.compatible = "generic-xhci",
@@ -143,6 +149,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
 	}, {
 		.compatible = "renesas,rcar-gen3-xhci",
 		.data = &xhci_plat_renesas_rcar_gen3,
+	}, {
+		.compatible = "phytium,pe220x-xhci",
+		.data = &xhci_plat_phytium_pe220x,
 	},
 	{},
 };
@@ -252,7 +261,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
 	}
 
 	xhci = hcd_to_xhci(hcd);
-	priv_match = of_device_get_match_data(&pdev->dev);
+	if (has_acpi_companion(&pdev->dev))
+		priv_match = acpi_device_get_match_data(&pdev->dev);
+	else
+		priv_match = of_device_get_match_data(&pdev->dev);
 	if (priv_match) {
 		struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -438,6 +450,7 @@ static const struct dev_pm_ops xhci_plat_pm_ops = {
 static const struct acpi_device_id usb_xhci_acpi_match[] = {
 	/* XHCI-compliant USB Controller */
 	{ "PNP0D10", },
+	{ "PHYT0039", (kernel_ulong_t)&xhci_plat_phytium_pe220x },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index ae29f22ff5bd7f38453b229985fea0d6db48e7b0..5681723fc9cd73b8c6bf40ef0d764df5e4fde511 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -12,10 +12,12 @@ struct xhci_plat_priv {
 	const char *firmware_name;
+	unsigned long long quirks;
 	void (*plat_start)(struct usb_hcd *);
 	int (*init_quirk)(struct usb_hcd *);
 	int (*resume_quirk)(struct usb_hcd *);
 };
 
 #define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
+#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
 #endif /* _XHCI_PLAT_H */
diff --git a/drivers/usb/phytium/Kconfig b/drivers/usb/phytium/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..5d889c26a7c31c86c5cde712245823b5fdb29634
--- /dev/null
+++ b/drivers/usb/phytium/Kconfig
@@ -0,0 +1,8 @@
+config USB_PHYTIUM
+	tristate "PHYTIUM USB Support"
+	depends on USB
+	help
+	  Say Y or M here if your system has an OTG USB controller based on a Phytium SoC.
+
+	  If you choose to build this driver as a dynamically linked module, the module will
+	  be called phytium-usb.ko.
diff --git a/drivers/usb/phytium/Makefile b/drivers/usb/phytium/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..05d422d4a59bc5dddc5f5483e9eeaf792330ede5
--- /dev/null
+++ b/drivers/usb/phytium/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_USB_PHYTIUM) += phytium-usb.o
+
+phytium-usb-y := core.o dma.o platform.o host.o gadget.o
diff --git a/drivers/usb/phytium/core.c b/drivers/usb/phytium/core.c
new file mode 100644
index 0000000000000000000000000000000000000000..c0182c0770e5ef5f5baa7d7b1709b844ca8578db
--- /dev/null
+++ b/drivers/usb/phytium/core.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "core.h"
+
+int phytium_core_reset(struct phytium_cusb *config, bool skip_wait)
+{
+	if (!config)
+		return 0;
+
+	spin_lock_init(&config->lock);
+
+	return 0;
+}
+
+uint32_t phytium_read32(uint32_t *address)
+{
+	return readl(address);
+}
+
+void phytium_write32(uint32_t *address, uint32_t value)
+{
+	writel(value, address);
+}
+
+uint16_t phytium_read16(uint16_t *address)
+{
+	return readw(address);
+}
+
+void phytium_write16(uint16_t *address, uint16_t value)
+{
+	writew(value, address);
+}
+
+uint8_t phytium_read8(uint8_t *address)
+{
+	return readb(address);
+}
+
+void phytium_write8(uint8_t *address, uint8_t value)
+{
+	writeb(value, address);
+}
+
diff --git a/drivers/usb/phytium/core.h b/drivers/usb/phytium/core.h
new file mode 100644
index 0000000000000000000000000000000000000000..f563672ccaa9963859ff3e1bf8ce62276d3c0bce
--- /dev/null
+++ b/drivers/usb/phytium/core.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PHYTIUM_CORE_H__
+#define __PHYTIUM_CORE_H__
+
+#include
+#include
+#include "host_api.h"
+#include "gadget.h"
+
+#define MAX_EPS_CHANNELS	16
+
+struct phytium_ep {
+	struct phytium_cusb *config;
+	u16 max_packet;
+	u8 ep_num;
+	struct GADGET_EP *gadget_ep;
+	struct list_head req_list;
+	struct usb_ep end_point;
+	char name[12];
+	u8 is_tx;
+	const struct usb_endpoint_descriptor *desc;
+	u8 busy;
+};
+
+struct phytium_request {
+	struct usb_request request;
+	struct GADGET_REQ *gadget_request;
+	struct list_head list;
+	struct phytium_ep *ep;
+	struct phytium_cusb *config;
+	u8 is_tx;
+	u8 epnum;
+};
+
+struct phytium_cusb {
+	struct device *dev;
+	void __iomem *regs;
+	void __iomem *phy_regs;
+	int irq;
+	spinlock_t lock;
+	enum usb_dr_mode dr_mode;
+
+	struct GADGET_OBJ *gadget_obj;
+	struct GADGET_CFG gadget_cfg;
+	struct GADGET_CALLBACKS gadget_callbacks;
+	struct GADGET_SYSREQ gadget_sysreq;
+	struct GADGET_DEV *gadget_dev;
+	void *gadget_priv;
+
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *gadget_driver;
+	struct phytium_ep endpoints_tx[MAX_EPS_CHANNELS];
+	struct phytium_ep endpoints_rx[MAX_EPS_CHANNELS];
+	u8 ep0_data_stage_is_tx;
+
+	struct HOST_OBJ *host_obj;
+	struct HOST_CFG host_cfg;
+	struct HOST_CALLBACKS host_callbacks;
+	struct HOST_SYSREQ host_sysreq;
+	void *host_priv;
+	struct usb_hcd *hcd;
+
+	struct DMA_OBJ *dma_obj;
+	struct DMA_CFG dma_cfg;
+	struct DMA_CALLBACKS dma_callbacks;
+	struct DMA_SYSREQ dma_sysreq;
+	bool isVhubHost;
+};
+
+int phytium_core_reset(struct phytium_cusb *config, bool skip_wait);
+
+int phytium_host_init(struct phytium_cusb *config);
+int phytium_host_uninit(struct phytium_cusb *config);
+
+#ifdef CONFIG_PM
+int phytium_host_resume(void *priv);
+int phytium_host_suspend(void *priv);
+int
phytium_gadget_resume(void *priv); +int phytium_gadget_suspend(void *priv); +#endif + +int phytium_gadget_init(struct phytium_cusb *config); +int phytium_gadget_uninit(struct phytium_cusb *config); + +uint32_t phytium_read32(uint32_t *address); + +void phytium_write32(uint32_t *address, uint32_t value); + +uint16_t phytium_read16(uint16_t *address); + +void phytium_write16(uint16_t *address, uint16_t value); + +uint8_t phytium_read8(uint8_t *address); + +void phytium_write8(uint8_t *address, uint8_t value); + +#endif diff --git a/drivers/usb/phytium/dma.c b/drivers/usb/phytium/dma.c new file mode 100644 index 0000000000000000000000000000000000000000..2e0d6df3eb344e386c649f2fc2dab7e774969137 --- /dev/null +++ b/drivers/usb/phytium/dma.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "core.h" +#include "dma.h" +#include "hw-regs.h" + +#define TRB_POOL_SIZE (sizeof(struct DMA_Trb) * (NUM_OF_TRB)) +/* burst length 'x' should be multiples of 2 */ +#define TRB_BURST_LENGTH(x) (((x) & 0xFF) << 24) + +#define BUILD_NORMAL_TRB_NO_IOC(trb, data_ptr, data_size, stream_id) { \ + trb.dmaAddr = data_ptr; \ + trb.dmaSize = data_size; \ + trb.ctrl = (stream_id << 16) | TD_TYPE_NORMAL | TDF_CYCLE_BIT; } + +#define BUILD_NORMAL_TRB_NO_IOC_CHAIN(trb, data_ptr, data_size, stream_id) { \ + trb.dmaAddr = data_ptr; \ + trb.dmaSize = data_size; \ + trb.ctrl = (stream_id << 16) | TD_TYPE_NORMAL | TDF_CYCLE_BIT | TDF_CHAIN_BIT; } + +#define BUILD_NORMAL_TRB(trb, data_ptr, data_size, stream_id) { \ + trb.dmaAddr = data_ptr; \ + trb.dmaSize = (data_size) | TRB_BURST_LENGTH(0x80); \ + trb.ctrl = (stream_id << 16) | TD_TYPE_NORMAL | TDF_CYCLE_BIT |\ + TDF_INT_ON_COMPLECTION | TDF_INT_ON_SHORT_PACKET; } + +#define BUILD_LINK_TRB(trb, target_ptr) { \ + trb.dmaAddr = target_ptr; \ + trb.dmaSize = 0; \ + trb.ctrl = TD_TYPE_LINK | TDF_CYCLE_BIT | TDF_CHAIN_BIT; } + +#define BUILD_EMPTY_TRB(trb) { \ + trb.dmaAddr = 0; \ + trb.dmaSize = 0; \ + trb.ctrl = 0; } + + +uint32_t divRoundUp(uint32_t divident, uint32_t divisor) +{ + return divisor ? 
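+		/* open-coded DIV_ROUND_UP() with an explicit divide-by-zero guard */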
((divident + divisor - 1) / divisor) : 0; +} + +static inline struct DMA_TrbChainDesc *GetTrbChainDescEntry(struct list_head *list) +{ + return (struct DMA_TrbChainDesc *)((uintptr_t)list - + (uintptr_t)&(((struct DMA_TrbChainDesc *)0)->chainNode)); +} + +static int32_t phytium_dma_probe(struct DMA_CFG *config, struct DMA_SYSREQ *sysReq) +{ + if (!sysReq) + return -EINVAL; + + sysReq->trbMemSize = TRB_POOL_SIZE; + sysReq->privDataSize = sizeof(struct DMA_CONTROLLER); + + return 0; +} + +static int32_t phytium_dma_init(struct DMA_CONTROLLER *priv, + const struct DMA_CFG *config, struct DMA_CALLBACKS *callbacks) +{ + if (!priv || !config || !callbacks) + return -EINVAL; + + if (!config->trbAddr || !config->trbDmaAddr) + return -EINVAL; + + memset((void *)priv, 0, sizeof(struct DMA_CONTROLLER)); + memset((void *)config->trbAddr, 0, TRB_POOL_SIZE); + + priv->trbDMAPoolAddr = config->trbDmaAddr; + priv->trbPoolAddr = config->trbAddr; + priv->regs = (struct DMARegs *)config->regBase; + priv->dmaCfg = *config; + priv->dmaDrv = DMA_GetInstance(); + priv->dmaCallbacks = *callbacks; + priv->isHostCtrlMode = 0; + return 0; +} + +static void phytium_dma_destroy(struct DMA_CONTROLLER *priv) +{ + +} + +static int32_t phytium_dma_start(struct DMA_CONTROLLER *priv) +{ + int i; + + if (!priv) + return -EINVAL; + + priv->dmaMode = DMA_MODE_CHANNEL_INDIVIDUAL; + if ((priv->dmaCfg.dmaModeRx & priv->dmaCfg.dmaModeTx) == 0xFFFF) { + priv->dmaMode = DMA_MODE_GLOBAL_DMULT; + phytium_write32(&priv->regs->conf, DMARF_DMULT); + } else if ((priv->dmaCfg.dmaModeRx | priv->dmaCfg.dmaModeTx)) { + priv->dmaMode = DMA_MODE_GLOBAL_DSING; + phytium_write32(&priv->regs->conf, DMARF_DSING); + } + + for (i = 0; i < MAX_DMA_CHANNELS; i++) { + if (priv->dmaCfg.dmaModeRx & (1 << i)) { + priv->rx[i].dmultEnabled = 1; + priv->rx[i].maxTrbLen = TD_DMULT_MAX_TRB_DATA_SIZE; + priv->rx[i].maxTdLen = TD_DMULT_MAX_TD_DATA_SIZE; + } else { + priv->rx[i].dmultEnabled = 0; + priv->rx[i].maxTrbLen = TD_SING_MAX_TRB_DATA_SIZE; + priv->rx[i].maxTdLen = TD_SING_MAX_TD_DATA_SIZE; + } + priv->rx[i].lastTransferLength = 0; + + if (priv->dmaCfg.dmaModeTx & (1 << i)) { + priv->tx[i].dmultEnabled = 1; + priv->tx[i].maxTrbLen = TD_DMULT_MAX_TRB_DATA_SIZE; + priv->tx[i].maxTdLen = TD_DMULT_MAX_TD_DATA_SIZE; + } else { + priv->tx[i].dmultEnabled = 0; + priv->tx[i].maxTrbLen = TD_SING_MAX_TRB_DATA_SIZE; + priv->tx[i].maxTdLen = TD_SING_MAX_TD_DATA_SIZE; + } + priv->tx[i].lastTransferLength = 0; + } + + return 0; +} + +static uint32_t phytium_dma_stop(struct DMA_CONTROLLER *priv) +{ + + return 0; +} + +static int32_t updateDescBuffer(struct DMA_CONTROLLER *priv, + struct DMA_TrbChainDesc *trbChainDesc, uint16_t status) +{ + uint32_t ep_sel, i; + struct DMA_Channel *channel; + + if (!priv || !trbChainDesc) + return -EINVAL; + + channel = trbChainDesc->channel; + if (!channel) + return -EINVAL; + + if (channel->isIsoc && trbChainDesc->lastTrbIsLink) { + if (channel->trbChainDescList.prev == channel->trbChainDescList.next) + return 0; + } + + for (i = 0; i < trbChainDesc->numOfTrbs; i++) { + if (trbChainDesc->framesDesc) { + if (trbChainDesc->isoError) + trbChainDesc->framesDesc[i].length = 0; + else + trbChainDesc->framesDesc[i].length = + (uint32_t)trbChainDesc->trbPool[i].dmaSize & TD_SIZE_MASK; + } + + trbChainDesc->actualLen += + (uint32_t)trbChainDesc->trbPool[i].dmaSize & TD_SIZE_MASK; + } + + if (trbChainDesc->isoError) + trbChainDesc->actualLen = 0; + + ep_sel = phytium_read32(&priv->regs->ep_sel); + channel->lastTransferLength = 
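+	/* cached so dma_getActualLength() can report the completed TD size */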
trbChainDesc->actualLen; + + if (!trbChainDesc->lastTrbIsLink) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + trbChainDesc = NULL; + } + } + + if (channel->trbChainDescList.prev == &channel->trbChainDescList) + channel->status = DMA_STATUS_FREE; + + if (priv->dmaCallbacks.complete) + priv->dmaCallbacks.complete(priv->parent, channel->hwUsbEppNum, + channel->isDirTx, false); + + if (channel->trbChainDescList.next != &channel->trbChainDescList) { + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (trbChainDesc && trbChainDesc->lastTrbIsLink) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + } + } + } + + phytium_write32(&priv->regs->ep_sel, ep_sel); + + return 0; +} + +static void phytium_dma_isr(struct DMA_CONTROLLER *priv) +{ + uint32_t ep_sts, ep_ists, ep_cfg; + int i; + uint8_t isDirTx, epNum; + struct DMA_Channel *channel; + struct DMA_TrbChainDesc *trbChainDesc; + + if (!priv) + return; + + ep_ists = phytium_read32(&priv->regs->ep_ists); + if (!ep_ists) { + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | + DMARF_EP_ISP | DMARF_EP_TRBERR); + return; + } + + for (i = 0; i < 32; i++) { + if (!(ep_ists & (1 << i))) + continue; + + isDirTx = i > 15 ? DMARD_EP_TX : 0u; + epNum = isDirTx ? i - 16 : i; + phytium_write32(&priv->regs->ep_sel, epNum | isDirTx); + + if (isDirTx) + channel = &priv->tx[epNum]; + else + channel = &priv->rx[epNum]; + + ep_sts = phytium_read32(&priv->regs->ep_sts); + + if (ep_sts & DMARF_EP_TRBERR) + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + + if ((ep_sts & DMARF_EP_IOC) || (ep_sts & DMARF_EP_ISP) || (channel->dmultGuard + & DMARF_EP_IOC) || (channel->dmultGuard & DMARF_EP_ISP)) { +retransmit: + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP); + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (!(ep_sts & DMARF_EP_TRBERR) && channel->dmultEnabled + && !trbChainDesc->lastTrbIsLink) { + if (!priv->isHostCtrlMode && !channel->isDirTx) { + channel->dmultGuard = 0; + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + ep_cfg &= ~DMARV_EP_ENABLED; + phytium_write32(&priv->regs->ep_cfg, (uint32_t)ep_cfg); + updateDescBuffer(priv, trbChainDesc, 0); + break; + } + channel->dmultGuard = ep_sts; + break; + } + + channel->dmultGuard = 0; + updateDescBuffer(priv, trbChainDesc, 0); + } + + if (ep_sts & DMARF_EP_OUTSMM) + phytium_write32(&priv->regs->ep_sts, DMARF_EP_OUTSMM); + + if (ep_sts & DMARF_EP_DESCMIS) + phytium_write32(&priv->regs->ep_sts, DMARF_EP_DESCMIS); + + if (ep_sts & DMARF_EP_ISOERR) { + phytium_write32(&priv->regs->ep_sts, DMARF_EP_ISOERR); + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + trbChainDesc->isoError = 1; + goto retransmit; + } + } +} + +static void phytium_dma_errIsr(struct DMA_CONTROLLER *priv, uint8_t irqNr, uint8_t isDirTx) +{ + struct DMA_Channel *channel; + + if (!priv) + return; + + if (isDirTx) + channel = &priv->tx[irqNr]; + else + channel = &priv->rx[irqNr]; + + if (channel->status >= DMA_STATUS_BUSY) + channel->status = DMA_STATUS_ABORT; + + if 
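+	/*
+	 * EP0 faults seen after a controller reset are completed with
+	 * resubmit = true (phytium_dma_controllerReset() sets priv->resubmit)
+	 * so the upper layer can reprogram the transfer.
+	 */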
(priv->dmaCallbacks.complete) { + if (!irqNr && priv->resubmit) { + priv->dmaCallbacks.complete(priv->parent, channel->hwUsbEppNum, + channel->isDirTx, true); + priv->resubmit = false; + } else + priv->dmaCallbacks.complete(priv->parent, channel->hwUsbEppNum, + channel->isDirTx, false); + } +} + +static void *phytium_dma_channelAlloc(struct DMA_CONTROLLER *priv, + uint8_t isDirTx, uint8_t hwEpNum, uint8_t isIsoc) +{ + struct DMA_Channel *channel; + uint32_t ep_ien, ep_cfg; + uint16_t dmaMode; + + if (!priv || (hwEpNum >= MAX_DMA_CHANNELS)) + return NULL; + + if (!priv->regs) { + pr_err("dma regs is null\n"); + return NULL; + } + + ep_ien = phytium_read32(&priv->regs->ep_ien); + + if (isDirTx) { + if (priv->tx[hwEpNum].status > DMA_STATUS_FREE) + return NULL; + + channel = &priv->tx[hwEpNum]; + channel->isDirTx = 0x80; + ep_ien |= (0x01 << (hwEpNum + 16)); + dmaMode = priv->dmaCfg.dmaModeTx; + } else { + if (priv->rx[hwEpNum].status > DMA_STATUS_FREE) + return NULL; + + channel = &priv->rx[hwEpNum]; + channel->isDirTx = 0x00; + ep_ien |= (0x01 << hwEpNum); + dmaMode = priv->dmaCfg.dmaModeRx; + } + + channel->isIsoc = 0; + if (isIsoc) + channel->isIsoc = 1; + + INIT_LIST_HEAD(&channel->trbChainDescList); + channel->numOfTrbChain = 0; + channel->controller = priv; + channel->dmultGuard = 0; + channel->status = DMA_STATUS_FREE; + channel->hwUsbEppNum = hwEpNum; + + phytium_write32(&priv->regs->ep_sel, (uint32_t)hwEpNum | channel->isDirTx); + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + + if (priv->dmaMode == DMA_MODE_CHANNEL_INDIVIDUAL) { + if (dmaMode & (1 << hwEpNum)) + ep_cfg |= DMARV_EP_DMULT; + else + ep_cfg |= DMARV_EP_DSING; + } + + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP | DMARF_EP_TRBERR); + phytium_write32(&priv->regs->ep_cfg, (uint32_t)DMARV_EP_ENABLED | ep_cfg); + phytium_write32(&priv->regs->ep_ien, ep_ien); + phytium_write32(&priv->regs->ep_sts_en, DMARF_EP_TRBERR); + + return channel; +} + +static int32_t phytium_dma_channelRelease(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + uint32_t ep_ien, i; + struct DMA_TrbChainDesc *trbChainDesc; + + if (!channel || !priv) + return -EINVAL; + + ep_ien = phytium_read32(&priv->regs->ep_ien); + if (channel->isDirTx) + ep_ien &= ~(0x01 << (channel->hwUsbEppNum + 16)); + else + ep_ien &= ~(0x01 << channel->hwUsbEppNum); + + phytium_write32(&priv->regs->ep_sel, (uint32_t)channel->hwUsbEppNum | channel->isDirTx); + phytium_write32(&priv->regs->ep_cfg, (uint32_t)0); + phytium_write32(&priv->regs->ep_ien, ep_ien); + phytium_write32(&priv->regs->ep_sts_en, 0x0); + phytium_write32(&priv->regs->ep_cmd, DMARF_EP_EPRST); + phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP | + DMARF_EP_TRBERR | DMARF_EP_ISOERR); + + if (channel->status >= DMA_STATUS_BUSY) + channel->status = DMA_STATUS_ABORT; + + priv->dmaDrv->dma_channelAbort(priv, channel); + + while (channel->trbChainDescList.next != &channel->trbChainDescList) { + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (trbChainDesc) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + } + } + } + + channel->status = DMA_STATUS_UNKNOW; + + return 0; +} + +static void ShowTrbChain(struct DMA_TrbChainDesc *trbChainDesc) +{ + int i; + + if (!trbChainDesc) + return; + pr_debug("Trb Chain %p for channel %p\n", 
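+	/*
+	 * The dump below walks numOfTrbs + 2 entries, so the trailing link or
+	 * empty TRB built by phytium_dma_NewTd() is visible as well.
+	 */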
trbChainDesc, trbChainDesc->channel); + pr_debug("idx | trb size | trb addr | FLAG: NORMAL LINK CHAIN ALL\n"); + for (i = 0; i < trbChainDesc->numOfTrbs + 2; i++) + pr_debug("%02d | %08x | %08x | %d %d %d %08x\n", + i, + trbChainDesc->trbPool[i].dmaSize & TD_SIZE_MASK, + trbChainDesc->trbPool[i].dmaAddr, + (trbChainDesc->trbPool[i].ctrl & TD_TYPE_NORMAL) ? 1 : 0, + (trbChainDesc->trbPool[i].ctrl & TD_TYPE_LINK) ? 1 : 0, + (trbChainDesc->trbPool[i].ctrl & TDF_CHAIN_BIT) ? 1 : 0, + trbChainDesc->trbPool[i].ctrl); +} + +static uint32_t phytium_dma_NewTd(struct DMA_CONTROLLER *priv, + struct DMA_TrbChainDesc *trbChainDesc) +{ + uint32_t startAddress, dataSize; + struct DMA_Channel *channel; + int i = 0; + uint32_t tmp = 0; + + if (!priv || !trbChainDesc) + return 0; + + startAddress = trbChainDesc->dwStartAddress; + channel = trbChainDesc->channel; + dataSize = channel->maxTrbLen; + + if (trbChainDesc->numOfTrbs > 1) { + for (i = 0; i < (trbChainDesc->numOfTrbs - 1); i++) { + if (channel->dmultEnabled) { + if (trbChainDesc->framesDesc && priv->isHostCtrlMode + && channel->isIsoc) { + BUILD_NORMAL_TRB_NO_IOC(trbChainDesc->trbPool[i], + trbChainDesc->dwStartAddress + + trbChainDesc->framesDesc[i].offset, + trbChainDesc->framesDesc[i].length, 0); + continue; + } else + BUILD_NORMAL_TRB_NO_IOC(trbChainDesc->trbPool[i], + startAddress, dataSize, 0); + } else + BUILD_NORMAL_TRB_NO_IOC_CHAIN(trbChainDesc->trbPool[i], + startAddress, dataSize, 0); + startAddress += dataSize; + tmp += dataSize; + } + } + + if (trbChainDesc->framesDesc && priv->isHostCtrlMode && channel->isIsoc) { + BUILD_NORMAL_TRB(trbChainDesc->trbPool[i], + trbChainDesc->dwStartAddress + trbChainDesc->framesDesc[i].offset, + trbChainDesc->framesDesc[i].length, 0); + } else + BUILD_NORMAL_TRB(trbChainDesc->trbPool[i], startAddress, + trbChainDesc->len - tmp, 0); + + trbChainDesc->lastTrbIsLink = 0; + if (channel->dmultEnabled) { + if (channel->isIsoc) { + trbChainDesc->lastTrbIsLink = 1; + BUILD_LINK_TRB(trbChainDesc->trbPool[i + 1], trbChainDesc->trbDMAAddr); + } else + BUILD_EMPTY_TRB(trbChainDesc->trbPool[i + 1]); + } + + ShowTrbChain(trbChainDesc); + + return 0; +} + +static void phytium_dma_ArmTd(struct DMA_CONTROLLER *priv, struct DMA_TrbChainDesc *trbChainDesc) +{ + uint32_t ep_sts, ep_cfg, ep_cmd; + struct DMA_TrbChainDesc *startedChain; + struct DMA_Trb *lastPrevTrb; + struct DMA_Channel *channel; + + if (!priv || !trbChainDesc) + return; + + channel = trbChainDesc->channel; + phytium_write32(&priv->regs->ep_sel, (channel->isDirTx | channel->hwUsbEppNum)); + + ep_sts = phytium_read32(&priv->regs->ep_sts); + + if (channel->status == DMA_STATUS_FREE) { + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + phytium_write32(&priv->regs->traddr, trbChainDesc->trbDMAAddr); + phytium_write32(&priv->regs->ep_sts, DMARF_EP_TRBERR); + + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + phytium_write32(&priv->regs->ep_cmd, DMARF_EP_DRDY); + if (!(ep_cfg & DMARV_EP_ENABLED)) { + ep_cfg |= DMARV_EP_ENABLED; + phytium_write32(&priv->regs->ep_cfg, ep_cfg); + } + } else { + if (channel->trbChainDescList.prev != channel->trbChainDescList.next) { + startedChain = GetTrbChainDescEntry(channel->trbChainDescList.prev->prev); + lastPrevTrb = &startedChain->trbPool[startedChain->numOfTrbs]; + startedChain->lastTrbIsLink = 1; + BUILD_LINK_TRB((*lastPrevTrb), trbChainDesc->trbDMAAddr); + ep_cmd = phytium_read32(&priv->regs->ep_cmd); + if (!(ep_cmd & DMARF_EP_DRDY)) { + phytium_write32(&priv->regs->traddr, trbChainDesc->trbDMAAddr); + 
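+				/*
+				 * DRDY is clear, i.e. the controller already
+				 * drained the previous chain, so TRADDR must
+				 * be re-pointed at the new chain before the
+				 * doorbell write below.
+				 */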
phytium_write32(&priv->regs->ep_cmd, DMARF_EP_DRDY); + } + ShowTrbChain(startedChain); + } + } +} + +static int32_t phytium_dma_TrbChainAlloc(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel, uint32_t numOfTrbs, struct DMA_TrbChainDesc **chain) +{ + struct DMA_TrbChainDesc *trbChainDesc = NULL; + int i, j; + uint32_t mapcount, count = 0; + + if (!priv || !channel) + return -ENOMEM; + + for (i = 0; i < TAB_SIZE_OF_DMA_CHAIN; i++) { + if (!priv->trbChainDesc[i].reserved) { + trbChainDesc = &priv->trbChainDesc[i]; + break; + } + } + + if (!trbChainDesc) { + pr_err("No Free TRB Chain Descriptor\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&trbChainDesc->chainNode); + *chain = NULL; + mapcount = (numOfTrbs + 2 + 7) >> 3; + + for (i = 0; i < TRB_MAP_SIZE; i++) { + if (priv->trbChainFreeMap[i] == 0x0) + count++; + else + count = 0; + + if (count == mapcount) + break; + } + + if (count != mapcount) { + pr_err("No Free TRBs count:0x%x, mapcount:0x%x\n", count, mapcount); + return -ENOMEM; + } + + trbChainDesc->reserved = 1; + trbChainDesc->channel = channel; + trbChainDesc->actualLen = 0; + trbChainDesc->mapSize = mapcount; + trbChainDesc->numOfTrbs = numOfTrbs; + trbChainDesc->start = (i + 1 - mapcount) << 3; + trbChainDesc->end = trbChainDesc->start; + trbChainDesc->trbPool = (struct DMA_Trb *)priv->trbPoolAddr + trbChainDesc->start; + trbChainDesc->trbDMAAddr = (uint32_t)(uintptr_t)((struct DMA_Trb *)priv->trbDMAPoolAddr + + trbChainDesc->start); + trbChainDesc->isoError = 0; + + list_add_tail(&trbChainDesc->chainNode, &channel->trbChainDescList); + + channel->numOfTrbChain++; + + for (j = 0; j < count; j++) + priv->trbChainFreeMap[i - j] = 0xFF; + + *chain = trbChainDesc; + + return 0; +} + +static int32_t phytium_dma_channelProgram(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel, uint16_t packetSize, uintptr_t dmaAddr, + uint32_t len, void *framesDesc, uint32_t framesNumber) +{ + uint32_t numOfTrbs; + uint8_t retval; + struct DMA_TrbChainDesc *trbChainDesc; + + if (!priv || !channel) + return -EINVAL; + + //printk(KERN_ERR "%s %d\n",__func__,__LINE__); + if (framesDesc && priv->isHostCtrlMode && channel->isIsoc) + numOfTrbs = framesNumber; + else + numOfTrbs = divRoundUp(len, channel->maxTrbLen); + + retval = phytium_dma_TrbChainAlloc(priv, channel, numOfTrbs, &trbChainDesc); + if (retval) + return retval; + + trbChainDesc->dwStartAddress = dmaAddr; + trbChainDesc->len = len; + trbChainDesc->framesDesc = framesDesc; + + channel->wMaxPacketSize = packetSize; + phytium_dma_NewTd(priv, trbChainDesc); + channel->lastTransferLength = 0; + phytium_dma_ArmTd(priv, trbChainDesc); + + channel->status = DMA_STATUS_BUSY; + + return 0; +} + +static enum DMA_Status phytium_dma_getChannelStatus(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel) +{ + uint32_t ep_cmd, ep_sts; + + if (!priv || !channel) + return DMA_STATUS_UNKNOW; + + if (channel->status >= DMA_STATUS_BUSY) { + phytium_write32(&priv->regs->ep_sel, channel->isDirTx | channel->hwUsbEppNum); + ep_cmd = phytium_read32(&priv->regs->ep_cmd); + ep_sts = phytium_read32(&priv->regs->ep_sts); + + if ((ep_cmd & DMARF_EP_DRDY) || (ep_sts & DMARF_EP_DBUSY)) + channel->status = DMA_STATUS_ARMED; + else + channel->status = DMA_STATUS_BUSY; + } + + return channel->status; +} + +static int32_t phytium_dma_channelAbort(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + struct DMA_TrbChainDesc *trbChainDesc; + uint32_t ep_cfg, i; + + if (!priv || !channel) + return -EINVAL; + + if (phytium_dma_getChannelStatus(priv, 
channel) >= DMA_STATUS_BUSY) + phytium_write32(&priv->regs->conf, DMARF_RESET); + + phytium_write32(&priv->regs->ep_sel, (uint32_t)(channel->isDirTx | channel->hwUsbEppNum)); + ep_cfg = phytium_read32(&priv->regs->ep_cfg); + ep_cfg &= ~DMARV_EP_ENABLED; + phytium_write32(&priv->regs->ep_cfg, ep_cfg); + phytium_write32(&priv->regs->ep_sts, 0xFFFFFFFF); + + while (channel->trbChainDescList.next != &channel->trbChainDescList) { + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); + if (trbChainDesc) { + if (!list_empty(&trbChainDesc->chainNode)) { + for (i = 0; i < trbChainDesc->mapSize; i++) + priv->trbChainFreeMap[(trbChainDesc->start >> 3) + i] = 0; + + trbChainDesc->channel->numOfTrbChain--; + trbChainDesc->reserved = 0; + list_del(&trbChainDesc->chainNode); + } + } + } + if (channel->status != DMA_STATUS_UNKNOW) + channel->status = DMA_STATUS_FREE; + + return 0; +} + +static int32_t phytium_dma_getActualLength(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + if (!priv || !channel) + return -EINVAL; + + return channel->lastTransferLength; +} + +static int32_t phytium_dma_getMaxLength(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel) +{ + if (!priv || !channel) + return -EINVAL; + + return channel->maxTdLen; +} + +static int32_t phytium_dma_setMaxLength(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel, uint32_t val) +{ + if (!priv || !channel) + return -EINVAL; + + if (channel->dmultEnabled) + channel->maxTrbLen = val; + else + channel->maxTrbLen = (val > TD_SING_MAX_TRB_DATA_SIZE) ? + TD_SING_MAX_TRB_DATA_SIZE : val; + + return 0; +} + +static void phytium_dma_setParentPriv(struct DMA_CONTROLLER *priv, void *parent) +{ + if (!priv) + return; + + priv->parent = parent; +} + +void phytium_dma_controllerReset(struct DMA_CONTROLLER *priv) +{ + uint32_t conf; + + if (!priv) + return; + + conf = phytium_read32(&priv->regs->conf); + conf |= DMARF_RESET; + phytium_write32(&priv->regs->conf, conf); + + priv->resubmit = true; +} + +void phytium_dma_setHostMode(struct DMA_CONTROLLER *priv) +{ + if (!priv) + return; + + priv->isHostCtrlMode = 1; +} + +struct DMA_OBJ phytium_dma_Drv = { + .dma_probe = phytium_dma_probe, + .dma_init = phytium_dma_init, + .dma_destroy = phytium_dma_destroy, + .dma_start = phytium_dma_start, + .dma_stop = phytium_dma_stop, + .dma_isr = phytium_dma_isr, + .dma_errIsr = phytium_dma_errIsr, + .dma_channelAlloc = phytium_dma_channelAlloc, + .dma_channelRelease = phytium_dma_channelRelease, + .dma_channelProgram = phytium_dma_channelProgram, + .dma_channelAbort = phytium_dma_channelAbort, + .dma_getChannelStatus = phytium_dma_getChannelStatus, + .dma_getActualLength = phytium_dma_getActualLength, + .dma_getMaxLength = phytium_dma_getMaxLength, + .dma_setMaxLength = phytium_dma_setMaxLength, + .dma_setParentPriv = phytium_dma_setParentPriv, + .dma_controllerReset = phytium_dma_controllerReset, + .dma_setHostMode = phytium_dma_setHostMode, +}; + +struct DMA_OBJ *DMA_GetInstance(void) +{ + return &phytium_dma_Drv; +} diff --git a/drivers/usb/phytium/dma.h b/drivers/usb/phytium/dma.h new file mode 100644 index 0000000000000000000000000000000000000000..073a078c1289625ce4bec236498aa49e651c7375 --- /dev/null +++ b/drivers/usb/phytium/dma.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PHYTIUM_DMA_H__ +#define __PHYTIUM_DMA_H__ + +#include +//#include "list.h" + +#define NUM_OF_TRB 1024 +#define TRB_MAP_SIZE ((NUM_OF_TRB + (sizeof(uint8_t) * 8) - 1) / (sizeof(uint8_t) * 8)) +#define MAX_DMA_CHANNELS 16 
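+/*
+ * TRB bookkeeping: the shared pool holds NUM_OF_TRB TRBs, tracked by a
+ * byte-per-8-TRB granule map of TRB_MAP_SIZE entries.  A chain of n data
+ * TRBs reserves (n + 2 + 7) >> 3 granules (two extra slots for the trailing
+ * link/empty TRB); e.g. a 3-TRB transfer takes (3 + 2 + 7) >> 3 = 1 granule,
+ * i.e. 8 TRB slots.
+ */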
+#define TAB_SIZE_OF_DMA_CHAIN (MAX_DMA_CHANNELS * 2) + +#define DMARD_EP_TX 0x80ul +#define DMARD_EP_RX 0x00ul + +#define DMARF_EP_EPRST 0x00000001ul +#define DMARF_EP_DRDY 0x00000040ul +#define DMARF_EP_DFLUSH 0x00000080ul + +#define DMARF_EP_IOC 0x4ul +#define DMARF_EP_ISP 0x8ul +#define DMARF_EP_DESCMIS 0x10ul +#define DMARF_EP_TRBERR 0x80ul +#define DMARF_EP_DBUSY 0x200ul +#define DMARF_EP_CCS 0x800ul +#define DMARF_EP_OUTSMM 0x4000ul +#define DMARF_EP_ISOERR 0x8000ul +#define DMARF_EP_DTRANS 0x80000000ul + +#define DMARV_EP_DISABLED 0ul +#define DMARV_EP_ENABLED 1ul +#define DMARV_EP_DSING 0x1000ul +#define DMARV_EP_DMULT 0x2000ul + +#define TD_SIZE_MASK 0x00001FFFF + +#define DMARF_RESET 0x00000001ul +#define DMARF_DSING 0x00000100ul +#define DMARF_DMULT 0x00000200ul + +#define TD_DMULT_MAX_TRB_DATA_SIZE 65536u +#define TD_DMULT_MAX_TD_DATA_SIZE (~1u) +#define TD_SING_MAX_TRB_DATA_SIZE 65536u +#define TD_SING_MAX_TD_DATA_SIZE 65536u + +#define TD_TYPE_NORMAL 0x400L +#define TD_TYPE_LINK 0x1800L +#define TDF_CYCLE_BIT 0x1L +#define TDF_TOGGLE_CYCLE_BIT 0x2L +#define TDF_INT_ON_SHORT_PACKET 0x4L +#define TDF_FIFO_MODE 0x8L +#define TDF_CHAIN_BIT 0x10L +#define TDF_INT_ON_COMPLECTION 0x20L +#define TDF_STREAMID_VALID 0x200L + +struct DMA_Trb { + uint32_t dmaAddr; + uint32_t dmaSize; /* 0:16 transfer length; 24:31 burst length */ + uint32_t ctrl; +}; + +enum DMA_Status { + DMA_STATUS_UNKNOW, + DMA_STATUS_FREE, + DMA_STATUS_ABORT, + DMA_STATUS_BUSY, + DMA_STATUS_ARMED +}; + +struct DMA_CFG { + uintptr_t regBase; + uint16_t dmaModeTx; + uint16_t dmaModeRx; + void *trbAddr; + uintptr_t trbDmaAddr; +}; + +struct DMA_SYSREQ { + uint32_t privDataSize; + uint32_t trbMemSize; +}; + +struct DMA_CALLBACKS { + void (*complete)(void *pD, uint8_t epNum, uint8_t dir, bool resubmit); +}; + +struct DMA_CONTROLLER; + +struct DMA_Channel { + struct DMA_CONTROLLER *controller; + uint16_t wMaxPacketSize; + uint8_t hwUsbEppNum; + uint8_t isDirTx; + uint32_t maxTdLen; + uint32_t maxTrbLen; + enum DMA_Status status; + void *priv; + uint32_t dmultGuard; + uint8_t dmultEnabled; + uint8_t numOfTrbChain; + struct list_head trbChainDescList; + uint32_t lastTransferLength; + uint8_t isIsoc; +}; + +struct DMA_OBJ { + int32_t (*dma_probe)(struct DMA_CFG *config, struct DMA_SYSREQ *sysReq); + + int32_t (*dma_init)(struct DMA_CONTROLLER *priv, const struct DMA_CFG *config, + struct DMA_CALLBACKS *callbacks); + + void (*dma_destroy)(struct DMA_CONTROLLER *priv); + + int32_t (*dma_start)(struct DMA_CONTROLLER *priv); + + uint32_t (*dma_stop)(struct DMA_CONTROLLER *priv); + + void (*dma_isr)(struct DMA_CONTROLLER *priv); + + void (*dma_errIsr)(struct DMA_CONTROLLER *priv, uint8_t irqNr, uint8_t isDirTx); + + void * (*dma_channelAlloc)(struct DMA_CONTROLLER *priv, + uint8_t isDirTx, uint8_t hwEpNum, uint8_t isIso); + + int32_t (*dma_channelRelease)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + int32_t (*dma_channelProgram)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel, + uint16_t packetSize, uintptr_t dmaAddr, + uint32_t len, void *framesDesc, uint32_t framesNumber); + + int32_t (*dma_channelAbort)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + enum DMA_Status (*dma_getChannelStatus)(struct DMA_CONTROLLER *priv, + struct DMA_Channel *channel); + + int32_t (*dma_getActualLength)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + int32_t (*dma_getMaxLength)(struct DMA_CONTROLLER *priv, struct DMA_Channel *channel); + + int32_t (*dma_setMaxLength)(struct DMA_CONTROLLER 
*priv, + struct DMA_Channel *channel, uint32_t val); + + void (*dma_setParentPriv)(struct DMA_CONTROLLER *priv, void *parent); + + void (*dma_controllerReset)(struct DMA_CONTROLLER *priv); + + void (*dma_setHostMode)(struct DMA_CONTROLLER *priv); +}; + +enum DMA_Mode { + DMA_MODE_GLOBAL_DMULT, + DMA_MODE_GLOBAL_DSING, + DMA_MODE_CHANNEL_INDIVIDUAL, +}; + +struct DMA_TrbFrameDesc { + uint32_t length; + uint32_t offset; +}; + +struct DMA_TrbChainDesc { + uint8_t reserved; + struct DMA_Channel *channel; + struct DMA_Trb *trbPool; + uint32_t trbDMAAddr; + uint32_t len; + uint32_t dwStartAddress; + uint32_t actualLen; + uint8_t isoError; + uint8_t lastTrbIsLink; + uint32_t mapSize; + uint32_t numOfTrbs; + uint32_t start; + uint32_t end; + struct DMA_TrbFrameDesc *framesDesc; + struct list_head chainNode; +}; + +struct DMA_CONTROLLER { + struct DMARegs *regs; + struct DMA_OBJ *dmaDrv; + struct DMA_CFG dmaCfg; + struct DMA_CALLBACKS dmaCallbacks; + struct DMA_Channel rx[MAX_DMA_CHANNELS]; + struct DMA_Channel tx[MAX_DMA_CHANNELS]; + enum DMA_Mode dmaMode; + uint8_t isHostCtrlMode; + void *parent; + void *trbPoolAddr; + uintptr_t trbDMAPoolAddr; + uint8_t trbChainFreeMap[TRB_MAP_SIZE]; + struct DMA_TrbChainDesc trbChainDesc[TAB_SIZE_OF_DMA_CHAIN]; + bool resubmit; +}; + +struct DMA_OBJ *DMA_GetInstance(void); +#endif diff --git a/drivers/usb/phytium/gadget.c b/drivers/usb/phytium/gadget.c new file mode 100644 index 0000000000000000000000000000000000000000..afcb846c8db342444c4bb853bd92bfd7559c5630 --- /dev/null +++ b/drivers/usb/phytium/gadget.c @@ -0,0 +1,2538 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "gadget.h" +#include "dma.h" +#include "core.h" +#include "hw-regs.h" + +#define DRV_NAME "phytium_gadget" + +#define GADGET_PRIV_BUFFER_SIZE 64 +#define GADGET_USB_EP_NUMBER_MASK 0xf +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#define GADGET_ESTALL 1 +#define GADGET_EUNHANDLED 2 +#define GADGET_EAUTOACK 3 +#define GADGET_ESHUTDOWN 4 +#define GADGET_ECONNRESET 5 +#define GADGET_EAGAIN 6 + +static inline struct GadgetEp *toGadgetEp(struct GADGET_EP *gadget_Ep) +{ + return (struct GadgetEp *)((uintptr_t)gadget_Ep - + ((uintptr_t)&((struct GadgetEp *)0)->gadgetEp)); +} + +static inline struct GadgetRequest *requestToGadgetRequest(struct GADGET_REQ *req) +{ + return (struct GadgetRequest *)((uintptr_t)req - + ((uintptr_t)&((struct GadgetRequest *)0)->request)); +} + +static inline struct GADGET_REQ *listToGadgetRequest(struct list_head *list) +{ + return (struct GADGET_REQ *)((uintptr_t)list - + ((uintptr_t)&((struct GADGET_REQ *)0)->list)); +} + +#define listBrowsingRequest(iterator, head, memeber) \ + for (iterator = listToGadgetRequest((head)->next); \ + &iterator->list != (head); \ + iterator = listToGadgetRequest(iterator->list.next)) + +static inline struct GADGET_REQ *gadgetGetNextReq(struct GadgetEp *gadgetEp) +{ + struct list_head *list = &gadgetEp->request; + + if (list_empty(list)) { + pr_debug("no request available for %s\n", gadgetEp->gadgetEp.name); + return NULL; + } + + return listToGadgetRequest(list->next); +} + +static inline struct GADGET_REQ *gadgetGetNextEp0Req(struct GADGET_CTRL *priv) +{ + struct list_head *queue; + + if (!priv) + return NULL; + + queue = &priv->in[0].request; + + if (list_empty(queue)) + return NULL; + + return listToGadgetRequest(queue->next); +} + +void gadget_giveback(struct phytium_ep *phy_ep, struct usb_request *usb_req, int status) +{ + struct phytium_request *phy_req; + struct phytium_cusb *config; + int busy; + + if (!phy_ep || 
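+	/*
+	 * gadget_giveback() drops config->lock around the completion callback
+	 * below, mirroring the usual UDC giveback pattern.
+	 */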
!usb_req) + return; + + busy = phy_ep->busy; + phy_req = usb_req ? container_of(usb_req, struct phytium_request, request) : NULL; + config = phy_req->config; + + list_del(&phy_req->list); + + if (usb_req->status == -EINPROGRESS) { + if (status == GADGET_ESHUTDOWN) + usb_req->status = -ESHUTDOWN; + else if (status == GADGET_ECONNRESET) + usb_req->status = -ECONNRESET; + else + usb_req->status = -phy_req->gadget_request->status; + } + + if (usb_req->status == 0) + pr_debug("%s done request %p, %d/%d\n", phy_ep->end_point.name, + usb_req, usb_req->actual, usb_req->length); + else + pr_debug("%s request %p, %d/%d fault %d\n", phy_ep->end_point.name, + usb_req, usb_req->actual, usb_req->length, usb_req->status); + + usb_gadget_unmap_request(&config->gadget, &phy_req->request, phy_req->is_tx); + + busy = phy_ep->busy; + phy_ep->busy = 1; + + spin_unlock(&config->lock); + + if (phy_req->request.complete) + phy_req->request.complete(&phy_req->ep->end_point, usb_req); + + spin_lock(&config->lock); + + phy_ep->busy = busy; +} + +static void gadget_callback_complete(struct GADGET_EP *gadget_ep, struct GADGET_REQ *gadget_req) +{ + struct phytium_ep *phy_ep; + struct phytium_request *phy_req; + struct usb_request *usb_req; + + if (!gadget_ep || !gadget_req) + return; + + phy_req = gadget_req->context; + usb_req = &phy_req->request; + phy_ep = phy_req->ep; + usb_req->actual = gadget_req->actual; + usb_req->length = gadget_req->length; + + gadget_giveback(phy_ep, usb_req, gadget_req->status); +} + +static void gadgetDisconnect(struct GADGET_CTRL *priv) +{ + pr_info("Disconnect USB Device Driver\n"); + + if (!priv) + return; + + priv->gadgetDev.speed = USB_SPEED_UNKNOWN; + priv->gadgetDev.state = USB_STATE_NOTATTACHED; + + if (priv->eventCallback.disconnect) + priv->eventCallback.disconnect(priv); +} + +static int gadget_get_frame(struct usb_gadget *gadget) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_wakeup(struct usb_gadget *gadget) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_vbus_session(struct usb_gadget *gadget, int is_active) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_vbus_draw(struct usb_gadget *gadget, unsigned int mA) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return 0; +} + +static int gadget_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) +{ + unsigned long flags; + struct phytium_cusb *config; + struct GADGET_CTRL *priv; + uint32_t gen_cfg; + + if (!gadget || !driver) + return -EINVAL; + + if (driver->max_speed < USB_SPEED_HIGH) + return -EINVAL; + + config = container_of(gadget, struct phytium_cusb, gadget); + + pr_info("registering driver %s\n", driver->function); + + spin_lock_irqsave(&config->lock, flags); + config->gadget_driver = driver; + spin_unlock_irqrestore(&config->lock, flags); + + config->gadget_obj->gadget_start(config->gadget_priv); + + priv = (struct GADGET_CTRL *)config->gadget_priv; + if (priv->phy_regs) { + gen_cfg = phytium_read32(&priv->phy_regs->gen_cfg); + gen_cfg = gen_cfg & (~BIT(7)); + phytium_write32(&priv->phy_regs->gen_cfg, gen_cfg); + } + + return 0; +} + +static int gadget_udc_stop(struct usb_gadget *gadget) +{ + struct phytium_cusb *config; + unsigned long flags; + struct GADGET_CTRL *priv; + uint32_t gen_cfg; + + if (!gadget) + return -EINVAL; + + config = container_of(gadget, struct 
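+	/*
+	 * Setting bit 7 of gen_cfg below looks like the soft-disconnect
+	 * control: gadget_udc_start() clears the same bit to make the
+	 * device visible on the bus.
+	 */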
phytium_cusb, gadget); + + priv = (struct GADGET_CTRL *)config->gadget_priv; + if (priv->phy_regs) { + gen_cfg = phytium_read32(&priv->phy_regs->gen_cfg); + gen_cfg = gen_cfg | BIT(7); + phytium_write32(&priv->phy_regs->gen_cfg, gen_cfg); + } + spin_lock_irqsave(&config->lock, flags); + config->gadget_driver = NULL; + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static struct usb_gadget_ops phytium_gadget_ops = { + .get_frame = gadget_get_frame, + .wakeup = gadget_wakeup, + .vbus_session = gadget_vbus_session, + .vbus_draw = gadget_vbus_draw, + .pullup = gadget_pullup, + .udc_start = gadget_udc_start, + .udc_stop = gadget_udc_stop, +}; + +static int gadget_ep_enable(struct usb_ep *ls_ep, const struct usb_endpoint_descriptor *desc) +{ + struct phytium_ep *phy_ep = NULL; + struct phytium_cusb *config; + unsigned long flags; + + if (!ls_ep || !desc) + return -EINVAL; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + + if (phy_ep->desc) + return -EBUSY; + + spin_lock_irqsave(&config->lock, flags); + + phy_ep->desc = desc; + phy_ep->busy = 0; + config->gadget_obj->gadget_epEnable(config->gadget_priv, phy_ep->gadget_ep, desc); + + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static int gadget_ep_disable(struct usb_ep *ls_ep) +{ + struct phytium_ep *phy_ep = NULL; + struct phytium_cusb *config; + unsigned long flags; + + if (!ls_ep) + return -EBUSY; + + pr_info("%s %d\n", __func__, __LINE__); + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + + spin_lock_irqsave(&config->lock, flags); + + phy_ep->desc = NULL; + phy_ep->busy = 0; + phy_ep->end_point.desc = NULL; + config->gadget_obj->gadget_epDisable(config->gadget_priv, phy_ep->gadget_ep); + + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static struct usb_request *gadget_ep_alloc_request(struct usb_ep *ls_ep, gfp_t gfp_flags) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct GADGET_EP *gadget_ep; + struct phytium_request *phy_request; + + if (!ls_ep) + return NULL; + + pr_info("%s %d\n", __func__, __LINE__); + phy_request = kzalloc(sizeof(*phy_request), gfp_flags); + if (!phy_request) { + pr_err("not enough momory\n"); + return NULL; + } + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + gadget_ep = phy_ep->gadget_ep; + + INIT_LIST_HEAD(&phy_request->list); + phy_request->request.dma = DMA_ADDR_INVALID; + phy_request->epnum = phy_ep->ep_num; + phy_request->ep = phy_ep; + phy_request->config = phy_ep->config; + + config->gadget_obj->gadget_reqAlloc(config->gadget_priv, gadget_ep, + &phy_request->gadget_request); + + return &phy_request->request; +} + +static void gadget_ep_free_request(struct usb_ep *ls_ep, struct usb_request *ls_req) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct phytium_request *phy_request; + + if (!ls_ep || !ls_req) + return; + + pr_info("%s %d\n", __func__, __LINE__); + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + config = phy_request->config; + phy_ep = ls_ep ? 
container_of(ls_ep, struct phytium_ep, end_point) : NULL; + + config->gadget_obj->gadget_reqFree(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); + kfree(phy_request); +} + +static int gadget_ep_enqueue(struct usb_ep *ls_ep, struct usb_request *ls_req, gfp_t gfp_flags) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct phytium_request *phy_request; + unsigned long flags; + int status = 0; + + if (!ls_ep || !ls_req) + return -EINVAL; + + if (!ls_req->buf) + return -ENODATA; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + phy_request->config = config; + + if (phy_request->ep != phy_ep) + return -EINVAL; + + phy_request->request.actual = 0; + phy_request->request.status = -EINPROGRESS; + phy_request->epnum = phy_ep->ep_num; + phy_request->is_tx = phy_ep->is_tx; + + phy_request->gadget_request->length = ls_req->length; + phy_request->gadget_request->status = 0; + phy_request->gadget_request->complete = gadget_callback_complete; + phy_request->gadget_request->buf = ls_req->buf; + phy_request->gadget_request->context = ls_req; + + status = usb_gadget_map_request(&config->gadget, &phy_request->request, phy_request->is_tx); + + if (!phy_ep->desc) { + pr_debug("req %p queued to %s while ep %s\n", phy_request, ls_ep->name, "disabled"); + status = -ESHUTDOWN; + usb_gadget_unmap_request(&config->gadget, &phy_request->request, + phy_request->is_tx); + return status; + } + + spin_lock_irqsave(&config->lock, flags); + + phy_request->gadget_request->dma = phy_request->request.dma; + list_add_tail(&phy_request->list, &phy_ep->req_list); + + pr_debug("queue to %s (%s), length = %d\n", phy_ep->name, + phy_ep->is_tx ? "IN/TX" : "OUT/RX", phy_request->request.length); + + status = config->gadget_obj->gadget_reqQueue(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); + + spin_unlock_irqrestore(&config->lock, flags); + + return status; +} + +static int gadget_ep_dequeue(struct usb_ep *ls_ep, struct usb_request *ls_req) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + unsigned long flags; + int status = 0; + struct phytium_request *phy_request; + struct phytium_request *phy_next_request; + + if (!ls_ep || !ls_req) + return -EINVAL; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + + if (phy_request->ep != phy_ep) + return -EINVAL; + + spin_lock_irqsave(&config->lock, flags); + + list_for_each_entry(phy_next_request, &phy_ep->req_list, list) { + if (phy_next_request == phy_request) + break; + } + + if (phy_next_request != phy_request) { + pr_info("request %p not queued to %s\n", phy_request, ls_ep->name); + status = -EINVAL; + goto done; + } + + status = config->gadget_obj->gadget_reqDequeue(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); +done: + spin_unlock_irqrestore(&config->lock, flags); + return status; +} + +static int gadget_ep_set_halt(struct usb_ep *ls_ep, int value) +{ + struct phytium_ep *phy_ep; + struct phytium_cusb *config; + struct GADGET_EP *gadget_ep = NULL; + unsigned long flags; + int status = 0; + + if (!ls_ep) + return -EINVAL; + + phy_ep = ls_ep ? 
container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + gadget_ep = phy_ep->gadget_ep; + + spin_lock_irqsave(&config->lock, flags); + + status = config->gadget_obj->gadget_epSetHalt(config->gadget_priv, + phy_ep->gadget_ep, value); + if (status > 0) { + spin_unlock_irqrestore(&config->lock, flags); + return -status; + } + + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static const struct usb_ep_ops gadget_ep_ops = { + .enable = gadget_ep_enable, + .disable = gadget_ep_disable, + .alloc_request = gadget_ep_alloc_request, + .free_request = gadget_ep_free_request, + .queue = gadget_ep_enqueue, + .dequeue = gadget_ep_dequeue, + .set_halt = gadget_ep_set_halt, +}; + +static int gadget_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) +{ + return -EINVAL; +} + +static int gadget_ep0_disable(struct usb_ep *ep) +{ + return -EINVAL; +} + +static int gadget_ep0_enqueue(struct usb_ep *ls_ep, struct usb_request *ls_req, gfp_t gfp_flags) +{ + struct phytium_ep *phy_ep; + struct phytium_request *phy_request; + struct phytium_cusb *config; + int status = 0; + unsigned long flags; + + if (!ls_ep || !ls_req) + return -EINVAL; + + phy_ep = ls_ep ? container_of(ls_ep, struct phytium_ep, end_point) : NULL; + config = phy_ep->config; + phy_request = ls_req ? container_of(ls_req, struct phytium_request, request) : NULL; + + spin_lock_irqsave(&config->lock, flags); + + if (!list_empty(&phy_ep->req_list)) { + status = -EBUSY; + goto cleanup; + } + + phy_request->config = config; + phy_request->request.actual = 0; + phy_request->gadget_request->actual = 0; + phy_request->is_tx = config->ep0_data_stage_is_tx; + phy_request->gadget_request->length = phy_request->request.length; + phy_request->gadget_request->status = 0; + phy_request->gadget_request->complete = gadget_callback_complete; + phy_request->gadget_request->buf = phy_request->request.buf; + phy_request->gadget_request->context = phy_request; + + status = usb_gadget_map_request(&config->gadget, &phy_request->request, phy_request->is_tx); + if (status) { + pr_info("failed to map request\n"); + status = -EINVAL; + goto cleanup; + } + + phy_request->gadget_request->dma = phy_request->request.dma; + list_add_tail(&phy_request->list, &phy_ep->req_list); + + pr_debug("queue to %s (%s), length = %d\n", phy_ep->name, + phy_ep->is_tx ? 
"IN/TX" : "OUT/RX", phy_request->request.length); + + status = config->gadget_obj->gadget_reqQueue(config->gadget_priv, phy_ep->gadget_ep, + phy_request->gadget_request); + if (status > 0) { + status = -status; + usb_gadget_unmap_request(&config->gadget, &phy_request->request, + phy_request->is_tx); + list_del(&phy_request->list); + goto cleanup; + } + +cleanup: + spin_unlock_irqrestore(&config->lock, flags); + return status; +} + +static int gadget_ep0_dequeue(struct usb_ep *ep, struct usb_request *ls_request) +{ + return -EOPNOTSUPP; +} + +static int gadget_ep0_set_halt(struct usb_ep *ep, int value) +{ + return -EINVAL; +} + + +static const struct usb_ep_ops gadget_ep0_ops = { + .enable = gadget_ep0_enable, + .disable = gadget_ep0_disable, + .alloc_request = gadget_ep_alloc_request, + .free_request = gadget_ep_free_request, + .queue = gadget_ep0_enqueue, + .dequeue = gadget_ep0_dequeue, + .set_halt = gadget_ep0_set_halt, +}; + +static int32_t gadgetWaitForBusyBit(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + struct GadgetEp *gadgetEp; + uint8_t epNum; + uint8_t txcs = CS_BUSY; + uint8_t buf = 0; + uint8_t bufflag = 0; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + epNum = gadgetEp->hwEpNum; + + if (gadgetEp->isInEp || gadgetEp->hwEpNum == 0) + return 0; + + buf = phytium_read8(&priv->regs->ep[epNum - 1].txcon) & CON_BUF; + + while ((txcs & CS_BUSY) || (bufflag == 0)) { + txcs = phytium_read8(&priv->regs->ep[epNum - 1].txcs); + + if (((txcs & CS_NPAK) >> CS_NPAK_OFFSET) == buf || buf == 0) + bufflag = 1; + else + bufflag = 0; + } + + return 0; +} + +static inline void gadgetEpXDataReceive(struct GADGET_CTRL *priv, + struct GadgetRequest *gadgetRequest) +{ + struct GadgetEp *gadgetEp; + struct GADGET_REQ *gadgetReq; + uint8_t epType; + uint32_t requestSize, channelStatus, chMaxLen; + + if (!priv || !gadgetRequest) + return; + + gadgetEp = gadgetRequest->ep; + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, gadgetEp->channel); + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + channelStatus = priv->dmaDrv->dma_getChannelStatus(priv->dmaController, gadgetEp->channel); + + gadgetReq = &gadgetRequest->request; + if (gadgetReq->actual < gadgetReq->length || gadgetRequest->zlp) { + gadgetRequest->zlp = 0; + if ((gadgetReq->length - gadgetReq->actual) < chMaxLen) + requestSize = gadgetReq->length - gadgetReq->actual; + else + requestSize = chMaxLen; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, gadgetEp->channel, + gadgetEp->gadgetEp.maxPacket, + gadgetReq->dma + gadgetReq->actual, requestSize, NULL, 0); + } +} + +static inline void gadgetEpXDataSend(struct GADGET_CTRL *priv, struct GadgetRequest *gadgetRequest) +{ + struct GadgetEp *gadgetEp; + struct GADGET_REQ *gadgetReq; + uint8_t epType; + uint32_t requestSize, channelStatus, chMaxLen; + + if (!priv || !gadgetRequest) + return; + + gadgetEp = gadgetRequest->ep; + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, gadgetEp->channel); + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + channelStatus = priv->dmaDrv->dma_getChannelStatus(priv->dmaController, gadgetEp->channel); + + gadgetReq = &gadgetRequest->request; + if ((gadgetReq->length - gadgetReq->actual) < chMaxLen) + requestSize = gadgetReq->length - gadgetReq->actual; + else + requestSize = chMaxLen; + pr_debug("Transmit/IN %s gadgetReq %p gadgetRequest:%p requestSize:0x%x packetSize:0x%x\n", + gadgetEp->gadgetEp.name, gadgetReq, 
gadgetRequest, + requestSize, gadgetEp->gadgetEp.maxPacket); + + gadgetRequest->zlp = 0; + priv->dmaDrv->dma_channelProgram(priv->dmaController, gadgetEp->channel, + gadgetEp->gadgetEp.maxPacket, + gadgetReq->dma + gadgetReq->actual, requestSize, NULL, 0); +} + +static int32_t gadgetEpXSetHalt(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + uint8_t value) +{ + struct GadgetEp *gadgetEp; + uint8_t epType; + struct GADGET_REQ *req = NULL; + struct GadgetRequest *gadgetRequest = NULL; + uint8_t epNum, txcon, rxcon; + uint32_t status = DMA_STATUS_ARMED; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + if (epType == USB_ENDPOINT_XFER_ISOC) + return -EINVAL; + + pr_debug("%s: %s stall\n", gadget_Ep->name, value ? "set" : "clear"); + req = gadgetGetNextReq(gadgetEp); + + if (!value) + gadgetEp->wedged = 0; + + if (value && gadgetEp->isInEp && req && gadgetEp->state == GADGET_EP_BUSY) { + while (status == DMA_STATUS_ARMED) + status = priv->dmaDrv->dma_getChannelStatus(priv->dmaController, + gadgetEp->channel); + + gadgetWaitForBusyBit(priv, gadget_Ep); + } + + epNum = gadgetEp->hwEpNum; + + if (gadgetEp->isInEp) { + txcon = phytium_read8(&priv->regs->ep[epNum - 1].txcon); + if (value) { + phytium_write8(&priv->regs->ep[epNum - 1].txcon, txcon | CON_STALL); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, + ENDPRST_IO_TX | epNum | ENDPRST_FIFORST); + + } else { + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, + ENDPRST_IO_TX | epNum | ENDPRST_TOGRST); + phytium_write8(&priv->regs->ep[epNum - 1].txcon, txcon & (~CON_STALL)); + } + } else { + rxcon = phytium_read8(&priv->regs->ep[epNum - 1].rxcon); + if (value) { + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, rxcon | CON_STALL); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, epNum | ENDPRST_FIFORST); + } else { + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, + epNum | ENDPRST_TOGRST | ENDPRST_FIFORST); + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, rxcon & (~CON_STALL)); + } + } + + if (gadgetEp->state != GADGET_EP_BUSY && !value && req) { + gadgetRequest = requestToGadgetRequest(req); + if (gadgetEp->isInEp) + gadgetEpXDataSend(priv, gadgetRequest); + else + gadgetEpXDataReceive(priv, gadgetRequest); + } + + return 0; +} + +static int32_t gadgetEp0SetHalt(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + uint8_t value) +{ + struct GadgetEp *gadgetEp; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + if (!list_empty(&gadgetEp->request)) + return -EBUSY; + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_IN: + case GADGET_EP0_STAGE_OUT: + case GADGET_EP0_STAGE_ACK: + case GADGET_EP0_STAGE_STATUSIN: + case GADGET_EP0_STAGE_STATUSOUT: + priv->ep0State = GADGET_EP0_STAGE_SETUP; + break; + default: + return -EINVAL; + } + + return 0; +} + + +static void gadgetEp0Callback(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req, uint8_t status) +{ + if (!priv || !gadgetEp || !req) + return; + + priv->ep0State = GADGET_EP0_STAGE_SETUP; + list_del(&req->list); + gadgetEp->requestsInList--; + + if (req->status == EINPROGRESS) + req->status = status; + + if (req->complete) + req->complete(&gadgetEp->gadgetEp, req); +} + +static enum usb_device_speed 
gadgetGetActualSpeed(struct GADGET_CTRL *priv) +{ + uint8_t speedctrl; + + if (!priv) + return USB_SPEED_UNKNOWN; + + speedctrl = phytium_read8(&priv->regs->speedctrl) & (~SPEEDCTRL_HSDISABLE); + switch (speedctrl) { + case SPEEDCTRL_HS: + return USB_SPEED_HIGH; + case SPEEDCTRL_FS: + return USB_SPEED_FULL; + case SPEEDCTRL_LS: + return USB_SPEED_LOW; + default: + return USB_SPEED_UNKNOWN; + } +} + +static int32_t gadgetServiceSetFeatureReq(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + uint8_t epNum, isIn; + struct GadgetEp *gadgetEp; + + if (!priv || !setup) + return -EINVAL; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + switch (setup->wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + pr_info("set feature - remote wakeup\n"); + priv->isRemoteWakeup = 1; + break; + case USB_DEVICE_B_HNP_ENABLE: + pr_info("set feature - B HNP Enable\n"); + pr_info("OTG not implemented\n"); + return -EINVAL; + case USB_DEVICE_A_HNP_SUPPORT: + pr_info("set feature - A HNP support\n"); + pr_info("OTG not implemented\n"); + return -EINVAL; + } + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epNum = setup->wIndex & 0x0f; + isIn = setup->wIndex & USB_DIR_IN; + if (epNum == 0 || epNum > 15 || setup->wValue != 0) + return -EINVAL; + + gadgetEp = isIn ? &priv->in[epNum] : &priv->out[epNum]; + + gadgetEpXSetHalt(priv, &gadgetEp->gadgetEp, 1); + break; + default: + return -EINVAL; + } + return 0; +} + +static int32_t gadgetServiceClearFeatureReq(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + uint8_t epNum, isIn; + struct GadgetEp *gadgetEp; + + if (!priv || !setup) + return -EINVAL; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + if (setup->wValue == USB_DEVICE_B_HNP_ENABLE) { + pr_err("OTG not implemented\n"); + return -EINVAL; + } + + if (setup->wValue != USB_DEVICE_REMOTE_WAKEUP) + return GADGET_EUNHANDLED; + + priv->isRemoteWakeup = 0; + return GADGET_EAUTOACK; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + pr_info("clear feature wIndex:0x%x wValue:0x%x\n", setup->wIndex, setup->wValue); + epNum = setup->wIndex & 0x0f; + isIn = setup->wIndex & USB_DIR_IN; + if (epNum == 0 || epNum > 15 || setup->wValue != 0) + return GADGET_EUNHANDLED; + + gadgetEp = isIn ? &priv->in[epNum] : &priv->out[epNum]; + if (!gadgetEp->gadgetEp.desc) + return -EINVAL; + + if (gadgetEp->wedged) + break; + gadgetEpXSetHalt(priv, &gadgetEp->gadgetEp, 0); + break; + default: + return GADGET_EUNHANDLED; + } + + return 0; +} +
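+/* + * GET_STATUS handling: gadgetServiceSetupReq() stages the two status bytes in + * privBuffAddr and sends them back to the host over the EP0 IN DMA channel. + */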
+static int32_t gadgetServiceSetupReq(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + struct GadgetEp *gadgetEp; + uint8_t isIn, epNum; + uint8_t rxcon, txcon; + int len; + + if (!priv || !setup) + return -EINVAL; + + if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { + if (setup->bRequest != USB_REQ_GET_STATUS) + return GADGET_EUNHANDLED; + } else + return GADGET_EUNHANDLED; + + priv->privBuffAddr[1] = 0; + priv->privBuffAddr[0] = 0; + + /* the recipient bits live in bRequestType, not bRequest */ + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + pr_info("wIndex:0x%x isSelfPowered:%d isRemoteWakeup:%d\n", + setup->wIndex, priv->isSelfPowered, priv->isRemoteWakeup); + + if (setup->wIndex == OTG_STS_SELECTOR) + priv->privBuffAddr[0] = priv->hostRequestFlag; + else { + priv->privBuffAddr[0] = priv->isSelfPowered ? USB_DEVICE_SELF_POWERED : 0; + priv->privBuffAddr[1] = priv->isRemoteWakeup ? USB_DEVICE_REMOTE_WAKEUP : 0; + } + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epNum = setup->wIndex & 0x0f; + if (!epNum) + break; + + isIn = setup->wIndex & USB_DIR_IN; + + gadgetEp = isIn ? &priv->in[epNum] : &priv->out[epNum]; + if (!gadgetEp->gadgetEp.desc) + return -EINVAL; + + if (isIn) { + txcon = phytium_read8(&priv->regs->ep[epNum - 1].txcon); + priv->privBuffAddr[0] = (txcon & CON_STALL) ? 1 : 0; + } else { + rxcon = phytium_read8(&priv->regs->ep[epNum - 1].rxcon); + priv->privBuffAddr[0] = (rxcon & CON_STALL) ? 1 : 0; + } + break; + default: + return GADGET_EUNHANDLED; + } + + len = setup->wLength; + if (len > 2) + len = 2; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, priv->in[0].channel, + priv->in[0].gadgetEp.maxPacket, priv->privBuffDma, len, NULL, 0); + + return 0; +} + +static int32_t gadgetGetSetup(struct GADGET_CTRL *priv, struct usb_ctrlrequest *setup) +{ + int i; + uint8_t ep0cs; + struct GADGET_REQ *request = NULL; + struct GadgetRequest *gadgetRequest; + + if (!priv || !setup) + return -EINVAL; + + phytium_write8(&priv->regs->ep0cs, EP0CS_CHGSET); + + for (i = 0; i < 8; i++) + ((char *)setup)[i] = phytium_read8(&priv->regs->setupdat[i]); + + ep0cs = phytium_read8(&priv->regs->ep0cs); + if (ep0cs & EP0CS_CHGSET) { + pr_info("setup packet changed while being read - not an error\n"); + return GADGET_EAUTOACK; + } + + phytium_write8(&priv->regs->usbirq, USBIR_SUDAV); + pr_debug("setup packet: req%02x.%02x v:%04x i:%04x len:%d\n", setup->bRequestType, + setup->bRequest, setup->wValue, setup->wIndex, setup->wLength); + + request = gadgetGetNextEp0Req(priv); + if (request) { + gadgetRequest = requestToGadgetRequest(request); + pr_info("Previous request had not finished when a new one arrived\n"); + gadgetEp0Callback(priv, gadgetRequest->ep, request, 0); + } + + if (setup->wLength) { + if (setup->bRequestType & USB_DIR_IN) + priv->ep0State = GADGET_EP0_STAGE_IN; + else + priv->ep0State = GADGET_EP0_STAGE_OUT; + } else + priv->ep0State = GADGET_EP0_STAGE_ACK; + + return 0; +} +
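+/* + * EP0 setup dispatch: the service routines return 0 when a request has been + * handled, GADGET_EAUTOACK when the hardware acknowledges it on its own, + * GADGET_EUNHANDLED to forward it to the function driver via the setup + * callback, and GADGET_ESTALL to stall endpoint zero. + */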
+static void gadgetEp0StageSetup(struct GADGET_CTRL *priv) +{ + struct usb_ctrlrequest setup; + int32_t retval; + uint8_t ep0cs; + + if (!priv) + return; + + retval = gadgetGetSetup(priv, &setup); + + priv->gadgetDev.speed = gadgetGetActualSpeed(priv); + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_ACK: + if ((setup.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) + retval = GADGET_EUNHANDLED; + else { + switch (setup.bRequest) { + case USB_REQ_SET_ADDRESS: + priv->deviceAddress = setup.wValue & 0x7F; + priv->gadgetDev.state = USB_STATE_ADDRESS; + pr_info("set address: %d\n", priv->deviceAddress); + retval = GADGET_EAUTOACK; + break; + case USB_REQ_SET_FEATURE: + retval = gadgetServiceSetFeatureReq(priv, &setup); + break; + case USB_REQ_CLEAR_FEATURE: + retval = gadgetServiceClearFeatureReq(priv, &setup); + break; + default: + retval = GADGET_EUNHANDLED; + break; + } + } + + if (retval == GADGET_EUNHANDLED) + break; + else if (retval == 0) + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + + priv->ep0State = GADGET_EP0_STAGE_SETUP; + break; + case GADGET_EP0_STAGE_IN: + pr_debug("setup data stage in\n"); + retval = gadgetServiceSetupReq(priv, &setup); + + if (retval == 0) + priv->ep0State = GADGET_EP0_STAGE_STATUSOUT; + break; + case GADGET_EP0_STAGE_OUT: + pr_debug("setup data stage out\n"); + phytium_write8(&priv->regs->ep0Rxbc, 0); + retval = GADGET_EUNHANDLED; + break; + default: + if (retval == GADGET_EAUTOACK) + return; + + pr_debug("forward request\n"); + retval = GADGET_EUNHANDLED; + break; + } + + if (retval == GADGET_EUNHANDLED) { + if (priv->eventCallback.setup) + retval = priv->eventCallback.setup(priv, &setup); + + /* 0x7FFF matches USB_GADGET_DELAYED_STATUS: the function driver completes the transfer later */ + if (retval == 0x7FFF) { + pr_debug("delayed response not finished yet\n"); + return; + } + + if (retval) + retval = GADGET_ESTALL; + } + + if (retval == GADGET_EUNHANDLED || retval == GADGET_ESTALL) { + pr_debug("request not handled - send stall\n"); + ep0cs = phytium_read8(&priv->regs->ep0cs); + ep0cs |= EP0CS_STALL; + phytium_write8(&priv->regs->ep0cs, ep0cs); + priv->ep0State = GADGET_EP0_STAGE_SETUP; + } else if (priv->ep0State == GADGET_EP0_STAGE_ACK) { + priv->ep0State = GADGET_EP0_STAGE_SETUP; + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + pr_debug("setup transfer completed\n"); + } +} +
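+/* + * EP0 data-stage helpers: the DMA program length is clamped to the channel's + * maximum transfer length before the status stage is acknowledged. + */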
+static void gadgetEp0DataSend(struct GADGET_CTRL *priv) +{ + struct GADGET_REQ *request; + uint32_t chMaxLen, requestSize; + + if (!priv) + return; + + request = gadgetGetNextEp0Req(priv); + if (!request) { + pr_debug("Ep0 queue is empty\n"); + return; + } + + if (priv->dmaDrv->dma_getChannelStatus(priv->dmaController, priv->in[0].channel) + >= DMA_STATUS_BUSY) { + pr_err("a transfer is already pending\n"); + return; + } + + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, priv->in[0].channel); + requestSize = (request->length < chMaxLen) ? request->length : chMaxLen; + pr_debug("usbRequest:%p requestSize:%d packetSize:%d\n", request, requestSize, + priv->in[0].gadgetEp.maxPacket); + priv->ep0State = GADGET_EP0_STAGE_STATUSOUT; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, priv->in[0].channel, + priv->in[0].gadgetEp.maxPacket, request->dma, requestSize, NULL, 0); +} + +static void gadgetEp0DataReceive(struct GADGET_CTRL *priv) +{ + uint32_t chMaxLen, requestSize; + struct GADGET_REQ *request; + + if (!priv) + return; + + request = gadgetGetNextEp0Req(priv); + if (!request) { + pr_debug("Ep0 queue is empty\n"); + return; + } + + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, priv->out[0].channel); + requestSize = (request->length < chMaxLen) ? request->length : chMaxLen; + pr_debug("usbRequest:%p requestSize:%d packetSize:%d\n", request, requestSize, + priv->out[0].gadgetEp.maxPacket); + priv->ep0State = GADGET_EP0_STAGE_STATUSIN; + + priv->dmaDrv->dma_channelProgram(priv->dmaController, priv->out[0].channel, + priv->out[0].gadgetEp.maxPacket, request->dma, requestSize, NULL, 0); +} + +static uint32_t gadgetEp0Irq(struct GADGET_CTRL *priv) +{ + uint8_t ep0cs; + struct GADGET_REQ *request; + + if (!priv) + return 0; + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_IN: /* send data */ + pr_debug("DATA Stage IN\n"); + gadgetEp0DataSend(priv); + break; + case GADGET_EP0_STAGE_OUT: /* receive data */ + pr_debug("DATA Stage OUT\n"); + gadgetEp0DataReceive(priv); + break; + case GADGET_EP0_STAGE_STATUSIN: + pr_debug("DATA Stage STATUS IN\n"); + request = gadgetGetNextEp0Req(priv); + if (request) + request->actual = priv->dmaDrv->dma_getActualLength(priv->dmaController, + priv->out[0].channel); + + priv->ep0State = GADGET_EP0_STAGE_SETUP; + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + gadgetEp0Callback(priv, &priv->out[0], request, 0); + break; + case GADGET_EP0_STAGE_STATUSOUT: + pr_debug("DATA Stage STATUS OUT\n"); + request = gadgetGetNextEp0Req(priv); + if (request) + request->actual = priv->dmaDrv->dma_getActualLength(priv->dmaController, + priv->in[0].channel); + + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + if (request) + gadgetEp0Callback(priv, &priv->in[0], request, 0); + else + priv->ep0State = GADGET_EP0_STAGE_SETUP; + break; + case GADGET_EP0_STAGE_SETUP: + pr_debug("DATA Stage SETUP\n"); + gadgetEp0StageSetup(priv); + break; + case GADGET_EP0_STAGE_ACK: + pr_debug("DATA Stage ACK\n"); + break; + default: + pr_debug("DATA Stage UNKNOWN\n"); + /* stall endpoint zero; EP0CS_STALL belongs to the ep0cs register */ + ep0cs = phytium_read8(&priv->regs->ep0cs); + ep0cs |= EP0CS_STALL; + phytium_write8(&priv->regs->ep0cs, ep0cs); + break; + } + + return 0; +} +
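+/* + * Completion path for the data endpoints: gadgetEpXDataCallback() runs from + * the DMA completion, finalizes the head request and, unless the endpoint is + * isochronous, immediately arms the next queued request. + */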
+static void gadgetEpXCallback(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req, uint32_t status) +{ + if (!gadgetEp || !req) + return; + + list_del(&req->list); + gadgetEp->requestsInList--; + req->status = status; + + if (req->complete) + req->complete(&gadgetEp->gadgetEp, req); +} + +static void gadgetEpXDataCallback(struct GADGET_CTRL *priv, uint8_t epNum, uint8_t epDir) +{ + struct GadgetEp *gadgetEp; + struct GADGET_REQ *gadgetReq; + uint32_t actual_length = 0; + uint32_t chMaxLen = 0; + uint8_t epType; + + if (!priv) + return; + + pr_debug("%s %d epNum:%d epDir:%d\n", __func__, __LINE__, epNum, epDir); + gadgetEp = epDir ? &priv->in[epNum] : &priv->out[epNum]; + + gadgetReq = gadgetGetNextReq(gadgetEp); + if (!gadgetReq) { + pr_debug("%s queue is empty\n", gadgetEp->gadgetEp.name); + return; + } + + if (gadgetEp->channel) { + actual_length = priv->dmaDrv->dma_getActualLength(priv->dmaController, + gadgetEp->channel); + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, gadgetEp->channel); + gadgetReq->actual += actual_length; + } + + if (gadgetReq->actual == gadgetReq->length || actual_length < chMaxLen) { + gadgetEpXCallback(priv, gadgetEp, gadgetReq, 0); + + if (gadgetEp->gadgetEp.desc) { + epType = gadgetEp->gadgetEp.desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + if (epType == USB_ENDPOINT_XFER_ISOC) + return; + + gadgetReq = gadgetGetNextReq(gadgetEp); + gadgetEp->state = GADGET_EP_ALLOCATED; + if (!gadgetReq) { + pr_debug("%s queue is empty\n", gadgetEp->gadgetEp.name); + return; + } + gadgetEp->state = GADGET_EP_BUSY; + if (epDir) + gadgetEpXDataSend(priv, requestToGadgetRequest(gadgetReq)); + else + gadgetEpXDataReceive(priv, requestToGadgetRequest(gadgetReq)); + } + } else { + if (epDir) + gadgetEpXDataSend(priv, requestToGadgetRequest(gadgetReq)); + else + gadgetEpXDataReceive(priv, requestToGadgetRequest(gadgetReq)); + } +} + +void gadget_CallbackTransfer(void *priv, uint8_t epNum, uint8_t epDir, bool resubmit) +{ + if (!epNum) + gadgetEp0Irq(priv); + else + gadgetEpXDataCallback(priv, epNum, epDir); +} + +static void gadgetAbortEndpoint(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + struct GADGET_REQ *gadgetReq; + + pr_debug("Abort Device endpoint: %s, dma channel: %p\n", gadgetEp->gadgetEp.name, + gadgetEp->channel); + + if (gadgetEp->channel && gadgetEp->hwEpNum != 0) { + priv->dmaDrv->dma_channelRelease(priv->dmaController, gadgetEp->channel); + gadgetEp->channel = NULL; + } + + if (gadgetEp->channel && gadgetEp->hwEpNum == 0) { + if (priv->releaseEp0Flag == 1) { + priv->dmaDrv->dma_channelAbort(priv->dmaController, gadgetEp->channel); + priv->dmaDrv->dma_channelAlloc(priv->dmaController, gadgetEp->isInEp, + gadgetEp->hwEpNum, 0); + } + } + + while (gadgetEp->request.next != &gadgetEp->request) { + gadgetReq = listToGadgetRequest(gadgetEp->request.next); + pr_debug("shutdown request %p from epName:%s\n", gadgetReq, + gadgetEp->gadgetEp.name); + if (!gadgetEp->gadgetEp.address) + gadgetEp0Callback(priv, gadgetEp, gadgetReq, GADGET_ESHUTDOWN); + else + gadgetEpXCallback(priv, gadgetEp, gadgetReq, GADGET_ESHUTDOWN); + } + + gadgetEp->state = GADGET_EP_FREE; +} + +static void gadgetStopActivity(struct GADGET_CTRL *priv) +{ + int i = 0; + struct GadgetEp *gadgetEp; + + pr_debug("USB Stop Activity\n"); + + if (!priv) + return; + + for (i = 0; i < 16; i++) { + gadgetEp = &priv->in[i]; + if (gadgetEp->state != GADGET_EP_NOT_IMPLEMENTED) + gadgetAbortEndpoint(priv, gadgetEp); + + gadgetEp = &priv->out[i]; + if (gadgetEp->state != GADGET_EP_NOT_IMPLEMENTED) + gadgetAbortEndpoint(priv, gadgetEp); + } +} + +static int32_t gadgetEp0Enable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + uint8_t txien, rxien; + + if (!priv || !gadgetEp) + return -EINVAL; + + if (gadgetEp->state != GADGET_EP_FREE) + return -EBUSY; + + if (!gadgetEp->isInEp) { + rxien = phytium_read16(&priv->regs->rxien); + rxien &= ~(1 << gadgetEp->hwEpNum); + phytium_write16(&priv->regs->rxien, rxien); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_IO_TX | FIFOCTRL_FIFOAUTO); + } else { + txien = phytium_read16(&priv->regs->txien); + txien &= ~(1 << gadgetEp->hwEpNum); + phytium_write16(&priv->regs->txien, txien); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO); + } + + gadgetEp->gadgetEp.desc = NULL; + gadgetEp->state = GADGET_EP_ALLOCATED; + gadgetEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + gadgetEp->isInEp, gadgetEp->hwEpNum, 0); + phytium_write8(&priv->regs->ep0maxpack, 0x40); + + return 0; +} +
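+/* + * gadgetEpXEnable() programs txcon/rxcon from the endpoint descriptor and the + * static per-endpoint buffering configuration, resets the FIFO and data + * toggle, and claims a DMA channel for the endpoint. + */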
+static int32_t gadgetEpXEnable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + const struct usb_endpoint_descriptor *desc) +{ + int32_t status = -EINVAL; + uint32_t payload; + uint16_t type, iso = 0; + uint8_t epNum = 0; + + if (!priv || !gadgetEp || !desc) + return -EINVAL; + + pr_debug("enable endpoint %s\n", gadgetEp->gadgetEp.name); + if (gadgetEp->state != GADGET_EP_FREE) { + status = -EBUSY; + goto fail; + } + + payload = desc->wMaxPacketSize & 0x7ff; + if (!payload) { + status = -EINVAL; + goto fail; + } + + epNum = gadgetEp->hwEpNum; + + type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + switch (type) { + case USB_ENDPOINT_XFER_ISOC: + type = CON_TYPE_ISOC; + /* bits 12:11 of wMaxPacketSize select high-bandwidth transactions; payload holds only bits 10:0 */ + switch ((desc->wMaxPacketSize >> 11) & 0x3) { + case 0: + iso = CON_TYPE_ISOC_1_ISOD; + break; + case 1: + payload *= 2; + iso = CON_TYPE_ISOC_2_ISOD; + break; + case 2: + payload *= 3; + iso = CON_TYPE_ISOC_3_ISOD; + break; + } + break; + case USB_ENDPOINT_XFER_INT: + type = CON_TYPE_INT; + break; + case USB_ENDPOINT_XFER_BULK: + type = CON_TYPE_BULK; + break; + } + + if (desc->bEndpointAddress & USB_DIR_IN) { + if (!gadgetEp->isInEp) { + status = -ENODEV; + goto fail; + } + + if (payload > priv->gadgetCfg.epIN[epNum].maxPacketSize) { + status = -EINVAL; + goto fail; + } + + phytium_write16(&priv->regs->txmaxpack[epNum - 1], payload); + phytium_write8(&priv->regs->ep[epNum - 1].txcon, CON_VAL | type + | iso | (priv->gadgetCfg.epIN[epNum - 1].bufferingValue - 1)); + + phytium_write8(&priv->regs->fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX | epNum | + ENDPRST_FIFORST | ENDPRST_TOGRST); + } else { + if (gadgetEp->isInEp) { + status = -ENODEV; + goto fail; + } + + if (payload > priv->gadgetCfg.epOUT[epNum].maxPacketSize) { + status = -EINVAL; + goto fail; + } + + phytium_write16(&priv->regs->rxmaxpack[epNum - 1], payload); + /* OUT endpoints take their buffering from the epOUT configuration */ + phytium_write8(&priv->regs->ep[epNum - 1].rxcon, CON_VAL | type + | iso | (priv->gadgetCfg.epOUT[epNum - 1].bufferingValue - 1)); + + phytium_write8(&priv->regs->fifoctrl, FIFOCTRL_FIFOAUTO | epNum); + phytium_write8(&priv->regs->endprst, epNum); + phytium_write8(&priv->regs->endprst, epNum | ENDPRST_FIFORST | ENDPRST_TOGRST); + } + + if (priv->dmaController) + gadgetEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + gadgetEp->isInEp, epNum, (type == CON_TYPE_ISOC) ? 1 : 0); + + if (type == CON_TYPE_ISOC) { + if (gadgetEp->isInEp) { + phytium_write16(&priv->regs->isoautodump, 1 << epNum); + phytium_write16(&priv->regs->isodctrl, 1 << epNum); + } + + priv->dmaDrv->dma_setMaxLength(priv->dmaController, gadgetEp->channel, payload); + } + + gadgetEp->state = GADGET_EP_ALLOCATED; + gadgetEp->gadgetEp.desc = desc; + status = 0; +fail: + return status; +} +
+static int32_t gadgetEpEnable(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + const struct usb_endpoint_descriptor *desc) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep || !desc) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + gadgetEp->wedged = 0; + + if (gadgetEp->hwEpNum) + return gadgetEpXEnable(priv, gadgetEp, desc); + else + return gadgetEp0Enable(priv, gadgetEp); +} + +static int32_t gadgetEpXDisable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + uint8_t txcon, rxcon; + + if (!priv || !gadgetEp) + return -EINVAL; + + pr_debug("disable endpoint %s\n", gadgetEp->gadgetEp.name); + if (gadgetEp->isInEp) { + txcon = phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].txcon); + txcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[gadgetEp->hwEpNum - 1].txcon, txcon); + } else { + rxcon = phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].rxcon); + rxcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[gadgetEp->hwEpNum - 1].rxcon, rxcon); + } + gadgetAbortEndpoint(priv, gadgetEp); + gadgetEp->gadgetEp.desc = NULL; + gadgetEp->state = GADGET_EP_FREE; + return 0; +} + +static int32_t gadgetEp0Disable(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp) +{ + if (!priv || !gadgetEp) + return -EINVAL; + + pr_debug("disable endpoint %s\n", gadgetEp->gadgetEp.name); + gadgetAbortEndpoint(priv, gadgetEp); + + return 0; +} + +static int32_t gadgetEpDisable(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep) + return -EINVAL; + + if (gadget_Ep->address == 0) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + gadgetEp->wedged = 0; + if (gadgetEp->hwEpNum) + return gadgetEpXDisable(priv, gadgetEp); + else + return gadgetEp0Disable(priv, gadgetEp); +} +
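+/* + * Queueing model: a request queued to an idle, non-stalled endpoint is armed + * on the DMA channel right away; otherwise it waits on the endpoint's request + * list and is started from the completion callback. + */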
"IN/TX" : "OUT/RX"), req->length); + + if ((gadgetEp->state == GADGET_EP_ALLOCATED) && (&req->list == gadgetEp->request.next)) { + if (gadgetEp->isInEp) { + if (!(phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].txcon) + & CON_STALL)) { + gadgetEp->state = GADGET_EP_BUSY; + gadgetEpXDataSend(priv, gadgetRequest); + } + } else { + if (!(phytium_read8(&priv->regs->ep[gadgetEp->hwEpNum - 1].rxcon) + & CON_STALL)) { + gadgetEp->state = GADGET_EP_BUSY; + gadgetEpXDataReceive(priv, gadgetRequest); + } + } + } else if (gadgetEp->state == GADGET_EP_BUSY) { + if (usb_endpoint_xfer_isoc(gadgetEp->gadgetEp.desc)) { + if (gadgetEp->isInEp) + gadgetEpXDataSend(priv, gadgetRequest); + else + gadgetEpXDataReceive(priv, gadgetRequest); + } + } + + pr_debug("endpoint %s (%s) now is busy - transfer will be waiting in Queue\n", + gadgetEp->gadgetEp.name, (gadgetEp->isInEp ? "IN/TX" : "OUT/RX")); + + return 0; +} + +static int32_t gadgetEp0Queue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest; + + if (!priv || !gadgetEp || !req) + return -EINVAL; + + req->actual = 0; + req->status = EINPROGRESS; + + if (!list_empty(&gadgetEp->request)) + return -EBUSY; + + gadgetRequest = requestToGadgetRequest(req); + gadgetRequest->ep = gadgetEp; + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_OUT: + case GADGET_EP0_STAGE_IN: + case GADGET_EP0_STAGE_ACK: + break; + default: + return -EINVAL; + } + + list_add_tail(&req->list, &gadgetEp->request); + gadgetEp->requestsInList++; + + pr_debug("queue to %s (%s), length:%d stage:%d\n", gadgetEp->gadgetEp.name, + gadgetEp->isInEp ? "IN/TX" : "OUT/RX", req->length, priv->ep0State); + + switch (priv->ep0State) { + case GADGET_EP0_STAGE_OUT: + gadgetEp0DataReceive(priv); + break; + case GADGET_EP0_STAGE_IN: + gadgetEp0DataSend(priv); + break; + case GADGET_EP0_STAGE_ACK: + if (req->length) + return -EINVAL; + phytium_write8(&priv->regs->ep0cs, EP0CS_HSNAK); + gadgetEp0Callback(priv, gadgetRequest->ep, req, 0); + pr_info("control transfer completed\n"); + break; + default: + break; + } + + return 0; +} + +static int32_t gadgetEpQueue(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ *req) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep || !req) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + + if (gadget_Ep->address & GADGET_USB_EP_NUMBER_MASK) + return gadgetEpXQueue(priv, gadgetEp, req); + else + return gadgetEp0Queue(priv, gadgetEp, req); +} + +static int32_t gadgetEpXDequeue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest; + struct GADGET_REQ *iterator; + + if (!priv || !gadgetEp || !req) + return -EINVAL; + + gadgetRequest = requestToGadgetRequest(req); + if (gadgetRequest->ep != gadgetEp) + return -EINVAL; + + pr_debug("Dequeue request %p form %s\n", req, gadgetEp->gadgetEp.name); + + listBrowsingRequest(iterator, &gadgetEp->request, list) { + if (req == iterator) + break; + } + + if (req != iterator) { + pr_info("request %p not queued to %s\n", req, gadgetEp->gadgetEp.name); + return -EINVAL; + } + + if (gadgetEp->state == GADGET_EP_BUSY) { + priv->dmaDrv->dma_channelAbort(priv->dmaController, gadgetEp->channel); + gadgetEp->state = GADGET_EP_ALLOCATED; + } + + gadgetEpXCallback(priv, gadgetEp, req, GADGET_ECONNRESET); + + return 0; +} + +static int32_t gadgetEp0Dequeue(struct GADGET_CTRL *priv, struct GadgetEp *gadgetEp, + struct GADGET_REQ *req) +{ + return 0; +} + +static 
int32_t gadgetEpDequeue(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ *req) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep || !req) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + + if (gadget_Ep->address & GADGET_USB_EP_NUMBER_MASK) + return gadgetEpXDequeue(priv, gadgetEp, req); + else + return gadgetEp0Dequeue(priv, gadgetEp, req); +} + +static int32_t gadgetEpSetHalt(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, uint8_t value) +{ + if (!priv || !gadget_Ep) + return -EINVAL; + + if (gadget_Ep->address & GADGET_USB_EP_NUMBER_MASK) + return gadgetEpXSetHalt(priv, gadget_Ep, value); + else + return gadgetEp0SetHalt(priv, gadget_Ep, value); +} + +static int32_t gadgetEpSetWedge(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + struct GadgetEp *gadgetEp = NULL; + + if (!priv || !gadget_Ep) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + gadgetEp->wedged = 1; + + return gadgetEpSetHalt(priv, gadget_Ep, 1); +} + +static int32_t gadgetEpFifoStatus(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + if (!priv || !gadget_Ep) + return -EINVAL; + + return 0; +} + +static void gadgetEpFifoFlush(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep) +{ + if (!priv || !gadget_Ep) + return; +} + +static int32_t gadgetEpAllocRequest(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ **req) +{ + struct GadgetEp *gadgetEp = NULL; + struct GadgetRequest *gadgetRequest = NULL; + + if (!priv || !gadget_Ep || !req) + return -EINVAL; + + gadgetEp = toGadgetEp(gadget_Ep); + if (priv->eventCallback.usbRequestMemAlloc) + gadgetRequest = priv->eventCallback.usbRequestMemAlloc(priv, + sizeof(*gadgetRequest)); + if (!gadgetRequest) + return -ENOMEM; + + memset(gadgetRequest, 0, sizeof(*gadgetRequest)); + *req = &gadgetRequest->request; + INIT_LIST_HEAD(&gadgetRequest->request.list); + gadgetRequest->ep = gadgetEp; + + return 0; +} + +static void gadgetEpFreeRequest(struct GADGET_CTRL *priv, struct GADGET_EP *gadget_Ep, + struct GADGET_REQ *req) +{ + struct GadgetRequest *gadgetRequest = NULL; + + if (!priv || !gadget_Ep || !req) + return; + + gadgetRequest = requestToGadgetRequest(req); + + if (priv->eventCallback.usbRequestMemFree) + priv->eventCallback.usbRequestMemFree(priv, gadgetRequest); +} + +static struct GADGET_EP_OPS gadgetEpOps = { + .epEnable = gadgetEpEnable, + .epDisable = gadgetEpDisable, + .reqQueue = gadgetEpQueue, + .reqDequeue = gadgetEpDequeue, + .epSetHalt = gadgetEpSetHalt, + .epSetWedge = gadgetEpSetWedge, + .epFifoStatus = gadgetEpFifoStatus, + .reqAlloc = gadgetEpAllocRequest, + .reqFree = gadgetEpFreeRequest +}; + +static void gadgetInitDeviceEp(struct GADGET_CTRL *priv, uint8_t isInEp) +{ + uint8_t num; + struct GadgetEp *gadgetEp; + struct GADGET_EP_CFG epCfg; + + if (!priv) + return; + + for (num = 0; num < 16; num++) { + gadgetEp = isInEp ? &priv->in[num] : &priv->out[num]; + if (num) { + epCfg = isInEp ? priv->gadgetCfg.epIN[num] : priv->gadgetCfg.epOUT[num]; + if (!epCfg.bufferingValue) { + gadgetEp->state = GADGET_EP_NOT_IMPLEMENTED; + gadgetEp->hwEpNum = num; + continue; + } + } + INIT_LIST_HEAD(&gadgetEp->gadgetEp.epList); + INIT_LIST_HEAD(&gadgetEp->request); + gadgetEp->state = GADGET_EP_FREE; + gadgetEp->hwEpNum = num; + gadgetEp->isInEp = isInEp; + gadgetEp->requestsInList = 0; + snprintf(gadgetEp->gadgetEp.name, sizeof(gadgetEp->gadgetEp.name), + "Ep%d%s", num, isInEp ? 
"in" : "out"); + if (!num) { + gadgetEp->gadgetEp.maxPacket = priv->gadgetCfg.epIN[num].maxPacketSize; + if (isInEp) + priv->gadgetDev.ep0 = &gadgetEp->gadgetEp; + gadgetEp->gadgetEp.ops = &gadgetEpOps; + gadgetEp0Enable(priv, gadgetEp); + continue; + } + + if (isInEp) { + if (epCfg.startBuf) + phytium_write16(&priv->regs->txstaddr[num - 1].addr, + epCfg.startBuf); + phytium_write8(&priv->regs->ep[num - 1].txcon, 0); + gadgetEp->gadgetEp.maxPacket = epCfg.maxPacketSize; + } else { + if (epCfg.startBuf) + phytium_write16(&priv->regs->rxstaddr[num - 1].addr, + epCfg.startBuf); + phytium_write8(&priv->regs->ep[num - 1].rxcon, 0); + gadgetEp->gadgetEp.maxPacket = epCfg.maxPacketSize; + } + + gadgetEp->gadgetEp.ops = &gadgetEpOps; + gadgetEp->gadgetEp.address = isInEp ? (0x80 | num) : num; + gadgetEp->gadgetEp.maxburst = 0; + gadgetEp->gadgetEp.mult = 0; + gadgetEp->gadgetEp.maxStreams = 0; + priv->endpointInList++; + list_add_tail(&gadgetEp->gadgetEp.epList, &priv->gadgetDev.epList); + } +} + +static void gadgetInitEndpoint(struct GADGET_CTRL *priv) +{ + if (!priv) + return; + + gadgetInitDeviceEp(priv, 1); + gadgetInitDeviceEp(priv, 0); +} + +static void gadgetSetup(struct GADGET_CTRL *priv) +{ + if (!priv) + return; + + INIT_LIST_HEAD(&priv->gadgetDev.epList); + priv->gadgetDev.state = USB_STATE_NOTATTACHED; + priv->gadgetDev.maxSpeed = USB_SPEED_HIGH; + priv->gadgetDev.speed = USB_SPEED_FULL; + snprintf(priv->gadgetDev.name, sizeof(priv->gadgetDev.name), "Phytium USB SD Driver"); + + gadgetInitEndpoint(priv); + + phytium_write8(&priv->regs->ep0maxpack, 0x40); + phytium_write16(&priv->regs->rxien, 0); + phytium_write16(&priv->regs->txien, 0); + phytium_write16(&priv->regs->rxirq, 0xFFFF); + phytium_write16(&priv->regs->txirq, 0xFFFF); + phytium_write8(&priv->regs->usbirq, 0xEF); + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + priv->isReady = 1; +} +int32_t gadgetInit(struct GADGET_CTRL *priv, struct GADGET_CFG *config, + struct GADGET_CALLBACKS *callbacks, struct device *pdev) +{ + struct DMA_SYSREQ dmaSysReq; + uint8_t usbcs; + + if (!priv || !config || !callbacks) + return -EINVAL; + + priv->dev = pdev; + priv->eventCallback = *callbacks; + priv->gadgetCfg = *config; + priv->regs = (struct HW_REGS *)config->regBase; + priv->phy_regs = (struct VHUB_REGS *)config->phy_regBase; + priv->gadgetDrv = GADGET_GetInstance(); + priv->dmaDrv = DMA_GetInstance(); + priv->dmaController = (void *)(priv + 1); + priv->dmaCfg.dmaModeRx = 0xFFFF; + priv->dmaCfg.dmaModeTx = 0xFFFF; + priv->dmaCfg.regBase = config->regBase + 0x400; + priv->dmaCfg.trbAddr = config->trbAddr; + priv->dmaCfg.trbDmaAddr = config->trbDmaAddr; + + priv->dmaDrv->dma_probe(NULL, &dmaSysReq); + priv->privBuffAddr = (uint8_t *)((uintptr_t)config->trbAddr + dmaSysReq.trbMemSize); + priv->privBuffDma = (uintptr_t)((uintptr_t)config->trbDmaAddr + dmaSysReq.trbMemSize); + priv->dmaCallback.complete = gadget_CallbackTransfer; + priv->dmaDrv->dma_init(priv->dmaController, &priv->dmaCfg, &priv->dmaCallback); + priv->dmaDrv->dma_setParentPriv(priv->dmaController, priv); + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs |= USBCS_DISCON | USBCS_LPMNYET; + phytium_write8(&priv->regs->usbcs, usbcs); + + gadgetSetup(priv); + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); + + return 0; +} + +static void 
+static void gadgetDestroy(struct GADGET_CTRL *priv) +{ + pr_debug("Destroy Device Controller driver\n"); + + if (!priv) + return; + + gadgetDisconnect(priv); + + gadgetStopActivity(priv); + + phytium_write8(&priv->regs->usbcs, USBCS_DISCON); + + priv->isReady = 0; +} + +static void gadgetStart(struct GADGET_CTRL *priv) +{ + uint8_t usbien, usbcs; + + pr_debug("Usb Device Controller start\n"); + if (!priv) + return; + + usbien = phytium_read8(&priv->regs->usbien); + usbien |= USBIR_URES | USBIR_SUDAV | USBIR_LPMIR; + phytium_write8(&priv->regs->usbien, usbien); + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); + + priv->dmaDrv->dma_start(priv->dmaController); +} + +static void gadgetReset(struct GADGET_CTRL *priv) +{ + int i = 0; + + pr_debug("Usb Disable Device Activity\n"); + + if (!priv) + return; + + if (priv->gadgetDev.speed != USB_SPEED_UNKNOWN) + gadgetDisconnect(priv); + + priv->gadgetDev.aHnpSupport = 0; + priv->gadgetDev.bHnpEnable = 0; + priv->gadgetDev.state = USB_STATE_DEFAULT; + priv->deviceAddress = 0; + priv->ep0State = GADGET_EP0_STAGE_SETUP; + + gadgetStopActivity(priv); + + /* briefly poll for the link to settle at high speed */ + for (i = 0; i < 1000; i++) { + priv->gadgetDev.speed = gadgetGetActualSpeed(priv); + if (priv->gadgetDev.speed == USB_SPEED_HIGH) + return; + } +} + +static void gadgetStop(struct GADGET_CTRL *priv) +{ + pr_debug("Usb Device Controller stop\n"); + + if (!priv) + return; + + if (!priv->isReady) + return; + + gadgetReset(priv); + + phytium_write8(&priv->regs->usbien, 0); + priv->dmaDrv->dma_stop(priv->dmaController); + + priv->isReady = 0; +} + +static void gadgetIsr(struct GADGET_CTRL *priv) +{ + uint8_t usbirq, usbien, usbcs; + + if (!priv) + return; + + usbirq = phytium_read8(&priv->regs->usbirq); + usbien = phytium_read8(&priv->regs->usbien); + + pr_debug("usbirq:0x%x usbien:0x%x\n", usbirq, usbien); + + usbirq = usbirq & usbien; + + if (!usbirq) + goto DMA_IRQ; + + if (usbirq & USBIR_LPMIR) { + pr_debug("USBIRQ LPM\n"); + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_LPMNYET; + phytium_write8(&priv->regs->usbcs, usbcs); + phytium_write8(&priv->regs->usbirq, USBIR_LPMIR); + } + + if (usbirq & USBIR_URES) { + pr_debug("USBIRQ RESET\n"); + phytium_write8(&priv->regs->usbirq, USBIR_URES); + priv->releaseEp0Flag = 1; + gadgetReset(priv); + priv->releaseEp0Flag = 0; + priv->gadgetDev.state = USB_STATE_DEFAULT; + if (priv->eventCallback.connect) + priv->eventCallback.connect(priv); + } + + if (usbirq & USBIR_HSPEED) { + pr_debug("USBIRQ HighSpeed\n"); + phytium_write8(&priv->regs->usbirq, USBIR_HSPEED); + priv->gadgetDev.speed = USB_SPEED_HIGH; + } + + if (usbirq & USBIR_SUDAV) { + pr_debug("USBIRQ SUDAV\n"); + priv->ep0State = GADGET_EP0_STAGE_SETUP; + gadgetEp0Irq(priv); + } + + if (usbirq & USBIR_SOF) { + pr_debug("USBIRQ SOF\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SOF); + } + + if (usbirq & USBIR_SUTOK) { + pr_debug("USBIRQ SUTOK\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SUTOK); + } + + if (usbirq & USBIR_SUSP) { + pr_debug("USBIRQ SUSPEND\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SUSP); + } + + return; +DMA_IRQ: + priv->dmaDrv->dma_isr(priv->dmaController); +} + +static void gadgetGetDevInstance(struct GADGET_CTRL *priv, struct GADGET_DEV **dev) +{ + if (!priv || !dev) + return; + + *dev = &priv->gadgetDev; +} + +static int32_t gadgetGetFrame(struct GADGET_CTRL *priv, uint32_t *numOfFrame) +{ + if (!priv || !numOfFrame) + return -EINVAL; + + *numOfFrame = phytium_read16(&priv->regs->frmnr); + + return 0; +} +
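+/* + * Remaining device-level operations: the frame number above comes straight + * from the frmnr register; remote wakeup, VBUS session/draw and soft pull-up + * control are not implemented by this core and report -ENOTSUPP. + */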
+static int32_t gadgetWakeUp(struct GADGET_CTRL *priv) +{ + if (!priv) + return -EINVAL; + + return -ENOTSUPP; +} + +static int32_t gadgetSetSelfPowered(struct GADGET_CTRL *priv) +{ + if (!priv) + return -EINVAL; + + priv->isSelfPowered = 1; + + return 0; +} + +static int32_t gadgetClearSelfPowered(struct GADGET_CTRL *priv) +{ + if (!priv) + return -EINVAL; + + priv->isSelfPowered = 0; + + return 0; +} + +static int32_t gadgetVbusSession(struct GADGET_CTRL *priv, uint8_t isActive) +{ + if (!priv) + return -EINVAL; + + return -ENOTSUPP; +} + +static int32_t gadgetVbusDraw(struct GADGET_CTRL *priv, uint8_t mA) +{ + if (!priv) + return -EINVAL; + + return -ENOTSUPP; +} + +static int32_t gadgetPullUp(struct GADGET_CTRL *priv, uint8_t isOn) +{ + if (!priv) + return -EINVAL; + + return -ENOTSUPP; +} + +struct GADGET_OBJ GadgetObj = { + .gadget_init = gadgetInit, + .gadget_destroy = gadgetDestroy, + .gadget_start = gadgetStart, + .gadget_stop = gadgetStop, + .gadget_isr = gadgetIsr, + //endpoint operation + .gadget_epEnable = gadgetEpEnable, + .gadget_epDisable = gadgetEpDisable, + .gadget_epSetHalt = gadgetEpSetHalt, + .gadget_epSetWedge = gadgetEpSetWedge, + .gadget_epFifoStatus = gadgetEpFifoStatus, + .gadget_epFifoFlush = gadgetEpFifoFlush, + .gadget_reqQueue = gadgetEpQueue, + .gadget_reqDequeue = gadgetEpDequeue, + .gadget_reqAlloc = gadgetEpAllocRequest, + .gadget_reqFree = gadgetEpFreeRequest, + + //Device operations + .gadget_getDevInstance = gadgetGetDevInstance, + .gadget_dGetFrame = gadgetGetFrame, + .gadget_dWakeUp = gadgetWakeUp, + .gadget_dSetSelfpowered = gadgetSetSelfPowered, + .gadget_dClearSelfpowered = gadgetClearSelfPowered, + .gadget_dVbusSession = gadgetVbusSession, + .gadget_dVbusDraw = gadgetVbusDraw, + .gadget_dPullUp = gadgetPullUp, +}; + +struct GADGET_OBJ *GADGET_GetInstance(void) +{ + return &GadgetObj; +} + +static int phytium_gadget_set_default_cfg(struct phytium_cusb *config) +{ + int index; + + config->gadget_cfg.regBase = (uintptr_t)config->regs; + config->gadget_cfg.phy_regBase = (uintptr_t)config->phy_regs; + config->gadget_cfg.dmaInterfaceWidth = GADGET_DMA_32_WIDTH; + + for (index = 0; index < 16; index++) { + if (index == 0) { + config->gadget_cfg.epIN[index].bufferingValue = 1; + config->gadget_cfg.epIN[index].maxPacketSize = 64; + config->gadget_cfg.epIN[index].startBuf = 0; + + config->gadget_cfg.epOUT[index].bufferingValue = 1; + config->gadget_cfg.epOUT[index].maxPacketSize = 64; + config->gadget_cfg.epOUT[index].startBuf = 0; + } else { + config->gadget_cfg.epIN[index].bufferingValue = 4; + config->gadget_cfg.epIN[index].maxPacketSize = 1024; + config->gadget_cfg.epIN[index].startBuf = 64 + 4096 * (index - 1); + + config->gadget_cfg.epOUT[index].bufferingValue = 4; + config->gadget_cfg.epOUT[index].maxPacketSize = 1024; + config->gadget_cfg.epOUT[index].startBuf = 64 + 4096 * (index - 1); + } + } + + return 0; +} + +void gadget_callback_connect(struct GADGET_CTRL *priv) +{ + if (!priv) + return; +} + +void gadget_callback_disconnect(struct GADGET_CTRL *priv) +{ + if (!priv) + return; +} + +int32_t gadget_callback_setup(struct GADGET_CTRL *priv, struct usb_ctrlrequest *ctrl) +{ + struct phytium_cusb *config; + int ret = 0; + + if (!priv || !ctrl) + return -EINVAL; + + config = dev_get_drvdata(priv->dev); + if (!config) + return -1; + + if (!config->gadget_driver) + return -EOPNOTSUPP; + + if (ctrl->bRequestType & USB_DIR_IN) + config->ep0_data_stage_is_tx = 1; + else + config->ep0_data_stage_is_tx = 0; + + spin_unlock(&config->lock); + ret = 
config->gadget_driver->setup(&config->gadget, ctrl); + spin_lock(&config->lock); + + if (ret == 0x7FFF) + return ret; + + if (ret < 0) + return 1; + + return 0; +} + +void *gadget_callback_usbRequestMemAlloc(struct GADGET_CTRL *priv, u32 size) +{ + struct GADGET_REQ *gadget_req = NULL; + + gadget_req = kzalloc(size, GFP_NOWAIT); + if (!gadget_req) + return NULL; + + return gadget_req; +} + +void gadget_callback_usbRequestMemFree(struct GADGET_CTRL *priv, void *usbReq) +{ + if (!usbReq) + return; + + kfree(usbReq); +} + +static void init_peripheral_ep(struct phytium_cusb *config, + struct phytium_ep *phy_ep, struct GADGET_EP *gadget_ep, int is_tx) +{ + if (!config || !phy_ep || !gadget_ep) + return; + + memset(phy_ep, 0, sizeof(*phy_ep)); + phy_ep->config = config; + phy_ep->is_tx = is_tx; + phy_ep->gadget_ep = gadget_ep; + phy_ep->ep_num = gadget_ep->address & 0xF; + phy_ep->end_point.maxpacket = gadget_ep->maxPacket; + phy_ep->end_point.maxpacket_limit = 1024; + + INIT_LIST_HEAD(&phy_ep->req_list); + sprintf(phy_ep->name, "ep%d%s", phy_ep->ep_num, is_tx ? "in" : "out"); + + switch (phy_ep->ep_num) { + case 0: + phy_ep->end_point.caps.type_control = 1; + break; + case 1: + phy_ep->end_point.caps.type_bulk = 1; + break; + case 2: + phy_ep->end_point.caps.type_int = 1; + break; + case 3: + phy_ep->end_point.caps.type_iso = 1; + break; + default: + phy_ep->end_point.caps.type_int = 1; + phy_ep->end_point.caps.type_bulk = 1; + break; + } + + if (is_tx) { + phy_ep->end_point.caps.dir_in = 1; + phy_ep->end_point.caps.dir_out = 0; + } else { + phy_ep->end_point.caps.dir_in = 0; + phy_ep->end_point.caps.dir_out = 1; + } + + phy_ep->end_point.name = phy_ep->name; + + INIT_LIST_HEAD(&phy_ep->end_point.ep_list); + + if (!phy_ep->ep_num) { + phy_ep->end_point.ops = &gadget_ep0_ops; + config->gadget.ep0 = &phy_ep->end_point; + + if (config->gadget_dev->maxSpeed > USB_SPEED_HIGH) + config->gadget.ep0->maxpacket = 9; + } else { + phy_ep->end_point.ops = &gadget_ep_ops; + list_add_tail(&phy_ep->end_point.ep_list, &config->gadget.ep_list); + } +} + +static void gadget_init_endpoint(struct phytium_cusb *config) +{ + struct list_head *list; + struct GADGET_EP *gadget_ep; + + if (!config) + return; + + INIT_LIST_HEAD(&(config->gadget.ep_list)); + + init_peripheral_ep(config, &config->endpoints_tx[0], config->gadget_dev->ep0, 1); + init_peripheral_ep(config, &config->endpoints_rx[0], config->gadget_dev->ep0, 0); + + list_for_each(list, &config->gadget_dev->epList) { + gadget_ep = (struct GADGET_EP *)list; + if (gadget_ep->address & USB_DIR_IN) + init_peripheral_ep(config, &config->endpoints_tx[gadget_ep->address & 0xf], + gadget_ep, 1); + else + init_peripheral_ep(config, &config->endpoints_rx[gadget_ep->address & 0xf], + gadget_ep, 0); + } +} + +static int gadget_setup(struct phytium_cusb *config) +{ + int ret = -1; + + config->gadget_obj->gadget_getDevInstance(config->gadget_priv, &config->gadget_dev); + config->gadget.ops = &phytium_gadget_ops; + config->gadget.max_speed = config->gadget_dev->maxSpeed; + config->gadget.speed = USB_SPEED_HIGH; + config->gadget.name = "phytium_gadget"; + config->gadget.is_otg = 0; + + gadget_init_endpoint(config); + + ret = usb_add_gadget_udc(config->dev, &config->gadget); + if (ret) + goto err; + + return 0; + +err: + config->gadget.dev.parent = NULL; + device_unregister(&config->gadget.dev); + return ret; +} + +int phytium_gadget_reinit(struct phytium_cusb *config) +{ + struct GADGET_CTRL *ctrl; + + if (!config) + return 0; + + ctrl = (struct GADGET_CTRL 
*)config->gadget_priv; + if (!ctrl) + return 0; + + gadgetStop(ctrl); + + config->gadget_obj->gadget_init(config->gadget_priv, &config->gadget_cfg, + &config->gadget_callbacks, config->dev); + + return 0; +} + +int phytium_gadget_init(struct phytium_cusb *config) +{ + int ret; + + if (!config) + return 0; + + phytium_gadget_set_default_cfg(config); + config->gadget_obj = &GadgetObj; + + config->dma_cfg.regBase = config->gadget_cfg.regBase + 0x400; + config->dma_obj = DMA_GetInstance(); + config->dma_obj->dma_probe(&config->dma_cfg, &config->dma_sysreq); + + config->gadget_sysreq.privDataSize = sizeof(struct GADGET_CTRL); + config->gadget_sysreq.trbMemSize = config->dma_sysreq.trbMemSize + GADGET_PRIV_BUFFER_SIZE; + config->gadget_sysreq.privDataSize += config->dma_sysreq.privDataSize; + + config->gadget_priv = devm_kzalloc(config->dev, + config->gadget_sysreq.privDataSize, GFP_KERNEL); + if (!config->gadget_priv) { + ret = -ENOMEM; + goto err_probe; + } + config->gadget_cfg.trbAddr = dma_alloc_coherent(config->dev, + config->gadget_sysreq.trbMemSize, + (dma_addr_t *)&config->gadget_cfg.trbDmaAddr, GFP_KERNEL); + if (!config->gadget_cfg.trbAddr) { + ret = -ENOMEM; + goto err_dma_coherent; + } + + config->gadget_callbacks.connect = gadget_callback_connect; + config->gadget_callbacks.disconnect = gadget_callback_disconnect; + config->gadget_callbacks.setup = gadget_callback_setup; + config->gadget_callbacks.usbRequestMemAlloc = gadget_callback_usbRequestMemAlloc; + config->gadget_callbacks.usbRequestMemFree = gadget_callback_usbRequestMemFree; + + ret = config->gadget_obj->gadget_init(config->gadget_priv, &config->gadget_cfg, + &config->gadget_callbacks, config->dev); + if (ret) { + ret = -ENODEV; + goto err_init; + } + + //dev_set_drvdata(config->dev, config); + + gadget_setup(config); + + return 0; + +err_init: + dma_free_coherent(config->dev, config->gadget_sysreq.trbMemSize, + config->gadget_cfg.trbAddr, config->gadget_cfg.trbDmaAddr); +err_dma_coherent: +err_probe: + dev_set_drvdata(config->dev, NULL); + + return ret; +} + +int phytium_gadget_uninit(struct phytium_cusb *config) +{ + if (config) + usb_del_gadget_udc(&config->gadget); + + return 0; +} + +#ifdef CONFIG_PM +int phytium_gadget_resume(void *priv) +{ + struct GADGET_CTRL *ctrl; + uint32_t gen_cfg; + unsigned long flags = 0; + struct phytium_cusb *config = (struct phytium_cusb *)priv; + + if (!config) + return 0; + + ctrl = (struct GADGET_CTRL *)config->gadget_priv; + if (!ctrl) + return 0; + + spin_lock_irqsave(&config->lock, flags); + phytium_gadget_reinit(config); + + if (config->gadget_driver) { + config->gadget_obj->gadget_start(config->gadget_priv); + if (ctrl->phy_regs) { + gen_cfg = phytium_read32(&ctrl->phy_regs->gen_cfg); + gen_cfg = gen_cfg & (~BIT(7)); + phytium_write32(&ctrl->phy_regs->gen_cfg, gen_cfg); + } + } + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +int phytium_gadget_suspend(void *priv) +{ + return 0; +} +#endif diff --git a/drivers/usb/phytium/gadget.h b/drivers/usb/phytium/gadget.h new file mode 100644 index 0000000000000000000000000000000000000000..d87b55ade7a70b29189402e8164d833e6340730b --- /dev/null +++ b/drivers/usb/phytium/gadget.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PHYTIUM_GADGET_H_ +#define __PHYTIUM_GADGET_H_ + +#include +#include +#include "dma.h" + +struct GADGET_CTRL; +struct GADGET_EP; +struct GADGET_REQ; + +enum GADGET_EP_STATE { + GADGET_EP_FREE, + GADGET_EP_ALLOCATED, + GADGET_EP_BUSY, + GADGET_EP_NOT_IMPLEMENTED +}; + +enum 
GADGET_EP0_STAGE { + GADGET_EP0_STAGE_SETUP, + GADGET_EP0_STAGE_IN, + GADGET_EP0_STAGE_OUT, + GADGET_EP0_STAGE_STATUSIN, + GADGET_EP0_STAGE_STATUSOUT, + GADGET_EP0_STAGE_ACK +}; + +struct GADGET_EP_OPS { + int32_t (*epEnable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + const struct usb_endpoint_descriptor *desc); + + int32_t (*epDisable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*epSetHalt)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, uint8_t value); + + int32_t (*epSetWedge)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*epFifoStatus)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*epFifoFlush)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*reqQueue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*reqDequeue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*reqAlloc)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ **req); + + void (*reqFree)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); +}; + +struct GADGET_EP { + struct list_head epList; + char name[255]; + struct GADGET_EP_OPS *ops; + uint16_t maxPacket; + uint16_t maxStreams; + uint8_t mult; + uint8_t maxburst; + uint8_t address; + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *compDesc; +}; + +enum GADGET_DMAInterfaceWidth { + GADGET_DMA_32_WIDTH = 4, + GADGET_DMA_64_WIDTH = 8, +}; + +struct GADGET_EP_CFG { + uint8_t bufferingValue; + uint16_t startBuf; + uint16_t maxPacketSize; +}; + +struct GADGET_CFG { + uintptr_t regBase; + uintptr_t phy_regBase; + struct GADGET_EP_CFG epIN[16]; + struct GADGET_EP_CFG epOUT[16]; + enum GADGET_DMAInterfaceWidth dmaInterfaceWidth; + void *trbAddr; + uintptr_t trbDmaAddr; +}; + +struct GADGET_SYSREQ { + uint32_t privDataSize; + uint32_t trbMemSize; +}; + +struct GadgetEp { + struct GADGET_EP gadgetEp; + enum GADGET_EP_STATE state; + uint8_t hwEpNum; + uint8_t isInEp; + struct list_head request; + uint8_t iso_flag; + void *channel; + uint32_t requestsInList; + uint8_t wedged; +}; + +struct GADGET_DEV { + struct list_head epList; + struct GADGET_EP *ep0; + unsigned int speed; + unsigned int maxSpeed; + enum usb_device_state state; + uint8_t sgSupported; + uint8_t bHnpEnable; + uint8_t aHnpSupport; + char name[255]; +}; + +struct GADGET_SgList { + uintptr_t link; + uint32_t offset; + uint32_t length; + uintptr_t dmaAddress; +}; + +struct GADGET_REQ { + struct list_head list; + void *buf; + uint32_t length; + uintptr_t dma; + uint32_t numOfSgs; + uint32_t numMappedSgs; + uint16_t streamId; + uint8_t oInterrupt; + uint8_t zero; + uint8_t shortNotOk; + void *context; + uint32_t status; + uint32_t actual; + struct GADGET_SgList *sg; + void (*complete)(struct GADGET_EP *ep, struct GADGET_REQ *req); +}; + + +struct GADGET_CALLBACKS { + void (*disconnect)(struct GADGET_CTRL *priv); + + void (*connect)(struct GADGET_CTRL *priv); + + int32_t (*setup)(struct GADGET_CTRL *priv, + struct usb_ctrlrequest *ctrl); + + void *(*usbRequestMemAlloc)(struct GADGET_CTRL *priv, + uint32_t requiredSize); + + void (*usbRequestMemFree)(struct GADGET_CTRL *priv, void *usbRequest); +}; + +struct GADGET_OBJ { + int32_t (*gadget_init)(struct GADGET_CTRL *priv, struct GADGET_CFG *config, + struct GADGET_CALLBACKS *callbacks, struct device *pdev); + + void (*gadget_destroy)(struct GADGET_CTRL *priv); + + void (*gadget_start)(struct GADGET_CTRL *priv); + + void (*gadget_stop)(struct GADGET_CTRL 
*priv); + + void (*gadget_isr)(struct GADGET_CTRL *priv); + + int32_t (*gadget_epEnable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + const struct usb_endpoint_descriptor *desc); + + int32_t (*gadget_epDisable)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*gadget_epSetHalt)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, uint8_t value); + + int32_t (*gadget_epSetWedge)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*gadget_epFifoStatus)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + void (*gadget_epFifoFlush)(struct GADGET_CTRL *priv, struct GADGET_EP *ep); + + int32_t (*gadget_reqQueue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*gadget_reqDequeue)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + int32_t (*gadget_reqAlloc)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ **req); + + void (*gadget_reqFree)(struct GADGET_CTRL *priv, struct GADGET_EP *ep, + struct GADGET_REQ *req); + + void (*gadget_getDevInstance)(struct GADGET_CTRL *priv, struct GADGET_DEV **dev); + + int32_t (*gadget_dGetFrame)(struct GADGET_CTRL *priv, uint32_t *numOfFrame); + + int32_t (*gadget_dWakeUp)(struct GADGET_CTRL *priv); + + int32_t (*gadget_dSetSelfpowered)(struct GADGET_CTRL *priv); + + int32_t (*gadget_dClearSelfpowered)(struct GADGET_CTRL *priv); + + int32_t (*gadget_dVbusSession)(struct GADGET_CTRL *priv, uint8_t isActive); + + int32_t (*gadget_dVbusDraw)(struct GADGET_CTRL *priv, uint8_t mA); + + int32_t (*gadget_dPullUp)(struct GADGET_CTRL *priv, uint8_t isOn); + + void (*gadget_dGetConfigParams)(struct GADGET_CTRL *priv, + struct usb_dcd_config_params *configParams); +}; + +struct GADGET_CTRL { + struct device *dev; + struct GADGET_DEV gadgetDev; + struct HW_REGS *regs; + struct GADGET_OBJ *gadgetDrv; + struct GADGET_CFG gadgetCfg; + struct GADGET_CALLBACKS eventCallback; + struct GadgetEp in[16]; + struct GadgetEp out[16]; + enum GADGET_EP0_STAGE ep0State; + uint8_t isRemoteWakeup; + uint8_t isSelfPowered; + uint8_t deviceAddress; + struct DMA_OBJ *dmaDrv; + void *dmaController; + struct DMA_CFG dmaCfg; + struct DMA_CALLBACKS dmaCallback; + uint8_t releaseEp0Flag; + uint8_t isReady; + uint8_t *privBuffAddr; + uintptr_t privBuffDma; + uint8_t endpointInList; + uint8_t hostRequestFlag; + struct VHUB_REGS *phy_regs; +}; + +struct GadgetRequest { + struct GADGET_REQ request; + struct GadgetEp *ep; + struct GADGET_DEV *dev; + uint8_t zlp; +}; + +struct GADGET_OBJ *GADGET_GetInstance(void); + +#endif /* __PHYTIUM_GADGET_H_ */ + diff --git a/drivers/usb/phytium/host.c b/drivers/usb/phytium/host.c new file mode 100644 index 0000000000000000000000000000000000000000..75bcaa2a71477f2fc727769b95b897ea6808fa81 --- /dev/null +++ b/drivers/usb/phytium/host.c @@ -0,0 +1,2647 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +//#include "list.h" +#include "core.h" +#include "dma.h" +#include "hw-regs.h" + +#define DRV_NAME "phytium_usb" + +#define HOST_GENERIC_EP_CONTROLL 0x00 +#define HOST_GENERIC_EP_ISOC 0x01 +#define HOST_GENERIC_EP_BULK 0x02 +#define HOST_GENERIC_EP_INT 0x03 + +#define HOST_ESTALL 1 +#define HOST_EUNHANDLED 2 +#define HOST_EAUTOACK 3 +#define HOST_ESHUTDOWN 4 + +#define HOST_EP_NUM 16 +
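+/* + * List accessors: open-coded container_of() helpers mapping a list node back + * to its enclosing HOST_REQ / HOST_EP_PRIV structure. + */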
+static inline struct HOST_REQ *getUsbRequestEntry(struct list_head *list) +{ + return (struct HOST_REQ *)((uintptr_t)list - (uintptr_t)&(((struct HOST_REQ *)0)->list)); +} + +static inline struct HOST_EP_PRIV *getUsbHEpPrivEntry(struct list_head *list) +{ + struct HOST_EP_PRIV *hostEpPriv; + + if (list_empty(list)) + return NULL; + + hostEpPriv = (struct HOST_EP_PRIV *)((uintptr_t)list - + (uintptr_t)&(((struct HOST_EP_PRIV *)0)->node)); + + return hostEpPriv; +} + +static struct HOST_REQ *getNextReq(struct HOST_EP *usbEp) +{ + struct list_head *queue; + + if (!usbEp) + return NULL; + + queue = &usbEp->reqList; + + if (list_empty(queue)) + return NULL; + + return getUsbRequestEntry(queue->next); +} + +static void host_SetVbus(struct HOST_CTRL *priv, uint8_t isOn) +{ + uint8_t otgctrl = phytium_read8(&priv->regs->otgctrl); + + if (isOn) { + if (!(otgctrl & OTGCTRL_BUSREQ) || (otgctrl & OTGCTRL_ABUSDROP)) { + otgctrl &= ~OTGCTRL_ABUSDROP; + otgctrl |= OTGCTRL_BUSREQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + priv->otgState = HOST_OTG_STATE_A_WAIT_BCON; + } else { + if ((otgctrl & OTGCTRL_BUSREQ) || (otgctrl & OTGCTRL_ABUSDROP)) { + otgctrl |= OTGCTRL_ABUSDROP; + otgctrl &= ~OTGCTRL_BUSREQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + priv->otgState = HOST_OTG_STATE_A_IDLE; + } +} + +static inline void disconnectHostDetect(struct HOST_CTRL *priv) +{ + uint8_t otgctrl, otgstate; + uint32_t gen_cfg; + + if (!priv) + return; + + otgctrl = phytium_read8(&priv->regs->otgctrl); + if ((otgctrl & OTGCTRL_ASETBHNPEN) && priv->otgState == HOST_OTG_STATE_A_SUSPEND) + pr_info("Device did not respond\n"); + + phytium_write8(&priv->regs->otgirq, OTGIRQ_CONIRQ); +retry: + otgstate = phytium_read8(&priv->regs->otgstate); + if ((otgstate == HOST_OTG_STATE_A_HOST || otgstate == HOST_OTG_STATE_B_HOST)) { + pr_info("IRQ OTG: DisconnIrq Babble\n"); + goto retry; + } + + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0 | 0x04); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | 0 | 0x04); + + priv->portStatus = USB_PORT_STAT_POWER; + priv->portStatus |= USB_PORT_STAT_C_CONNECTION << 16; + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + + if (priv->otgState == HOST_OTG_STATE_A_SUSPEND) + host_SetVbus(priv, 1); + + priv->otgState = HOST_OTG_STATE_A_IDLE; + if (priv->custom_regs) { + phytium_write32(&priv->custom_regs->wakeup, 1); + } else { + gen_cfg = phytium_read32(&priv->vhub_regs->gen_cfg); + gen_cfg |= BIT(1); + phytium_write32(&priv->vhub_regs->gen_cfg, gen_cfg); + } +} + +static inline void A_IdleDetect(struct HOST_CTRL *priv, uint8_t otgstate) +{ + uint8_t otgctrl; + + if (!priv) + return; + + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + + if (otgstate != HOST_OTG_STATE_A_IDLE) { + pr_info("IRQ OTG: A_IDLE Babble\n"); + return; + } + + priv->portStatus = 0; + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGCTRL_ASETBHNPEN; + phytium_write8(&priv->regs->otgctrl, otgctrl); + + host_SetVbus(priv, 1); + + priv->otgState = HOST_OTG_STATE_A_IDLE; +} + +static inline void B_IdleDetect(struct HOST_CTRL *priv, uint8_t otgstate) +{ + uint8_t otgctrl, usbcs; + + if (!priv) + return; + + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + + if (otgstate != HOST_OTG_STATE_B_IDLE) { + pr_info("IRQ OTG: B_IDLE Babble\n"); + return; + } + + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGCTRL_ASETBHNPEN; + phytium_write8(&priv->regs->otgctrl, otgctrl); + + host_SetVbus(priv, 0); + + priv->otgState = HOST_OTG_STATE_B_IDLE; + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); +} +
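+/* + * OTG state tracking: connect, disconnect and idle transitions are driven + * from the OTG interrupt; on disconnect the FIFOs and data toggles are reset + * and the hub port status change is reported through portStatusChange(). + */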
HOST_OTG_STATE_B_IDLE; + + usbcs = phytium_read8(&priv->regs->usbcs); + usbcs &= ~USBCS_DISCON; + phytium_write8(&priv->regs->usbcs, usbcs); +} + +static uint32_t waitForBusyBit(struct HOST_CTRL *priv, struct HostEp *hwEp) +{ + uint8_t *csReg; + uint8_t flag = CS_BUSY; + uint8_t buf = 0; + uint8_t val = CS_BUSY; + uint8_t otgstate; + uint8_t bufflag = 0; + + if (!priv || !hwEp) + return 0; + + if (hwEp->isInEp) + return 0; + + if (hwEp->hwEpNum == 0) { + csReg = &priv->regs->ep0cs; + flag = EP0CS_TXBUSY_MASK; + buf = 0; + } else { + csReg = &priv->regs->ep[hwEp->hwEpNum - 1].txcs; + buf = phytium_read8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon) & CON_BUF; + } + + while ((val & flag) || bufflag == 0) { + otgstate = phytium_read8(&priv->regs->otgstate); + if (otgstate != HOST_OTG_STATE_B_HOST && otgstate != HOST_OTG_STATE_A_HOST) { + priv->ep0State = HOST_EP0_STAGE_IDLE; + return HOST_ESHUTDOWN; + } + + val = phytium_read8(csReg); + if (((val & CS_NPAK) >> CS_NPAK_OFFSET) == buf || buf == 0) + bufflag = 1; + else + bufflag = 0; + } + + return 0; +} + +static inline void connectHostDetect(struct HOST_CTRL *priv, uint8_t otgState) +{ + uint32_t gen_cfg; + + if (!priv) + return; + pr_debug("otgState:0x%x pirv->otgState:0x%x\n", otgState, priv->otgState); + if (priv->custom_regs) { + phytium_write32(&priv->custom_regs->wakeup, 0); + } else { + gen_cfg = phytium_read32(&priv->vhub_regs->gen_cfg); + gen_cfg &= ~BIT(1); + phytium_write32(&priv->vhub_regs->gen_cfg, gen_cfg); + } + + phytium_write8(&priv->regs->otgirq, OTGIRQ_CONIRQ); + + if ((otgState != HOST_OTG_STATE_A_HOST) && (otgState != HOST_OTG_STATE_B_HOST)) + return; + + if ((priv->otgState == HOST_OTG_STATE_A_PERIPHERAL) + || (priv->otgState == HOST_OTG_STATE_B_PERIPHERAL)) + priv->otgState = otgState; + + priv->ep0State = HOST_EP0_STAGE_IDLE; + + priv->portStatus &= ~(USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED | + USB_PORT_STAT_ENABLE); + + priv->portStatus |= USB_PORT_STAT_C_CONNECTION | (USB_PORT_STAT_C_CONNECTION << 16); + priv->dmaDrv->dma_controllerReset(priv->dmaController); + priv->port_resetting = 1; + host_SetVbus(priv, 1); + + switch (phytium_read8(&priv->regs->speedctrl)) { + case SPEEDCTRL_HS: + priv->portStatus |= USB_PORT_STAT_HIGH_SPEED; + pr_debug("detect High speed device\n"); + break; + case SPEEDCTRL_FS: + priv->portStatus &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); + pr_debug("detect Full speed device\n"); + break; + case SPEEDCTRL_LS: + priv->portStatus |= USB_PORT_STAT_LOW_SPEED; + pr_debug("detect Low speed device\n"); + break; + } + + priv->vBusErrCnt = 0; + priv->dmaDrv->dma_setHostMode(priv->dmaController); + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + + priv->otgState = otgState; +} + +static void hostOtgIrq(struct HOST_CTRL *priv) +{ + uint8_t otgirq, otgien; + uint8_t otgstatus, otgstate; + uint8_t otgctrl; + + if (!priv) + return; + + otgirq = phytium_read8(&priv->regs->otgirq); + otgien = phytium_read8(&priv->regs->otgien); + otgstatus = phytium_read8(&priv->regs->otgstatus); + otgstate = phytium_read8(&priv->regs->otgstate); + otgirq &= otgien; + + if (!otgirq) + return; + + if (otgirq & OTGIRQ_BSE0SRPIRQ) { + otgirq &= ~OTGIRQ_BSE0SRPIRQ; + phytium_write8(&priv->regs->otgirq, OTGIRQ_BSE0SRPIRQ); + + otgctrl = phytium_read8(&priv->regs->otgctrl); + otgctrl &= ~OTGIRQ_BSE0SRPIRQ; + phytium_write8(&priv->regs->otgctrl, otgctrl); + } + + if (otgirq & OTGIRQ_SRPDETIRQ) { + otgirq &= ~OTGIRQ_SRPDETIRQ; + phytium_write8(&priv->regs->otgirq, 
OTGIRQ_SRPDETIRQ);
+
+		otgctrl = phytium_read8(&priv->regs->otgctrl);
+		otgctrl &= ~OTGIRQ_SRPDETIRQ;
+		phytium_write8(&priv->regs->otgctrl, otgctrl);
+	}
+
+	if (otgirq & OTGIRQ_VBUSERRIRQ) {
+		otgirq &= ~OTGIRQ_VBUSERRIRQ;
+		phytium_write8(&priv->regs->otgirq, OTGIRQ_VBUSERRIRQ);
+
+		if (otgstate != HOST_OTG_STATE_A_VBUS_ERR) {
+			pr_info("IRQ OTG: VBUS ERROR Babble\n");
+			return;
+		}
+
+		host_SetVbus(priv, 0);
+		priv->otgState = HOST_OTG_STATE_A_VBUS_ERR;
+		if (priv->portStatus & USB_PORT_STAT_CONNECTION) {
+			priv->portStatus = USB_PORT_STAT_POWER | (USB_PORT_STAT_C_CONNECTION << 16);
+			if (priv->hostCallbacks.portStatusChange)
+				priv->hostCallbacks.portStatusChange(priv);
+			return;
+		}
+
+		if (priv->vBusErrCnt >= 3) {
+			priv->vBusErrCnt = 0;
+			pr_info("%s %d VBUS OVER CURRENT\n", __func__, __LINE__);
+			priv->portStatus |= USB_PORT_STAT_OVERCURRENT |
+				(USB_PORT_STAT_C_OVERCURRENT << 16);
+
+			phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ);
+		} else {
+			priv->vBusErrCnt++;
+			host_SetVbus(priv, 1);
+			phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ);
+		}
+	}
+
+	if (otgirq & OTGIRQ_CONIRQ) {
+		if (priv->otgState == HOST_OTG_STATE_A_HOST ||
+		    priv->otgState == HOST_OTG_STATE_B_HOST ||
+		    priv->otgState == HOST_OTG_STATE_A_SUSPEND) {
+			if (otgstate == HOST_OTG_STATE_A_WAIT_VFALL ||
+			    otgstate == HOST_OTG_STATE_A_WAIT_BCON ||
+			    otgstate == HOST_OTG_STATE_A_SUSPEND)
+				disconnectHostDetect(priv);
+		} else if (priv->otgState != HOST_OTG_STATE_A_HOST &&
+			   priv->otgState != HOST_OTG_STATE_B_HOST &&
+			   priv->otgState != HOST_OTG_STATE_A_SUSPEND)
+			connectHostDetect(priv, otgstate);
+
+		phytium_write8(&priv->regs->otgirq, OTGIRQ_CONIRQ);
+	}
+
+	if (otgirq & OTGIRQ_IDLEIRQ) {
+		if (!(otgstatus & OTGSTATUS_ID))
+			A_IdleDetect(priv, otgstate);
+		else
+			B_IdleDetect(priv, otgstate);
+	}
+
+	phytium_write8(&priv->regs->otgirq, OTGIRQ_IDCHANGEIRQ |
+			OTGIRQ_SRPDETIRQ);
+}
+
+static void hostErrorIrq(struct HOST_CTRL *priv)
+{
+	uint16_t txerrirq, txerrien;
+	uint16_t rxerrirq, rxerrien;
+	uint16_t i, mask;
+
+	if (!priv)
+		return;
+
+	txerrirq = phytium_read16(&priv->regs->txerrirq);
+	txerrien = phytium_read16(&priv->regs->txerrien);
+	txerrirq &= txerrien;
+
+	rxerrirq = phytium_read16(&priv->regs->rxerrirq);
+	rxerrien = phytium_read16(&priv->regs->rxerrien);
+	rxerrirq &= rxerrien;
+	if (!txerrirq && !rxerrirq)
+		return;
+
+	for (i = 0; i < HOST_EP_NUM; i++) {
+		mask = 1 << i;
+		if (rxerrirq & mask) {
+			phytium_write16(&priv->regs->rxerrirq, mask);
+			rxerrien &= ~mask;
+			phytium_write16(&priv->regs->rxerrien, rxerrien);
+			priv->dmaDrv->dma_errIsr(priv->dmaController, i, 0);
+		}
+
+		if (txerrirq & mask) {
+			phytium_write16(&priv->regs->txerrirq, mask);
+			txerrien &= ~mask;
+			phytium_write16(&priv->regs->txerrien, txerrien);
+			priv->dmaDrv->dma_errIsr(priv->dmaController, i, 1);
+		}
+	}
+}
+
+static uint32_t decodeErrorCode(uint8_t code)
+{
+	uint32_t status = 0;
+
+	switch (code) {
+	case ERR_NONE:
+		status = 0;
+		break;
+	case ERR_CRC:
+		pr_info("CRC Error\n");
+		status = HOST_ESHUTDOWN;
+		break;
+	case ERR_DATA_TOGGLE_MISMATCH:
+		pr_info("Toggle Mismatch Error\n");
+		status = HOST_ESHUTDOWN;
+		break;
+	case ERR_STALL:
+		pr_debug("Stall Error\n");
+		status = HOST_ESTALL;
+		break;
+	case ERR_TIMEOUT:
+		pr_debug("Timeout Error\n");
+		status = HOST_ESHUTDOWN;
+		break;
+	case ERR_PID:
+		pr_info("PID Error\n");
+		status = HOST_ESHUTDOWN;
+		break;
+	case ERR_TOO_LONG_PACKET:
+		pr_info("TOO_LONG_PACKET Error\n");
+		status = HOST_ESHUTDOWN;
+		break;
+	case ERR_DATA_UNDERRUN:
+		pr_info("UNDERRUN
Error\n"); + status = HOST_ESHUTDOWN; + break; + } + + return status; +} + +static struct HOST_EP_PRIV *getIntTransfer(struct list_head *head) +{ + struct list_head *listEntry = NULL; + struct HOST_EP_PRIV *usbHEpPriv = NULL; + struct HOST_EP_PRIV *usbHEpPrivActual = NULL; + + list_for_each(listEntry, head) { + usbHEpPriv = getUsbHEpPrivEntry(listEntry); + if (!usbHEpPrivActual) + usbHEpPrivActual = usbHEpPriv; + + if (usbHEpPriv->frame < usbHEpPrivActual->frame) + usbHEpPrivActual = usbHEpPriv; + } + + return usbHEpPrivActual; +} + +static void givebackRequest(struct HOST_CTRL *priv, struct HOST_REQ *usbReq, uint32_t status) +{ + if (!priv || !usbReq) + return; + + list_del(&usbReq->list); + + if (usbReq->status == EINPROGRESS) + usbReq->status = status; + + if (priv->hostCallbacks.givebackRequest) + priv->hostCallbacks.givebackRequest(priv, usbReq, status); +} + +static void hostEpProgram(struct HOST_CTRL *priv, struct HostEp *hwEp, + struct HOST_REQ *usbReq, uintptr_t dmaBuff, uint32_t length) +{ + struct HOST_EP *usbHEp; + struct HOST_EP_PRIV *usbEpPriv; + uint32_t chMaxLen; + uint8_t regCon = 0; + uint8_t ep0cs; + uint16_t txerrien = 0; + uint16_t rxerrien = 0; + uint32_t result; + uint8_t txsoftimer, rxsoftimer; + u8 retval = 0; + + if (!priv || !hwEp || !usbReq) + return; + + usbHEp = hwEp->scheduledUsbHEp; + usbEpPriv = (struct HOST_EP_PRIV *)usbHEp->hcPriv; + + if (!hwEp->channel) { + if (usbEpPriv->type == USB_ENDPOINT_XFER_ISOC) + hwEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + !hwEp->isInEp, hwEp->hwEpNum, 1); + else + hwEp->channel = priv->dmaDrv->dma_channelAlloc(priv->dmaController, + !hwEp->isInEp, hwEp->hwEpNum, 0); + } + + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, hwEp->channel); + + pr_debug("chMaxLen:0x%x buffLength:0x%x\n", chMaxLen, usbReq->buffLength); + if (usbReq->buffLength > chMaxLen) + length = chMaxLen; + + switch (usbEpPriv->type) { + case USB_ENDPOINT_XFER_CONTROL: + regCon = CON_TYPE_CONTROL; + break; + case USB_ENDPOINT_XFER_BULK: + regCon = CON_TYPE_BULK; + break; + case USB_ENDPOINT_XFER_INT: + regCon = CON_TYPE_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + if (usbEpPriv->isocEpConfigured) + goto dma_program; + + usbEpPriv->isocEpConfigured = 1; + regCon = CON_TYPE_ISOC; + switch (usbHEp->desc.wMaxPacketSize >> 11) { + case 0: + regCon |= CON_TYPE_ISOC_1_ISOD; + priv->dmaDrv->dma_setMaxLength(priv->dmaController, + hwEp->channel, usbEpPriv->maxPacketSize); + break; + case 1: + regCon |= CON_TYPE_ISOC_2_ISOD; + priv->dmaDrv->dma_setMaxLength(priv->dmaController, + hwEp->channel, 2 * 1024); + break; + case 2: + priv->dmaDrv->dma_setMaxLength(priv->dmaController, + hwEp->channel, 3 * 1024); + regCon |= CON_TYPE_ISOC_3_ISOD; + break; + } + break; + } + + if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + if (!hwEp->hwEpNum) { + if (phytium_read8(&priv->regs->ep0cs) & 0x4) { + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + 0 | 0x4); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + FIFOCTRL_IO_TX | 0 | 0x4); + } + } + if (waitForBusyBit(priv, hwEp) > 0) { + usbReq->status = HOST_ESHUTDOWN; + givebackRequest(priv, usbReq, HOST_ESHUTDOWN); + pr_info("something error happen\n"); + return; + } + } + + if (!hwEp->isInEp) { + if (hwEp->hwEpNum) { + regCon |= hwEp->hwBuffers - 1; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, regCon); + if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + retval = priv->hostCallbacks.getEpToggle(priv, + usbReq->usbDev, usbEpPriv->epNum, 0); + if 
(retval) { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_IO_TX); + + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGSETQ | ENDPRST_IO_TX | ENDPRST_FIFORST); + } else { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_IO_TX); + + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGRST | ENDPRST_IO_TX | ENDPRST_FIFORST); + } + } + + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, regCon | CON_VAL); + phytium_write16(&priv->regs->txmaxpack[hwEp->hwEpNum - 1], + usbEpPriv->maxPacketSize); + + phytium_write8(&priv->regs->epExt[hwEp->hwEpNum - 1].txctrl, + usbEpPriv->epNum); + + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + + // if (usbEpPriv->type == USB_ENDPOINT_XFER_INT) + txsoftimer = phytium_read8(&priv->regs->txsoftimer[hwEp->hwEpNum].ctrl); + txsoftimer = txsoftimer | BIT(1); + phytium_write8(&priv->regs->txsoftimer[hwEp->hwEpNum].ctrl, txsoftimer); + + phytium_write16(&priv->regs->txsoftimer[hwEp->hwEpNum].timer, + usbEpPriv->frame); + + phytium_write8(&priv->regs->txsoftimer[hwEp->hwEpNum].ctrl, 0x83); + } else { + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + phytium_write8(&priv->regs->ep0maxpack, usbEpPriv->maxPacketSize); + phytium_write8(&priv->regs->ep0ctrl, usbEpPriv->epNum); + + if (priv->ep0State == HOST_EP0_STAGE_SETUP) { + ep0cs = phytium_read8(&priv->regs->ep0cs); + ep0cs |= EP0CS_HCSET; + phytium_write8(&priv->regs->ep0cs, ep0cs); + } + } + + phytium_write16(&priv->regs->txerrirq, 1 << hwEp->hwEpNum); + txerrien = phytium_read16(&priv->regs->txerrien); + txerrien |= 1 << hwEp->hwEpNum; + phytium_write16(&priv->regs->txerrien, txerrien); + } else { + if (hwEp->hwEpNum) { + regCon |= hwEp->hwBuffers - 1; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, regCon); + + if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + if (priv->hostCallbacks.getEpToggle) { + retval = priv->hostCallbacks.getEpToggle(priv, + usbReq->usbDev, usbEpPriv->epNum, 1); + if (retval) { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGSETQ | ENDPRST_FIFORST); + } else { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | + ENDPRST_TOGRST | ENDPRST_FIFORST); + } + } + } + + phytium_write16(&priv->regs->rxmaxpack[hwEp->hwEpNum - 1], + usbEpPriv->maxPacketSize); + + phytium_write8(&priv->regs->epExt[hwEp->hwEpNum - 1].rxctrl, + usbEpPriv->epNum); + + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcs, 1); + + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, regCon | CON_VAL); + rxsoftimer = phytium_read8(&priv->regs->rxsoftimer[hwEp->hwEpNum].ctrl); + rxsoftimer = rxsoftimer | BIT(1); + phytium_write8(&priv->regs->rxsoftimer[hwEp->hwEpNum].ctrl, rxsoftimer); + + phytium_write16(&priv->regs->rxsoftimer[hwEp->hwEpNum].timer, + usbEpPriv->frame); + + phytium_write8(&priv->regs->rxsoftimer[hwEp->hwEpNum].ctrl, 0x83); + } else { + phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); + phytium_write8(&priv->regs->ep0maxpack, usbEpPriv->maxPacketSize); + phytium_write8(&priv->regs->ep0ctrl, usbEpPriv->epNum); + + if (priv->ep0State == HOST_EP0_STAGE_IN + || priv->ep0State == HOST_EP0_STAGE_STATUSIN) + 
phytium_write8(&priv->regs->ep0cs, EP0CS_HCSETTOGGLE); + } + + phytium_write16(&priv->regs->rxerrirq, 1 << hwEp->hwEpNum); + rxerrien = phytium_read16(&priv->regs->rxerrien); + rxerrien |= 1 << hwEp->hwEpNum; + phytium_write16(&priv->regs->rxerrien, rxerrien); + } +dma_program: + result = priv->dmaDrv->dma_channelProgram(priv->dmaController, hwEp->channel, + usbEpPriv->maxPacketSize, dmaBuff, length, + (void *)usbReq->isoFramesDesc, usbReq->isoFramesNumber); + if (result) { + if (!hwEp->isInEp) { + txerrien &= ~(1 << hwEp->hwEpNum); + if (hwEp->hwEpNum) + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, 0x08); + phytium_write16(&priv->regs->txerrien, txerrien); + } else { + rxerrien &= ~(1 << hwEp->hwEpNum); + if (hwEp->hwEpNum) + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, 0x08); + phytium_write16(&priv->regs->rxerrien, rxerrien); + } + + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } +} + +static void hostStartReq(struct HOST_CTRL *priv, struct HOST_REQ *req) +{ + uintptr_t dmaBuff; + uint32_t length; + struct HOST_EP *hostEp; + struct HOST_EP_PRIV *hostEpPriv; + struct HOST_EP_PRIV *hostNewEpPriv; + struct HOST_REQ *usbReq = NULL; + struct HostEp *hwEp = NULL; + int num; + + if (!priv || !req) + return; + + hostEp = req->hcPriv; + if (hostEp) { + hostEpPriv = (struct HOST_EP_PRIV *)hostEp->hcPriv; + if (hostEpPriv) { + hostEpPriv->genericHwEp->state = HOST_EP_BUSY; + switch (hostEpPriv->type) { + case USB_ENDPOINT_XFER_CONTROL: + usbReq = getNextReq(hostEp); + + priv->in[HOST_GENERIC_EP_CONTROLL].scheduledUsbHEp = hostEp; + priv->ep0State = HOST_EP0_STAGE_SETUP; + hostEpPriv->currentHwEp = hostEpPriv->genericHwEp; + hostEpPriv->genericHwEp->scheduledUsbHEp = hostEp; + dmaBuff = usbReq->setupDma; + length = 8; + pr_debug("packet info: %02x %02x %04x %04x %04x\n", + usbReq->setup->bRequestType, + usbReq->setup->bRequest, + usbReq->setup->wValue, + usbReq->setup->wIndex, + usbReq->setup->wLength); + hostEpProgram(priv, hostEpPriv->genericHwEp, + usbReq, dmaBuff, length); + break; + case USB_ENDPOINT_XFER_BULK: + hwEp = hostEpPriv->genericHwEp; + usbReq = getNextReq(hostEp); + hostEpPriv->currentHwEp = hwEp; + hwEp->scheduledUsbHEp = hostEp; + dmaBuff = usbReq->buffDma + usbReq->actualLength; + length = usbReq->buffLength - usbReq->actualLength; + hostEpProgram(priv, hwEp, usbReq, dmaBuff, length); + break; + case USB_ENDPOINT_XFER_INT: + if (hostEpPriv->genericHwEp->scheduledUsbHEp) + return; + + num = req->epNum - 1; + if (hostEpPriv->isIn) + hostNewEpPriv = getIntTransfer(&priv->intInHEpQueue[num]); + else + hostNewEpPriv = getIntTransfer(&priv->intOutHEpQueue[num]); + + hostNewEpPriv->currentHwEp = hostEpPriv->genericHwEp; + hostEpPriv->genericHwEp->scheduledUsbHEp = hostNewEpPriv->usbHEp; + usbReq = getNextReq(hostEp); + dmaBuff = usbReq->buffDma + usbReq->actualLength; + length = usbReq->buffLength - usbReq->actualLength; + hostEpProgram(priv, hostEpPriv->genericHwEp, + usbReq, dmaBuff, length); + break; + case USB_ENDPOINT_XFER_ISOC: + hostEpPriv->currentHwEp = hostEpPriv->genericHwEp; + hostEpPriv->genericHwEp->scheduledUsbHEp = hostEp; + dmaBuff = req->buffDma + req->actualLength; + length = req->buffLength - req->actualLength; + hostEpProgram(priv, hostEpPriv->genericHwEp, req, dmaBuff, length); + break; + } + } + } +} + + +static void scheduleNextTransfer(struct HOST_CTRL *priv, + struct HOST_REQ *usbReq, struct HostEp *hwEp) +{ + struct HOST_EP *usbEp; + struct HOST_EP_PRIV *usbHEpPriv; + uint8_t endprst; + 
uint32_t status; + struct HOST_REQ *usbNextReq = NULL; + + if (!priv || !usbReq || !hwEp) + return; + + usbEp = hwEp->scheduledUsbHEp; + usbHEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + if (!usbHEpPriv) + return; + + status = (usbReq->status == EINPROGRESS) ? 0 : usbReq->status; + switch (usbHEpPriv->type) { + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + if (hwEp->isInEp) { + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); + endprst = (phytium_read8(&priv->regs->endprst) & ENDPRST_TOGSETQ) ? 1 : 0; + if (priv->hostCallbacks.setEpToggle) + priv->hostCallbacks.setEpToggle(priv, usbReq->usbDev, + usbHEpPriv->epNum, usbHEpPriv->isIn, endprst); + } else { + if (waitForBusyBit(priv, hwEp) > 0) { + usbReq->status = HOST_ESHUTDOWN; + givebackRequest(priv, usbReq, HOST_ESHUTDOWN); + return; + } + + phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | ENDPRST_IO_TX); + endprst = (phytium_read8(&priv->regs->endprst) & ENDPRST_TOGSETQ) ? 1 : 0; + if (priv->hostCallbacks.setEpToggle) + priv->hostCallbacks.setEpToggle(priv, usbReq->usbDev, + usbHEpPriv->epNum, usbHEpPriv->isIn, endprst); + } + break; + } + + if (usbHEpPriv->transferFinished) + givebackRequest(priv, usbReq, status); + + if (list_empty(&usbEp->reqList)) { + if (usbHEpPriv->type == USB_ENDPOINT_XFER_CONTROL) + hwEp->state = HOST_EP_ALLOCATED; + else { + if (usbHEpPriv->genericHwEp == hwEp) + hwEp->state = HOST_EP_ALLOCATED; + else + hwEp->state = HOST_EP_FREE; + } + + usbHEpPriv->epIsReady = 0; + usbHEpPriv->currentHwEp = NULL; + hwEp->scheduledUsbHEp = NULL; + + if (hwEp->channel) { + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } + + if (usb_endpoint_xfer_int(&usbEp->desc)) + list_del(&usbHEpPriv->node); + usbHEpPriv = NULL; + } else { + if (usbHEpPriv->type == USB_ENDPOINT_XFER_INT) { + usbHEpPriv->currentHwEp = NULL; + hwEp->scheduledUsbHEp = NULL; + } + + if (usbHEpPriv->type == USB_ENDPOINT_XFER_ISOC) + return; + } + + if (usbHEpPriv) { + usbNextReq = getNextReq(usbHEpPriv->usbHEp); + hostStartReq(priv, usbNextReq); + } +} + +static int32_t hostEp0Irq(struct HOST_CTRL *priv, uint8_t isIn) +{ + struct HostEp *hwEp; + struct HOST_EP *hostEp; + struct HOST_REQ *usbReq = NULL; + struct HOST_EP_PRIV *usbHEpPriv; + uint32_t status, length; + uint8_t usbError; + uint8_t nextStage = 0; + int ret = 0; + + if (!priv) + return -EINVAL; + + hwEp = isIn ? &priv->in[HOST_GENERIC_EP_CONTROLL] : &priv->out[HOST_GENERIC_EP_CONTROLL]; + hostEp = hwEp->scheduledUsbHEp; + usbHEpPriv = (struct HOST_EP_PRIV *)hostEp->hcPriv; + + usbReq = getNextReq(hostEp); + if (!usbReq) + return 0; + + usbError = isIn ? 
phytium_read8(&priv->regs->rx0err) : phytium_read8(&priv->regs->tx0err); + usbError = (usbError & ERR_TYPE) >> 2; + status = decodeErrorCode(usbError); + if (status) { + usbReq->status = status; + + if (status == HOST_ESTALL) { + priv->dmaDrv->dma_controllerReset(priv->dmaController); + ret = 1; + } + + phytium_write16(&priv->regs->rxerrirq, 1 << hwEp->hwEpNum); + phytium_write16(&priv->regs->txerrirq, 1 << hwEp->hwEpNum); + + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0 | 0x4); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + FIFOCTRL_IO_TX | 0 | 0x4); + } + + length = priv->dmaDrv->dma_getActualLength(priv->dmaController, hwEp->channel); + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + + if (usbReq->status == EINPROGRESS && priv->ep0State < HOST_EP0_STAGE_STATUSIN) { + nextStage = 0; + switch (priv->ep0State) { + case HOST_EP0_STAGE_IN: + pr_debug("Ep0 Data IN\n"); + usbHEpPriv->currentHwEp = &priv->out[HOST_GENERIC_EP_CONTROLL]; + usbReq->actualLength = length; + priv->ep0State = HOST_EP0_STAGE_STATUSOUT; + break; + case HOST_EP0_STAGE_OUT: + pr_debug("Ep0 Data OUT\n"); + usbHEpPriv->currentHwEp = &priv->in[HOST_GENERIC_EP_CONTROLL]; + usbReq->actualLength = length; + priv->ep0State = HOST_EP0_STAGE_STATUSIN; + break; + case HOST_EP0_STAGE_SETUP: + pr_debug("Ep0 Stage Setup\n"); + if (!usbReq->setup->wLength) { + pr_debug("EP0_STAGE_STATUSIN\n"); + priv->ep0State = HOST_EP0_STAGE_STATUSIN; + usbHEpPriv->currentHwEp = &priv->in[HOST_GENERIC_EP_CONTROLL]; + break; + } else if (usbReq->setup->bRequestType & USB_DIR_IN) { + pr_debug("EP0_STAGE_STAGE_IN\n"); + priv->ep0State = HOST_EP0_STAGE_IN; + usbHEpPriv->currentHwEp = &priv->in[HOST_GENERIC_EP_CONTROLL]; + nextStage = 1; + break; + } + priv->ep0State = HOST_EP0_STAGE_OUT; + nextStage = 1; + break; + case HOST_EP0_STAGE_STATUSIN: + case HOST_EP0_STAGE_STATUSOUT: + case HOST_EP0_STAGE_ACK: + case HOST_EP0_STAGE_IDLE: + default: + pr_debug("EP0 STAGE is %d\n", priv->ep0State); + break; + } + + if (nextStage) { + length = usbReq->buffLength; + hostEpProgram(priv, usbHEpPriv->currentHwEp, usbReq, + usbReq->buffDma, length); + } else + hostEpProgram(priv, usbHEpPriv->currentHwEp, usbReq, usbReq->setupDma, 0); + } else + priv->ep0State = HOST_EP0_STAGE_IDLE; + + if (priv->ep0State == HOST_EP0_STAGE_IDLE || usbReq->status != EINPROGRESS) { + usbHEpPriv->transferFinished = 1; + scheduleNextTransfer(priv, usbReq, hwEp); + } + + return ret; +} + +static void updateTimeIntTransfer(struct list_head *head, struct HOST_EP_PRIV *lastFinished) +{ + struct list_head *listEntry = NULL; + struct HOST_EP_PRIV *usbHEpPriv = NULL; + uint16_t time = lastFinished->frame; + + list_for_each(listEntry, head) { + usbHEpPriv = getUsbHEpPrivEntry(listEntry); + if (usbHEpPriv == lastFinished) { + lastFinished->frame = lastFinished->interval; + continue; + } + + if (usbHEpPriv->frame < time) + usbHEpPriv->frame = 0; + else + lastFinished->interval = usbHEpPriv->frame; + } +} + +static int32_t hostEpXIrq(struct HOST_CTRL *priv, uint8_t hwEpNum, uint8_t isIn, bool resubmit) +{ + struct HostEp *hwEp; + struct HOST_EP *hostEp; + struct HOST_EP_PRIV *usbHEpPriv; + struct HOST_REQ *usbReq; + + uint8_t usbError, rxcon, txcon; + uint32_t status, length, chMaxLen; + + if (!priv) + return -EINVAL; + + hwEp = isIn ? 
&priv->in[hwEpNum] : &priv->out[hwEpNum]; + hostEp = hwEp->scheduledUsbHEp; + if (!hostEp) + return -EINVAL; + + usbHEpPriv = (struct HOST_EP_PRIV *)hostEp->hcPriv; + + usbReq = getNextReq(hostEp); + if (!usbReq) + return 0; + + if (isIn) + usbError = phytium_read8(&priv->regs->epExt[hwEpNum - 1].rxerr); + else + usbError = phytium_read8(&priv->regs->epExt[hwEpNum - 1].txerr); + + usbError = (usbError & ERR_TYPE) >> 2; + status = decodeErrorCode(usbError); + if (status) { + pr_debug("%s %d Aborting\n", __func__, __LINE__); + if (isIn) + phytium_write16(&priv->regs->rxerrirq, 1 << hwEpNum); + else + phytium_write16(&priv->regs->txerrirq, 1 << hwEpNum); + priv->dmaDrv->dma_channelAbort(priv->dmaController, hwEp->channel); + } + + length = priv->dmaDrv->dma_getActualLength(priv->dmaController, hwEp->channel); + chMaxLen = priv->dmaDrv->dma_getMaxLength(priv->dmaController, hwEp->channel); + + if (status != 0) + usbReq->status = status; + + usbReq->actualLength += length; + + if (!resubmit) + usbHEpPriv->transferFinished = 1; + + if (length == chMaxLen) { + if ((usbReq->buffLength - usbReq->actualLength) == 0) + usbHEpPriv->transferFinished = 1; + else + usbHEpPriv->transferFinished = 0; + } + + if (usbHEpPriv->type != USB_ENDPOINT_XFER_ISOC) { + if (!hwEp->isInEp) { + txcon = phytium_read8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon); + txcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, txcon); + } else { + rxcon = phytium_read8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon); + rxcon &= ~CON_VAL; + phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].rxcon, rxcon); + } + priv->dmaDrv->dma_channelRelease(priv->dmaController, hwEp->channel); + hwEp->channel = NULL; + } + + if (usbHEpPriv->type == USB_ENDPOINT_XFER_INT) { + if (usbHEpPriv->isIn) + updateTimeIntTransfer(&priv->intInHEpQueue[hwEp->hwEpNum - 1], usbHEpPriv); + else + updateTimeIntTransfer(&priv->intOutHEpQueue[hwEp->hwEpNum - 1], usbHEpPriv); + } + + scheduleNextTransfer(priv, usbReq, hwEp); + + return 0; +} + + +static void host_CallbackTransfer(void *priv, uint8_t epNum, uint8_t isDirTx, bool resubmit) +{ + struct HOST_CTRL *host_priv = (struct HOST_CTRL *)priv; + int i; + int ret; + + if (!epNum) { + ret =hostEp0Irq(host_priv, !isDirTx); + if (resubmit || ret) { + for (i = 1; i < HOST_EP_NUM; i++) { + hostEpXIrq(host_priv, i, !isDirTx, true); + hostEpXIrq(host_priv, i, isDirTx, true); + } + } + } else + hostEpXIrq(host_priv, epNum, !isDirTx, false); +} + +static int hc_reset(struct usb_hcd *hcd) +{ + hcd->speed = HCD_USB2; + hcd->self.root_hub->speed = USB_SPEED_HIGH; + + return 0; +} + +static int hc_start(struct usb_hcd *hcd) +{ + hcd->state = HC_STATE_RUNNING; + return 0; +} + +static void hc_stop(struct usb_hcd *hcd) +{ + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (config) + config->host_obj->host_stop(config->host_priv); + + if (config->host_cfg.trbAddr) { + dma_free_coherent(config->dev, config->host_sysreq.trbMemSize, + config->host_cfg.trbAddr, config->host_cfg.trbDmaAddr); + config->host_cfg.trbAddr = NULL; + } + + if (config->host_priv) { + devm_kfree(config->dev, config->host_priv); + config->host_priv = NULL; + } + + hcd->state = HC_STATE_HALT; +} + +static void hc_shutdown(struct usb_hcd *hcd) +{ +} + +static void host_endpoint_update(struct phytium_cusb *config, + struct HOST_USB_DEVICE *udev, struct usb_host_endpoint *ep) +{ + int epnum; + struct HOST_EP *hostEp; + + if (!config || !udev || !ep) + return; + + epnum = usb_endpoint_num(&ep->desc); + if (epnum > 
MAX_INSTANCE_EP_NUM) + epnum = MAX_INSTANCE_EP_NUM; + + if (usb_endpoint_dir_out(&ep->desc)) { + if (udev->out_ep[epnum] == NULL) { + hostEp = kzalloc(sizeof(struct HOST_EP) + + config->host_obj->host_epGetPrivateDataSize(config->host_priv), + GFP_ATOMIC); + udev->out_ep[epnum] = hostEp; + } else + hostEp = udev->out_ep[epnum]; + } else { + if (udev->in_ep[epnum] == NULL) { + hostEp = kzalloc(sizeof(struct HOST_EP) + + config->host_obj->host_epGetPrivateDataSize(config->host_priv), + GFP_ATOMIC); + udev->in_ep[epnum] = hostEp; + } else + hostEp = udev->in_ep[epnum]; + } + + hostEp->desc = *(struct usb_endpoint_descriptor *)(&ep->desc); + hostEp->userExt = (void *)ep; + INIT_LIST_HEAD(&hostEp->reqList); + hostEp->hcPriv = &((uint8_t *)hostEp)[sizeof(struct HOST_EP)]; +} + +static int hc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ + unsigned long flags; + u32 isoFrameSize; + int retval; + int index; + struct HOST_REQ *req; + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usbDev; + struct usb_host_endpoint *host_ep; + struct usb_endpoint_descriptor *host_ep_desc; + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (!config) + return -ENODEV; + + priv = config->host_priv; + if (!priv) + return -ENODEV; + + usbDev = priv->host_devices_table[urb->dev->slot_id]; + if (!usbDev) + return -ENODEV; + + host_ep = urb->ep; + host_ep_desc = &host_ep->desc; + + spin_lock_irqsave(&config->lock, flags); + retval = usb_hcd_link_urb_to_ep(hcd, urb); + if (retval < 0) { + spin_unlock_irqrestore(&config->lock, flags); + return retval; + } + spin_unlock_irqrestore(&config->lock, flags); + isoFrameSize = urb->number_of_packets * sizeof(struct HOST_ISOFRAMESDESC); + req = kzalloc((sizeof(struct HOST_REQ) + + isoFrameSize), mem_flags); + if (!req) { + spin_lock_irqsave(&config->lock, flags); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&config->lock, flags); + return -ENOMEM; + } + + req->isoFramesDesc = NULL; + req->isoFramesNumber = urb->number_of_packets; + req->epNum = usb_endpoint_num(host_ep_desc); + if (req->epNum > MAX_INSTANCE_EP_NUM) + req->epNum = MAX_INSTANCE_EP_NUM; + + if (usb_endpoint_dir_in(host_ep_desc)) { + if (!usbDev->in_ep[req->epNum]) + host_endpoint_update(config, usbDev, host_ep); + } else { + if (!usbDev->out_ep[req->epNum]) + host_endpoint_update(config, usbDev, host_ep); + } + + if (usb_endpoint_xfer_isoc(host_ep_desc)) { + req->isoFramesDesc = (struct HOST_ISOFRAMESDESC *)(&req[1]); + for (index = 0; index < urb->number_of_packets; index++) { + req->isoFramesDesc[index].length = urb->iso_frame_desc[index].length; + req->isoFramesDesc[index].offset = urb->iso_frame_desc[index].offset; + } + } + + spin_lock_irqsave(&config->lock, flags); + INIT_LIST_HEAD(&req->list); + req->userExt = (void *)urb; + req->actualLength = urb->actual_length; + req->bufAddress = urb->transfer_buffer; + req->buffDma = urb->transfer_dma; + req->buffLength = urb->transfer_buffer_length; + req->epIsIn = usb_endpoint_dir_in(host_ep_desc); + req->eptype = usb_endpoint_type(host_ep_desc); + req->faddress = usb_pipedevice(urb->pipe); + req->interval = urb->interval; + req->reqUnlinked = 0; + req->setup = (struct usb_ctrlrequest *)urb->setup_packet; + req->setupDma = urb->setup_dma; + req->status = EINPROGRESS; + req->usbDev = &usbDev->udev; + req->usbEp = req->epIsIn ? 
usbDev->in_ep[req->epNum] : usbDev->out_ep[req->epNum];
+	if (!req->epNum)
+		usbDev->ep0_hep.desc.wMaxPacketSize = urb->dev->ep0.desc.wMaxPacketSize;
+
+	req->hcPriv = (void *)req->usbEp;
+	urb->hcpriv = req;
+	host_ep->hcpriv = (void *)usbDev;
+	retval = config->host_obj->host_reqQueue(config->host_priv, req);
+	if (retval) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		urb->hcpriv = NULL;
+		spin_unlock_irqrestore(&config->lock, flags);
+		kfree(req);
+		return retval;
+	}
+	spin_unlock_irqrestore(&config->lock, flags);
+
+	return 0;
+}
+
+static int hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	unsigned long flags;
+	struct HOST_CTRL *priv;
+	int ret = 0;
+	struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv;
+
+	if (!config)
+		return -ENODEV;
+
+	priv = config->host_priv;
+	if (!priv->host_devices_table[urb->dev->slot_id])
+		return -ENODEV;
+
+	spin_lock_irqsave(&config->lock, flags);
+	if (usb_hcd_check_unlink_urb(hcd, urb, status))
+		goto done;
+
+	if (!urb->hcpriv)
+		goto err_giveback;
+
+	ret = config->host_obj->host_reqDequeue(priv, urb->hcpriv, status);
+	kfree(urb->hcpriv);
+	urb->hcpriv = NULL;
+done:
+	spin_unlock_irqrestore(&config->lock, flags);
+	return ret;
+
+err_giveback:
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock_irqrestore(&config->lock, flags);
+	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
+	return ret;
+}
+
+static void hc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ld_ep)
+{
+	struct HOST_USB_DEVICE *usbDev;
+	int ep_num = usb_endpoint_num(&ld_ep->desc);
+
+	if (ep_num > MAX_INSTANCE_EP_NUM)
+		ep_num = MAX_INSTANCE_EP_NUM;
+
+	usbDev = (struct HOST_USB_DEVICE *)ld_ep->hcpriv;
+	if (!usbDev)
+		return;
+
+	if (ld_ep->desc.bEndpointAddress) {
+		if (usb_endpoint_dir_in(&ld_ep->desc)) {
+			if (usbDev->in_ep[ep_num]) {
+				usbDev->in_ep[ep_num]->userExt = NULL;
+				INIT_LIST_HEAD(&usbDev->in_ep[ep_num]->reqList);
+				kfree(usbDev->in_ep[ep_num]);
+				usbDev->in_ep[ep_num] = NULL;
+			}
+		} else {
+			if (usbDev->out_ep[ep_num]) {
+				usbDev->out_ep[ep_num]->userExt = NULL;
+				INIT_LIST_HEAD(&usbDev->out_ep[ep_num]->reqList);
+				kfree(usbDev->out_ep[ep_num]);
+				usbDev->out_ep[ep_num] = NULL;
+			}
+		}
+	}
+}
+
+static int hc_get_frame(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+static int hc_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct HOST_USB_DEVICE *usbDev;
+	struct phytium_cusb *config;
+	struct HOST_CTRL *priv;
+	unsigned long flags = 0;
+	int index;
+	int slot = -1;
+
+	if (!hcd || !udev)
+		return 0;
+
+	config = *(struct phytium_cusb **)hcd->hcd_priv;
+	if (!config)
+		return 0;
+
+	spin_lock_irqsave(&config->lock, flags);
+	priv = config->host_priv;
+	for (index = 0; index < MAX_SUPPORTED_DEVICES; index++) {
+		if (priv->host_devices_table[index] == NULL) {
+			slot = index;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&config->lock, flags);
+
+	if (slot < 0)
+		return 0;
+
+	usbDev = kzalloc((sizeof(struct HOST_USB_DEVICE) +
+			config->host_obj->host_epGetPrivateDataSize(priv)), GFP_KERNEL);
+	if (!usbDev)
+		return 0;
+
+	usbDev->ep0_hep.hcPriv = &((uint8_t *)usbDev)[sizeof(struct HOST_USB_DEVICE)];
+	usbDev->udev.userExt = (void *)usbDev;
+	usbDev->ld_udev = udev;
+	usbDev->ld_udev->slot_id = slot;
+	usbDev->ep0_hep.desc.bLength = 7;
+	usbDev->ep0_hep.desc.bDescriptorType = USB_DT_ENDPOINT;
+	usbDev->in_ep[0] = &usbDev->ep0_hep;
+	usbDev->out_ep[0] = &usbDev->ep0_hep;
+	INIT_LIST_HEAD(&usbDev->ep0_hep.reqList);
+
+	priv->host_devices_table[slot] = usbDev;
+
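+	/*
+	 * The usb core treats a zero return from alloc_dev() as failure and
+	 * any non-zero value as success, so every failure path above returns
+	 * 0 and success is reported as 1 below.
+	 */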
+ return 1; +} + +static void hc_free_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usbDev; + struct phytium_cusb *config; + int i; + + if (!hcd || !udev) + return; + + config = *(struct phytium_cusb **)(hcd->hcd_priv); + if (!config) + return; + + priv = (struct HOST_CTRL *)config->host_priv; + usbDev = priv->host_devices_table[udev->slot_id]; + if (!usbDev) + return; + + usbDev->in_ep[0] = NULL; + usbDev->out_ep[0] = NULL; + for (i = 1; i < HOST_EP_NUM; i++) { + if (usbDev->in_ep[i]) + hc_endpoint_disable(hcd, + (struct usb_host_endpoint *)usbDev->in_ep[i]->userExt); + if (usbDev->out_ep[i]) + hc_endpoint_disable(hcd, + (struct usb_host_endpoint *)usbDev->out_ep[i]->userExt); + } + + priv->host_devices_table[udev->slot_id] = NULL; + usbDev->ld_udev->slot_id = 0; + usbDev->udev.userExt = (void *)NULL; + kfree(usbDev); +} + +static int hc_reset_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usb_device; + struct phytium_cusb *config; + + if (!hcd || !udev) + return -EINVAL; + + config = *(struct phytium_cusb **)hcd->hcd_priv; + priv = (struct HOST_CTRL *)config->host_priv; + usb_device = priv->host_devices_table[udev->slot_id]; + if (!usb_device) + return -ENODEV; + + usb_device->udev.speed = usb_device->ld_udev->speed; + usb_device->udev.devnum = usb_device->ld_udev->devnum; + + if (USB_SPEED_HIGH == usb_device->udev.speed || USB_SPEED_FULL == usb_device->udev.speed) + usb_device->ep0_hep.desc.wMaxPacketSize = 64; + else + usb_device->ep0_hep.desc.wMaxPacketSize = 8; + + pr_debug("speed:%d ep0 wMaxPacketSize:%d\n", usb_device->udev.speed, + usb_device->ep0_hep.desc.wMaxPacketSize); + + return 0; +} + +static int hc_update_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct HOST_CTRL *priv; + struct HOST_USB_DEVICE *usb_device; + struct phytium_cusb *config; + + if (!hcd || !udev) + return -EINVAL; + + config = *(struct phytium_cusb **)(hcd->hcd_priv); + priv = (struct HOST_CTRL *)config->host_priv; + + usb_device = priv->host_devices_table[udev->slot_id]; + if (!usb_device) + return -ENODEV; + + usb_device->udev.devnum = udev->devnum; + + return 0; +} + +static int hc_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + return 0; +} + +static int hc_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + return 0; +} + +static int hc_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct HOST_CTRL *priv; + struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv; + + if (!config) + return 0; + + priv = (struct HOST_CTRL *)config->host_priv; + if (!priv) + return 0; + + if (priv->portStatus & 0xffff0000) { + *buf = 0x02; + return 1; + } + + return 0; +} + +#ifdef CONFIG_PM +static int hc_bus_suspend(struct usb_hcd *hcd) +{ + unsigned long flags; + struct phytium_cusb *config = *(struct phytium_cusb **)(hcd->hcd_priv); + + if (!config) + return 0; + + spin_lock_irqsave(&config->lock, flags); + hcd->state = HC_STATE_SUSPENDED; + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} + +static int hc_bus_resume(struct usb_hcd *hcd) +{ + unsigned long flags; + struct phytium_cusb *config = *(struct phytium_cusb **)(hcd->hcd_priv); + + if (!config) + return 0; + + spin_lock_irqsave(&config->lock, flags); + hcd->state = HC_STATE_RESUMING; + spin_unlock_irqrestore(&config->lock, flags); + return 0; +} +#endif + +static void host_giveback_request(struct 
HOST_CTRL *priv, + struct HOST_REQ *req, uint32_t status) +{ + struct urb *urb_req; + int urb_status = 0; + int i = 0; + struct phytium_cusb *config; + + if (!priv || !req) + return; + + urb_req = req->userExt; + urb_req->actual_length = req->actualLength; + + switch (status) { + case HOST_ESTALL: + urb_status = -EPIPE; + break; + case HOST_EUNHANDLED: + urb_status = -EPROTO; + break; + case HOST_ESHUTDOWN: + urb_status = -ESHUTDOWN; + break; + default: + urb_status = status; + } + pr_debug("complete %p %pS (%d) dev%d ep%d%s %d/%d\n", + urb_req, urb_req->complete, urb_status, + usb_pipedevice(urb_req->pipe), + usb_pipeendpoint(urb_req->pipe), + usb_pipein(urb_req->pipe) ? "in" : "out", + urb_req->actual_length, + urb_req->transfer_buffer_length); + + if (usb_endpoint_xfer_isoc(&urb_req->ep->desc)) { + for (i = 0; i < urb_req->number_of_packets; i++) { + urb_req->iso_frame_desc[i].status = 0; + urb_req->iso_frame_desc[i].actual_length = req->isoFramesDesc[i].length; + } + } + + config = dev_get_drvdata(priv->dev); + usb_hcd_unlink_urb_from_ep(config->hcd, urb_req); + spin_unlock(&config->lock); + usb_hcd_giveback_urb(config->hcd, urb_req, urb_status); + kfree(req); + spin_lock(&config->lock); +} + +static void host_rh_port_status_change(struct HOST_CTRL *priv) +{ + uint32_t statusHub; + char *status; + uint32_t retval; + struct usb_ctrlrequest setup; + struct phytium_cusb *config; + + if (!priv) + return; + + config = dev_get_drvdata(priv->dev); + if (!config) + return; + + status = (char *)&statusHub; + hc_hub_status_data(config->hcd, status); + + if (status) { + setup.bRequestType = USB_TYPE_CLASS | USB_RECIP_OTHER | USB_DIR_IN;//to host + setup.bRequest = USB_REQ_GET_STATUS; + setup.wValue = 0; + setup.wIndex = 1; + setup.wLength = 4; + retval = config->host_obj->host_vhubControl(priv, &setup, (uint8_t *)&statusHub); + if (retval) + return; + + if (status[1] & USB_PORT_STAT_C_CONNECTION) { + if (status[0] & USB_PORT_STAT_CONNECTION) { + if (config->hcd->status_urb) + usb_hcd_poll_rh_status(config->hcd); + else + usb_hcd_resume_root_hub(config->hcd); + } else { + usb_hcd_resume_root_hub(config->hcd); + usb_hcd_poll_rh_status(config->hcd); + } + } + } else + usb_hcd_resume_root_hub(config->hcd); +} + +static void host_set_ep_toggle(struct HOST_CTRL *priv, + struct HOST_DEVICE *udev, u8 ep_num, u8 is_in, u8 toggle) +{ + struct HOST_USB_DEVICE *device; + + if (!priv || !udev) + return; + + device = (struct HOST_USB_DEVICE *)udev->userExt; + + usb_settoggle(device->ld_udev, ep_num, !is_in, toggle); +} + +static u8 host_get_ep_toggle(struct HOST_CTRL *priv, + struct HOST_DEVICE *udev, u8 ep_num, u8 is_in) +{ + struct HOST_USB_DEVICE *device; + + if (!priv || !udev) + return 0; + + device = (struct HOST_USB_DEVICE *)udev->userExt; + + return usb_gettoggle(device->ld_udev, ep_num, !is_in); +} + +static uint32_t initEndpoints(struct HOST_CTRL *priv) +{ + int epNum; + + if (!priv) + return 0; + + priv->hwEpInCount = 0; + priv->hwEpOutCount = 0; + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | 0); + + for (epNum = 0; epNum < HOST_EP_NUM; epNum++) { + priv->in[epNum].isInEp = 1; + priv->in[epNum].hwEpNum = epNum; + if (priv->hostCfg.epIN[epNum].bufferingValue) { + priv->in[epNum].hwMaxPacketSize = priv->hostCfg.epIN[epNum].maxPacketSize; + priv->in[epNum].hwBuffers = priv->hostCfg.epIN[epNum].bufferingValue; + priv->in[epNum].state = HOST_EP_FREE; + priv->in[epNum].channel = NULL; + priv->hwEpInCount++; 
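+			/*
+			 * EP0's FIFO automation is set up once before this
+			 * loop; each other implemented IN endpoint gets its
+			 * FIFO start address, auto-FIFO mode and IRQ mode
+			 * programmed below (register roles inferred from
+			 * their names).
+			 */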
+
+			if (epNum) {
+				phytium_write16(&priv->regs->rxstaddr[epNum - 1].addr,
+						priv->hostCfg.epIN[epNum].startBuf);
+				phytium_write8(&priv->regs->ep[epNum - 1].rxcon, 0x08);
+				phytium_write8(&priv->regs->fifoctrl, FIFOCTRL_FIFOAUTO | epNum);
+				phytium_write8(&priv->regs->irqmode[epNum - 1].inirqmode, 0x80);
+			}
+		} else
+			priv->in[epNum].state = HOST_EP_NOT_IMPLEMENTED;
+
+		priv->out[epNum].isInEp = 0;
+		priv->out[epNum].hwEpNum = epNum;
+		if (priv->hostCfg.epOUT[epNum].bufferingValue) {
+			priv->out[epNum].hwMaxPacketSize = priv->hostCfg.epOUT[epNum].maxPacketSize;
+			priv->out[epNum].hwBuffers = priv->hostCfg.epOUT[epNum].bufferingValue;
+			priv->out[epNum].state = HOST_EP_FREE;
+			priv->out[epNum].channel = NULL;
+			priv->hwEpOutCount++;
+
+			if (epNum) {
+				phytium_write16(&priv->regs->txstaddr[epNum - 1].addr,
+						priv->hostCfg.epOUT[epNum].startBuf);
+				phytium_write8(&priv->regs->ep[epNum - 1].txcon, 0x08);
+				phytium_write8(&priv->regs->fifoctrl,
+						FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | epNum);
+				phytium_write8(&priv->regs->irqmode[epNum - 1].outirqmode, 0x80);
+			}
+		} else
+			priv->out[epNum].state = HOST_EP_NOT_IMPLEMENTED;
+	}
+
+	return 0;
+}
+
+int32_t hostInit(struct HOST_CTRL *priv, struct HOST_CFG *config,
+		struct HOST_CALLBACKS *callbacks, struct device *pdev, bool isVhubHost)
+{
+	int index;
+
+	if (!config || !priv || !callbacks || !pdev)
+		return -EINVAL;
+
+	priv->dev = pdev;
+	priv->hostCallbacks = *callbacks;
+	priv->hostCfg = *config;
+	priv->regs = (struct HW_REGS *)config->regBase;
+	priv->hostDrv = HOST_GetInstance();
+
+	priv->dmaDrv = DMA_GetInstance();
+	priv->dmaCfg.dmaModeRx = 0xFFFF;
+	priv->dmaCfg.dmaModeTx = 0xFFFF;
+	priv->dmaCfg.regBase = config->regBase + 0x400;
+	priv->dmaCfg.trbAddr = config->trbAddr;
+	priv->dmaCfg.trbDmaAddr = config->trbDmaAddr;
+	priv->dmaCallback.complete = host_CallbackTransfer;
+
+	priv->dmaController = (void *)(priv + 1);
+	priv->dmaDrv->dma_init(priv->dmaController, &priv->dmaCfg, &priv->dmaCallback);
+	priv->dmaDrv->dma_setParentPriv(priv->dmaController, priv);
+
+	INIT_LIST_HEAD(&priv->ctrlHEpQueue);
+
+	for (index = 0; index < MAX_INSTANCE_EP_NUM; index++) {
+		INIT_LIST_HEAD(&priv->isoInHEpQueue[index]);
+		INIT_LIST_HEAD(&priv->isoOutHEpQueue[index]);
+		INIT_LIST_HEAD(&priv->intInHEpQueue[index]);
+		INIT_LIST_HEAD(&priv->intOutHEpQueue[index]);
+		INIT_LIST_HEAD(&priv->bulkInHEpQueue[index]);
+		INIT_LIST_HEAD(&priv->bulkOutHEpQueue[index]);
+	}
+
+	phytium_write8(&priv->regs->cpuctrl, BIT(1));
+	phytium_write8(&priv->regs->otgctrl, OTGCTRL_ABUSDROP);
+	phytium_write8(&priv->regs->ep0maxpack, 0x40);
+
+	//disable interrupts
+	phytium_write8(&priv->regs->otgien, 0x0);
+	phytium_write8(&priv->regs->usbien, 0x0);
+	phytium_write16(&priv->regs->txerrien, 0x0);
+	phytium_write16(&priv->regs->rxerrien, 0x0);
+
+	//clear all interrupts except the otg idle irq
+	phytium_write8(&priv->regs->otgirq, 0xFE);
+	phytium_write8(&priv->regs->usbirq, 0xFF);
+	phytium_write16(&priv->regs->txerrirq, 0xFF);
+	phytium_write16(&priv->regs->rxerrirq, 0xFF);
+
+	phytium_write8(&priv->regs->tawaitbcon, 0x80);
+
+	initEndpoints(priv);
+	priv->otgState = HOST_OTG_STATE_B_IDLE;
+
+	//reset all endpoints
+	phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX);
+	phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX);
+	phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST);
+
+	if (isVhubHost)
+		priv->vhub_regs = (struct VHUB_REGS *)(config->phy_regBase);
+	else
+		priv->custom_regs = (struct CUSTOM_REGS *)(config->phy_regBase);
+
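+	/*
+	 * Exactly one of vhub_regs/custom_regs is non-NULL after the above
+	 * (the CTRL struct comes zeroed from the caller's devm_kzalloc); the
+	 * connect and disconnect paths test custom_regs to pick which wakeup
+	 * register to write.
+	 */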
+ return 0; +} + +void hostDestroy(struct HOST_CTRL *priv) +{ +} + +void hostStart(struct HOST_CTRL *priv) +{ + uint8_t otgstate, usbien; + + if (!priv) + return; + + priv->dmaDrv->dma_start(priv->dmaController); + usbien = phytium_read8(&priv->regs->usbien); + usbien = usbien | USBIR_URES | USBIR_SUSP; + phytium_write8(&priv->regs->usbien, usbien); +retry: + otgstate = phytium_read8(&priv->regs->otgstate); + switch (otgstate) { + case HOST_OTG_STATE_A_IDLE: + priv->ep0State = HOST_EP0_STAGE_IDLE; + priv->otgState = HOST_OTG_STATE_A_IDLE; + phytium_write8(&priv->regs->otgirq, OTGIRQ_IDLEIRQ); + host_SetVbus(priv, 1); + break; + case HOST_OTG_STATE_B_IDLE: + host_SetVbus(priv, 1); + break; + case HOST_OTG_STATE_A_WAIT_VFALL: + goto retry; + } + + phytium_write8(&priv->regs->otgien, OTGIRQ_CONIRQ | + OTGIRQ_VBUSERRIRQ | OTGIRQ_SRPDETIRQ); +} + +void hostStop(struct HOST_CTRL *priv) +{ + if (!priv) + return; + + phytium_write8(&priv->regs->otgien, 0x0); + phytium_write8(&priv->regs->usbien, 0x0); + phytium_write16(&priv->regs->txerrien, 0x0); + phytium_write16(&priv->regs->rxerrien, 0x0); + + phytium_write8(&priv->regs->otgirq, 0xFE); + phytium_write8(&priv->regs->usbirq, 0xFF); + phytium_write16(&priv->regs->txerrirq, 0xFF); + phytium_write16(&priv->regs->rxerrirq, 0xFF); + + host_SetVbus(priv, 0); + priv->dmaDrv->dma_stop(priv->dmaController); +} + +static void handleReset(struct HOST_CTRL *priv) +{ + if (!priv) + return; + + if (priv->otgState == HOST_OTG_STATE_A_WAIT_BCON + || priv->otgState == HOST_OTG_STATE_B_WAIT_ACON) { + switch (phytium_read8(&priv->regs->speedctrl)) { + case SPEEDCTRL_HS: + priv->portStatus |= USB_PORT_STAT_HIGH_SPEED; + break; + case SPEEDCTRL_FS: + priv->portStatus &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); + break; + case SPEEDCTRL_LS: + priv->portStatus |= USB_PORT_STAT_LOW_SPEED; + break; + } + + priv->dmaDrv->dma_setHostMode(priv->dmaController); + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + + switch (phytium_read8(&priv->regs->otgstate)) { + case HOST_OTG_STATE_B_HOST: + priv->otgState = HOST_OTG_STATE_B_HOST; + break; + case HOST_OTG_STATE_A_HOST: + break; + default: + priv->otgState = HOST_OTG_STATE_A_HOST; + break; + } + } +} + +void hostIsr(struct HOST_CTRL *priv) +{ + uint8_t usbirq, usbien; + + if (!priv) + return; + + usbirq = phytium_read8(&priv->regs->usbirq); + usbien = phytium_read8(&priv->regs->usbien); + pr_debug("raw usbirq:0x%x usbien:0x%x\n", usbirq, usbien); + usbirq = usbirq & usbien; + + hostErrorIrq(priv); + hostOtgIrq(priv); + + if (!usbirq) + goto DMA_IRQ; + + if (usbirq & USBIR_URES) { + phytium_write8(&priv->regs->usbirq, USBIR_URES); + priv->port_resetting = 0; + handleReset(priv); + } + + if (usbirq & USBIR_SOF) + phytium_write8(&priv->regs->usbirq, USBIR_SOF); + + if (usbirq & USBIR_SUSP) { + pr_debug("clear suspend irq\n"); + phytium_write8(&priv->regs->usbirq, USBIR_SUSP); + phytium_write8(&priv->regs->clkgate, 0x7); + } + + return; +DMA_IRQ: + priv->dmaDrv->dma_isr(priv->dmaController); +} + +int32_t hostEpDisable(struct HOST_CTRL *priv, struct HOST_EP *ep) +{ + return 0; +} + +int32_t hostReqQueue(struct HOST_CTRL *priv, struct HOST_REQ *req) +{ + struct HOST_EP_PRIV *hostEpPriv; + struct list_head *hEpQueue = NULL; + uint32_t interval = 0; + uint8_t idleQueue = 0; + + if (!priv || !req) + return -EINVAL; + + list_add_tail((struct list_head *)&req->list, (struct list_head *)&req->usbEp->reqList); + hostEpPriv = (struct HOST_EP_PRIV *)req->usbEp->hcPriv; + + if 
(hostEpPriv->epIsReady) { + if (usb_endpoint_xfer_isoc(&req->usbEp->desc)) { + hostEpPriv->isocEpConfigured = 1; + hostStartReq(priv, req); + } + return 0; + } + + hostEpPriv->epIsReady = 1; + hostEpPriv->maxPacketSize = req->usbEp->desc.wMaxPacketSize; + hostEpPriv->interval = req->interval; + hostEpPriv->epNum = req->usbEp->desc.bEndpointAddress & 0xf; + hostEpPriv->faddress = req->faddress; + hostEpPriv->usbHEp = req->usbEp; + hostEpPriv->isIn = req->epIsIn; + + switch (usb_endpoint_type(&req->usbEp->desc)) { + case USB_ENDPOINT_XFER_CONTROL: + hostEpPriv->isIn = 0; + hEpQueue = &priv->ctrlHEpQueue; + hostEpPriv->type = USB_ENDPOINT_XFER_CONTROL; + break; + case USB_ENDPOINT_XFER_BULK: + hEpQueue = hostEpPriv->isIn ? &priv->bulkInHEpQueue[req->epNum - 1] : + &priv->bulkOutHEpQueue[req->epNum - 1]; + hostEpPriv->type = USB_ENDPOINT_XFER_BULK; + break; + case USB_ENDPOINT_XFER_INT: + if (req->usbDev->speed < USB_SPEED_FULL) + interval = (req->usbEp->desc.bInterval < 10) ? 10 : req->usbEp->desc.bInterval; + else if (req->usbDev->speed == USB_SPEED_FULL) + interval = (req->usbEp->desc.bInterval < 1) ? + 1 : req->usbEp->desc.bInterval; + else { + if (req->usbEp->desc.bInterval < 1) + interval = 1; + else if (req->usbEp->desc.bInterval > 16) + interval = 16; + else + interval = req->usbEp->desc.bInterval; + + interval = 1 << (interval - 1); + } + hEpQueue = hostEpPriv->isIn ? &priv->intInHEpQueue[req->epNum - 1] : + &priv->intOutHEpQueue[req->epNum - 1]; + hostEpPriv->type = USB_ENDPOINT_XFER_INT; + hostEpPriv->frame = interval; + hostEpPriv->interval = interval; + break; + case USB_ENDPOINT_XFER_ISOC: + if (req->usbEp->desc.bInterval < 1) + interval = 1; + else if (req->usbEp->desc.bInterval > 16) + interval = 16; + else + interval = req->usbEp->desc.bInterval; + + interval = 1 << (interval - 1); + hEpQueue = hostEpPriv->isIn ? 
&priv->isoInHEpQueue[req->epNum - 1] : + &priv->isoOutHEpQueue[req->epNum - 1]; + hostEpPriv->type = USB_ENDPOINT_XFER_ISOC; + hostEpPriv->frame = interval; + hostEpPriv->interval = interval; + break; + default: + break; + } + + if (hostEpPriv->isIn) + hostEpPriv->genericHwEp = &priv->in[req->epNum]; + else + hostEpPriv->genericHwEp = &priv->out[req->epNum]; + + hostEpPriv->genericHwEp->state = HOST_EP_ALLOCATED; + hostEpPriv->genericHwEp->refCount++; + + if (list_empty(hEpQueue)) { + if (hostEpPriv->genericHwEp->state == HOST_EP_BUSY) { + pr_err("Error:Hardware endpoint %d is busy\n", + hostEpPriv->genericHwEp->hwEpNum); + return -EINVAL; + } + idleQueue = 1; + } + + if (usb_endpoint_xfer_int(&req->usbEp->desc)) + list_add_tail(&hostEpPriv->node, hEpQueue); + + if (idleQueue) + hostStartReq(priv, req); + + return 0; +} + +static int abortActuallyUsbRequest(struct HOST_CTRL *priv, + struct HOST_REQ *req, struct HOST_EP *usbEp) +{ + struct HOST_EP_PRIV *usbEpPriv; + struct HostEp *hostEp; + uint16_t rxerrien = 0; + uint16_t txerrien = 0; + uint8_t rxcon, txcon; + + if (!priv || !req || !usbEp) + return -EINVAL; + + usbEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + hostEp = usbEpPriv->currentHwEp; + + usbEpPriv->transferFinished = 1; + + if (hostEp->isInEp) { + if (hostEp->hwEpNum) { + rxcon = phytium_read8(&priv->regs->ep[hostEp->hwEpNum - 1].rxcon); + rxcon = rxcon & (~BIT(7)); + phytium_write8(&priv->regs->ep[hostEp->hwEpNum - 1].rxcon, rxcon); + } + rxerrien = phytium_read16(&priv->regs->rxerrien); + rxerrien &= ~(1 << hostEp->hwEpNum); + phytium_write16(&priv->regs->rxerrien, rxerrien); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | + ENDPRST_IO_TX | hostEp->hwEpNum); + } else { + if (hostEp->hwEpNum) { + txcon = phytium_read8(&priv->regs->ep[hostEp->hwEpNum - 1].txcon); + txcon = txcon & (~BIT(7)); + phytium_write8(&priv->regs->ep[hostEp->hwEpNum - 1].txcon, txcon); + } + txerrien = phytium_read16(&priv->regs->txerrien); + txerrien &= ~(1 << hostEp->hwEpNum); + phytium_write16(&priv->regs->txerrien, txerrien); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | hostEp->hwEpNum); + } + + scheduleNextTransfer(priv, req, hostEp); + + return 0; +} + +int32_t hostReqDequeue(struct HOST_CTRL *priv, struct HOST_REQ *req, uint32_t status) +{ + struct HOST_EP *usbEp; + struct HOST_EP_PRIV *usbEpPriv; + int ret = 0; + + if (!priv || !req) + return -EINVAL; + + usbEp = req->usbEp; + usbEpPriv = (struct HOST_EP_PRIV *)usbEp->hcPriv; + + pr_debug("Dequeue usbReq:%p dev%d usbEp%d%s\n", req, req->faddress, req->epNum, + req->epIsIn ? 
"in" : "out"); + + if (!usbEpPriv->epIsReady) + return 0; + + if (!usbEpPriv->currentHwEp || req->list.prev != &usbEp->reqList) { + givebackRequest(priv, req, status); + if (list_empty(&usbEp->reqList)) { + usbEpPriv->epIsReady = 0; + usbEpPriv->currentHwEp = NULL; + if (usb_endpoint_xfer_int(&usbEp->desc)) + list_del(&usbEpPriv->node); + } + } else + ret = abortActuallyUsbRequest(priv, req, usbEp); + + if (usbEpPriv->isocEpConfigured) + usbEpPriv->isocEpConfigured = 0; + + return ret; +} + +int32_t hostVHubStatusData(struct HOST_CTRL *priv, uint8_t *status) +{ + return 0; +} + +int32_t hostGetDevicePD(struct HOST_CTRL *priv) +{ + return 0; +} + +int32_t hostGetPrivateDataSize(struct HOST_CTRL *priv) +{ + if (!priv) + return 0; + + return sizeof(struct HOST_EP_PRIV); +} + +static int hub_descriptor(struct usb_hub_descriptor *buf) +{ + buf->bDescLength = 9; + buf->bDescriptorType = 0x29; + buf->bNbrPorts = 1; + buf->wHubCharacteristics = 0x11; + buf->bPwrOn2PwrGood = 5; + buf->bHubContrCurrent = 0; + buf->u.hs.DeviceRemovable[0] = 0x2; + + return 0; +} + +static int HubPortSuspend(struct HOST_CTRL *priv, u16 on) +{ + uint8_t otgctrl; + + if (!priv) + return 0; + + otgctrl = phytium_read8(&priv->regs->otgctrl); + + if (on) { + otgctrl &= ~OTGCTRL_BUSREQ; + otgctrl &= ~OTGCTRL_BHNPEN; + + priv->portStatus |= USB_PORT_STAT_SUSPEND; + + switch (phytium_read8(&priv->regs->otgstate)) { + case HOST_OTG_STATE_A_HOST: + priv->otgState = HOST_OTG_STATE_A_SUSPEND; + otgctrl |= OTGCTRL_ASETBHNPEN; + break; + case HOST_OTG_STATE_B_HOST: + priv->otgState = HOST_OTG_STATE_B_HOST_2; + break; + default: + break; + } + phytium_write8(&priv->regs->otgctrl, otgctrl); + } else { + otgctrl |= OTGCTRL_BUSREQ; + otgctrl &= ~OTGCTRL_ASETBHNPEN; + phytium_write8(&priv->regs->otgctrl, otgctrl); + priv->portStatus |= USB_PORT_STAT_RESUME; + } + return 0; +} + +static void HubPortReset(struct HOST_CTRL *priv, uint8_t on) +{ + uint8_t speed; + + if (!priv) + return; + + if (on) { + phytium_write16(&priv->regs->txerrirq, 0xFFFF); + phytium_write16(&priv->regs->txirq, 0xFFFF); + phytium_write16(&priv->regs->rxerrirq, 0xFFFF); + phytium_write16(&priv->regs->rxirq, 0xFFFF); + + phytium_write8(&priv->regs->endprst, ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | + ENDPRST_TOGRST | ENDPRST_IO_TX); + phytium_write8(&priv->regs->endprst, ENDPRST_FIFORST | ENDPRST_TOGRST); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0 | 0x04); + phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | + FIFOCTRL_IO_TX | 0 | 0x04); + + priv->portStatus |= USB_PORT_STAT_RESET; + priv->portStatus &= ~USB_PORT_STAT_ENABLE; + priv->port_resetting = 0; + } else { + speed = phytium_read8(&priv->regs->speedctrl); + if (speed == SPEEDCTRL_HS) + priv->portStatus |= USB_PORT_STAT_HIGH_SPEED; + else if (speed == SPEEDCTRL_FS) + priv->portStatus &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED); + else + priv->portStatus |= USB_PORT_STAT_LOW_SPEED; + + priv->portStatus &= ~USB_PORT_STAT_RESET; + priv->portStatus |= USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET << 16) + | (USB_PORT_STAT_C_ENABLE << 16); + + if (priv->hostCallbacks.portStatusChange) + priv->hostCallbacks.portStatusChange(priv); + } +} + +static int get_PortStatus(struct HOST_CTRL *priv, u16 wIndex, uint8_t *buff) +{ + uint32_t temp = 0; + + if (!priv || !buff) + return 0; + + if ((wIndex & 0xff) != 1) + return 1; + + if (priv->portStatus & USB_PORT_STAT_RESET) { + if (!priv->port_resetting) + HubPortReset(priv, 0); + } + + if 
(priv->portStatus & USB_PORT_STAT_RESUME) {
+		priv->portStatus &= ~(USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESUME);
+		priv->portStatus |= USB_PORT_STAT_C_SUSPEND << 16;
+		if (priv->hostCallbacks.portStatusChange)
+			priv->hostCallbacks.portStatusChange(priv);
+	}
+
+	temp = priv->portStatus & (~USB_PORT_STAT_RESUME);
+	buff[0] = temp;
+	buff[1] = temp >> 8;
+	buff[2] = temp >> 16;
+	buff[3] = temp >> 24;
+
+	return 0;
+}
+
+static int set_PortFeature(struct HOST_CTRL *priv, u16 wValue, u16 wIndex)
+{
+	if ((wIndex & 0xff) != 1)
+		return 1;
+
+	switch (wValue) {
+	case USB_PORT_FEAT_CONNECTION:
+		break;
+	case USB_PORT_FEAT_ENABLE:
+		break;
+	case USB_PORT_FEAT_SUSPEND:
+		HubPortSuspend(priv, 1);
+		break;
+	case USB_PORT_FEAT_OVER_CURRENT:
+		break;
+	case USB_PORT_FEAT_RESET:
+		HubPortReset(priv, 1);
+		break;
+	case USB_PORT_FEAT_L1:
+		break;
+	case USB_PORT_FEAT_POWER:
+		hostStart(priv);
+		break;
+	case USB_PORT_FEAT_LOWSPEED:
+		break;
+	case USB_PORT_FEAT_C_CONNECTION:
+		break;
+	case USB_PORT_FEAT_C_ENABLE:
+		break;
+	case USB_PORT_FEAT_C_SUSPEND:
+		break;
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+		break;
+	case USB_PORT_FEAT_INDICATOR:
+		break;
+	case USB_PORT_FEAT_C_PORT_L1:
+		break;
+	default:
+		break;
+	}
+	priv->portStatus |= 1 << wValue;
+
+	return 0;
+}
+
+static int Clear_PortFeature(struct HOST_CTRL *priv, u16 wValue, u16 wIndex)
+{
+	if ((wIndex & 0xff) != 1)
+		return 1;
+
+	switch (wValue) {
+	case USB_PORT_FEAT_CONNECTION:
+		break;
+	case USB_PORT_FEAT_ENABLE:
+		break;
+	case USB_PORT_FEAT_SUSPEND:
+		HubPortSuspend(priv, 0);
+		break;
+	case USB_PORT_FEAT_OVER_CURRENT:
+		break;
+	case USB_PORT_FEAT_RESET:
+		break;
+	case USB_PORT_FEAT_L1:
+		break;
+	case USB_PORT_FEAT_POWER:
+		break;
+	case USB_PORT_FEAT_LOWSPEED:
+		break;
+	case USB_PORT_FEAT_C_CONNECTION:
+		break;
+	case USB_PORT_FEAT_C_ENABLE:
+		break;
+	case USB_PORT_FEAT_C_SUSPEND:
+		break;
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+		break;
+	case USB_PORT_FEAT_INDICATOR:
+		break;
+	case USB_PORT_FEAT_C_PORT_L1:
+		break;
+	default:
+		break;
+	}
+	priv->portStatus &= ~(1 << wValue);
+
+	return 0;
+}
+
+static int hc_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+		u16 wIndex, char *buf, u16 wLength)
+{
+	unsigned long flags = 0;
+	int retval = 0;
+	struct HOST_CTRL *priv;
+	struct phytium_cusb *config = *(struct phytium_cusb **)hcd->hcd_priv;
+
+	if (!config)
+		return -EINVAL;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+		return -ESHUTDOWN;
+
+	priv = (struct HOST_CTRL *)config->host_priv;
+	if (!priv)
+		return -EINVAL;
+
+	spin_lock_irqsave(&config->lock, flags);
+	switch (typeReq) {
+	case GetHubStatus:
+		break;
+	case GetPortStatus:
+		get_PortStatus(priv, wIndex, (uint8_t *)buf);
+		break;
+	case GetHubDescriptor:
+		hub_descriptor((struct usb_hub_descriptor *)buf);
+		break;
+	case SetPortFeature:
+		set_PortFeature(priv, wValue, wIndex);
+		break;
+	case ClearPortFeature:
+		retval = Clear_PortFeature(priv, wValue, wIndex);
+		break;
+	case SetHubFeature:
+		break;
+	case ClearHubFeature:
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&config->lock, flags);
+
+	return retval;
+}
+
+int32_t hostVHubControl(struct HOST_CTRL *priv, struct usb_ctrlrequest *setup, uint8_t *buff)
+{
+	uint16_t request;
+	uint32_t retval = 0;
+
+	if (!priv || !setup || !buff)
+		return -EINVAL;
+
+	request = setup->bRequestType << 0x08 | setup->bRequest;
+	switch (request) {
+	case ClearHubFeature:
+		break;
+	case
SetHubFeature: + break; + case ClearPortFeature: + retval = Clear_PortFeature(priv, setup->wValue, setup->wIndex); + break; + case SetPortFeature: + retval = set_PortFeature(priv, setup->wValue, setup->wIndex); + break; + case GetHubDescriptor: + retval = hub_descriptor((struct usb_hub_descriptor *)buff); + break; + case GetHubStatus: + break; + case GetPortStatus: + retval = get_PortStatus(priv, setup->wIndex, (uint8_t *)buff); + break; + default: + retval = -EOPNOTSUPP; + break; + } + + return retval; +} + +struct HOST_OBJ hostDrv = { + .host_init = hostInit, + .host_destroy = hostDestroy, + .host_start = hostStart, + .host_stop = hostStop, + .host_isr = hostIsr, + + /* endpoint operations */ + .host_epDisable = hostEpDisable, + .host_reqQueue = hostReqQueue, + .host_reqDequeue = hostReqDequeue, + .host_vhubStatusData = hostVHubStatusData, + .host_vhubControl = hostVHubControl, + .host_getDevicePD = hostGetDevicePD, + .host_epGetPrivateDataSize = hostGetPrivateDataSize, +}; + +static struct hc_driver host_driver = { + .description = "phytium-hcd", + .product_desc = "Phytium Host USB Driver", + .hcd_priv_size = sizeof(struct phytium_cusb *), + .flags = HCD_MEMORY | HCD_USB2, + .reset = hc_reset, + .start = hc_start, + .stop = hc_stop, + .shutdown = hc_shutdown, + .urb_enqueue = hc_urb_enqueue, + .urb_dequeue = hc_urb_dequeue, + .endpoint_disable = hc_endpoint_disable, + .get_frame_number = hc_get_frame, + .alloc_dev = hc_alloc_dev, + .free_dev = hc_free_dev, + .reset_device = hc_reset_device, + .update_device = hc_update_device, + .add_endpoint = hc_add_endpoint, + .drop_endpoint = hc_drop_endpoint, + .hub_status_data = hc_hub_status_data, + .hub_control = hc_hub_control, +#ifdef CONFIG_PM + .bus_suspend = hc_bus_suspend, + .bus_resume = hc_bus_resume, +#endif +}; + +static int phytium_host_set_default_cfg(struct phytium_cusb *config) +{ + int index; + + config->host_cfg.regBase = (uintptr_t)config->regs; + config->host_cfg.phy_regBase = (uintptr_t)config->phy_regs; + config->host_cfg.dmultEnabled = 1; + config->host_cfg.memoryAlignment = 0; + config->host_cfg.dmaSupport = 1; + config->host_cfg.isEmbeddedHost = 1; + + for (index = 0; index < HOST_EP_NUM; index++) { + if (index == 0) { + config->host_cfg.epIN[index].bufferingValue = 1; + config->host_cfg.epIN[index].maxPacketSize = 64; + config->host_cfg.epIN[index].startBuf = 0; + + config->host_cfg.epOUT[index].bufferingValue = 1; + config->host_cfg.epOUT[index].maxPacketSize = 64; + config->host_cfg.epOUT[index].startBuf = 0; + } else { + config->host_cfg.epIN[index].bufferingValue = 2; + config->host_cfg.epIN[index].maxPacketSize = 1024; + config->host_cfg.epIN[index].startBuf = 64 + 2 * 1024 * (index - 1); + + config->host_cfg.epOUT[index].bufferingValue = 2; + config->host_cfg.epOUT[index].maxPacketSize = 1024; + config->host_cfg.epOUT[index].startBuf = 64 + 2 * 1024 * (index - 1); + } + } + + return 0; +} + +static int phytium_host_reinit(struct phytium_cusb *config) +{ + struct HOST_CTRL *ctrl; + + if (!config || !config->host_priv) + return 0; + + ctrl = (struct HOST_CTRL *)config->host_priv; + + usb_root_hub_lost_power(config->hcd->self.root_hub); + hostStop(ctrl); + + ctrl->portStatus = 0; + + config->host_obj->host_init(config->host_priv, &config->host_cfg, + &config->host_callbacks, config->dev, config->isVhubHost); + + return 0; +} + +int phytium_host_init(struct phytium_cusb *config) +{ + int ret; + + if (!config) + return -EINVAL; + + phytium_host_set_default_cfg(config); + config->host_obj = HOST_GetInstance(); + 
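/* The companion DMA engine shares the controller's MMIO window; its register block starts 0x400 past the base (see struct DMARegs in hw-regs.h). */ +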
config->dma_cfg.regBase = config->host_cfg.regBase + 0x400; + + config->dma_obj = DMA_GetInstance(); + config->dma_obj->dma_probe(&config->dma_cfg, &config->dma_sysreq); + + config->host_sysreq.privDataSize = sizeof(struct HOST_CTRL); + config->host_sysreq.trbMemSize = config->dma_sysreq.trbMemSize; + config->host_sysreq.privDataSize += config->dma_sysreq.privDataSize; + + config->host_priv = devm_kzalloc(config->dev, config->host_sysreq.privDataSize, GFP_KERNEL); + if (!config->host_priv) { + ret = -ENOMEM; + goto err_probe; + } + + config->host_cfg.trbAddr = dma_alloc_coherent(config->dev, config->host_sysreq.trbMemSize, + (dma_addr_t *)&config->host_cfg.trbDmaAddr, GFP_KERNEL); + if (!config->host_cfg.trbAddr) { + ret = -ENOMEM; + goto err_dma_coherent; + } + + config->host_callbacks.portStatusChange = host_rh_port_status_change; + config->host_callbacks.getEpToggle = host_get_ep_toggle; + config->host_callbacks.setEpToggle = host_set_ep_toggle; + config->host_callbacks.givebackRequest = host_giveback_request; + + config->host_obj->host_init(config->host_priv, &config->host_cfg, + &config->host_callbacks, config->dev, config->isVhubHost); + + config->hcd = usb_create_hcd(&host_driver, config->dev, dev_name(config->dev)); + if (!config->hcd) { + ret = -ENODEV; + goto err_host; + } + + *config->hcd->hcd_priv = (unsigned long)config; + config->hcd->self.uses_pio_for_control = 0; + config->hcd->uses_new_polling = 1; + config->hcd->has_tt = 1; + + dev_set_drvdata(config->dev, config); + + config->hcd->self.otg_port = 1; + config->hcd->power_budget = 500; + ret = usb_add_hcd(config->hcd, 0, 0); + if (ret < 0) + goto err_setup; + + return 0; +err_setup: + usb_put_hcd(config->hcd); +err_host: + config->host_obj->host_destroy(config->host_priv); + dma_free_coherent(config->dev, config->host_sysreq.trbMemSize, + config->host_cfg.trbAddr, config->host_cfg.trbDmaAddr); +err_dma_coherent: +err_probe: + dev_set_drvdata(config->dev, NULL); + return ret; +} + +int phytium_host_uninit(struct phytium_cusb *config) +{ + if (!config) + return 0; + + if (config->hcd) { + usb_remove_hcd(config->hcd); + usb_put_hcd(config->hcd); + config->hcd = NULL; + } + + return 0; +} + +#ifdef CONFIG_PM +int phytium_host_resume(void *priv) +{ + int otgctrl; + struct phytium_cusb *config = (struct phytium_cusb *)priv; + struct HOST_CTRL *ctrl = (struct HOST_CTRL *)config->host_priv; + + if (!ctrl) + return -EINVAL; + + otgctrl = phytium_read8(&ctrl->regs->otgctrl); + otgctrl |= 1; /* set OTGCTRL_BUSREQ to request the bus again */ + phytium_write8(&ctrl->regs->otgctrl, otgctrl); + + phytium_host_reinit(config); + + return 0; +} + +int phytium_host_suspend(void *priv) +{ + int otgctrl; + struct phytium_cusb *config = (struct phytium_cusb *)priv; + struct HOST_CTRL *ctrl = (struct HOST_CTRL *)config->host_priv; + + if (!ctrl) + return -EINVAL; + + otgctrl = phytium_read8(&ctrl->regs->otgctrl); + otgctrl = otgctrl & (~1); /* clear OTGCTRL_BUSREQ */ + phytium_write8(&ctrl->regs->otgctrl, otgctrl); + + return 0; +} +#endif + +struct HOST_OBJ *HOST_GetInstance(void) +{ + return &hostDrv; +} diff --git a/drivers/usb/phytium/host_api.h b/drivers/usb/phytium/host_api.h new file mode 100644 index 0000000000000000000000000000000000000000..3d45258278c475fdd4ef15810499e41c73d27948 --- /dev/null +++ b/drivers/usb/phytium/host_api.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PHYTIUM_HOST_API_H_ +#define __PHYTIUM_HOST_API_H_ + +#include <linux/usb.h> +#include "dma.h" + +#define MAX_SUPPORTED_DEVICES 16 +#define USB_PORT_STAT_RESUME (1 << 31) +#define MAX_INSTANCE_EP_NUM 6 + 
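+/* These OTG state codes are compared directly against the controller's OTGSTATE register (see platform_usb_irq() in platform.c), so the values are assumed to track the hardware encoding; B-device states start at 0x10. */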
+enum HOST_OtgState { + HOST_OTG_STATE_A_IDLE, + HOST_OTG_STATE_A_WAIT_VRISE, + HOST_OTG_STATE_A_WAIT_BCON, + HOST_OTG_STATE_A_HOST, + HOST_OTG_STATE_A_SUSPEND, + HOST_OTG_STATE_A_PERIPHERAL, + HOST_OTG_STATE_A_VBUS_ERR, + HOST_OTG_STATE_A_WAIT_VFALL, + HOST_OTG_STATE_B_IDLE = 0x10, + HOST_OTG_STATE_B_PERIPHERAL, + HOST_OTG_STATE_B_WAIT_ACON, + HOST_OTG_STATE_B_HOST, + HOST_OTG_STATE_B_HOST_2, + HOST_OTG_STATE_B_SRP_INT1, + HOST_OTG_STATE_B_SRP_INT2, + HOST_OTG_STATE_B_DISCHRG1, + HOST_OTG_STATE_B_DISCHRG2, + HOST_OTG_STATE_UNKNOWN, +}; + +enum HOST_EP0_STAGE { + HOST_EP0_STAGE_IDLE, + HOST_EP0_STAGE_SETUP, + HOST_EP0_STAGE_IN, + HOST_EP0_STAGE_OUT, + HOST_EP0_STAGE_STATUSIN, + HOST_EP0_STAGE_STATUSOUT, + HOST_EP0_STAGE_ACK, +}; + +enum HOST_EP_STATE { + HOST_EP_FREE, + HOST_EP_ALLOCATED, + HOST_EP_BUSY, + HOST_EP_NOT_IMPLEMENTED +}; + +struct HOST_DEVICE { + uint8_t devnum; + uint8_t hubPort; + unsigned int speed; + struct HOST_DEVICE *parent; + void *hcPriv; + void *userExt; +}; + +struct HOST_EP { + struct usb_endpoint_descriptor desc; + struct list_head reqList; + void *userExt; + uint8_t *hcPriv; +}; + +struct HOST_USB_DEVICE { + struct HOST_EP ep0_hep; + struct HOST_EP *in_ep[16]; + struct HOST_EP *out_ep[16]; + struct HOST_DEVICE udev; + struct usb_device *ld_udev; +}; + +struct HostEp { + uint8_t name[255]; + uint8_t hwEpNum; + uint8_t hwBuffers; + uint16_t hwMaxPacketSize; + uint8_t isInEp; + void *channel; + uint8_t usbEpNum; + uint8_t type; + uint8_t usbPacketSize; + enum HOST_EP_STATE state; + struct HOST_EP *scheduledUsbHEp; + uint8_t refCount; +}; + +struct HOST_ISOFRAMESDESC { + uint32_t length; + uint32_t offset; +}; + +struct HOST_EP_PRIV { + struct list_head node; + struct HostEp *genericHwEp; + struct HostEp *currentHwEp; + uint8_t epIsReady; + uint8_t isIn; + uint8_t type; + uint8_t interval; + uint8_t epNum; + uint8_t faddress; + uint16_t maxPacketSize; + uint32_t frame; + uint8_t hubAddress; + uint8_t portAddress; + uint8_t split; + struct HOST_EP *usbHEp; + uint8_t isocEpConfigured; + uint8_t transferFinished; +}; + +struct HOST_REQ { + struct list_head list; + struct HOST_EP *usbEp; + void *userExt; + void *hcPriv; + struct HOST_DEVICE *usbDev; + struct usb_ctrlrequest *setup; + uintptr_t setupDma; + void *bufAddress; + uintptr_t buffDma; + uint32_t buffLength; + uint32_t actualLength; + uint8_t epIsIn; + uint8_t eptype; + uint8_t epNum; + uint8_t faddress; + uint8_t interval; + uint8_t status; + uint8_t reqUnlinked; + struct HOST_ISOFRAMESDESC *isoFramesDesc; + uint32_t isoFramesNumber; +}; + +struct HOST_SYSREQ { + uint32_t privDataSize; + uint32_t trbMemSize; +}; + +struct HOST_EP_CFG { + uint8_t bufferingValue; + uint16_t startBuf; + uint16_t maxPacketSize; +}; + +struct HOST_CFG { + uintptr_t regBase; + uintptr_t phy_regBase; + struct HOST_EP_CFG epIN[16]; + struct HOST_EP_CFG epOUT[16]; + uint8_t dmultEnabled; + uint8_t memoryAlignment; + uint8_t dmaSupport; + uint8_t isEmbeddedHost; + void *trbAddr; + uintptr_t trbDmaAddr; +}; + +struct HOST_CTRL; + +struct HOST_CALLBACKS { + void (*portStatusChange)(struct HOST_CTRL *priv); + + uint8_t (*getEpToggle)(struct HOST_CTRL *priv, + struct HOST_DEVICE *usbDev, uint8_t epNum, uint8_t isIn); + + void (*setEpToggle)(struct HOST_CTRL *priv, struct HOST_DEVICE *usbDev, + uint8_t epNum, uint8_t isIn, uint8_t toggle); + + void (*givebackRequest)(struct HOST_CTRL *priv, + struct HOST_REQ *usbReq, uint32_t status); + + void (*setTimer)(struct HOST_CTRL *priv, uint32_t time, uint8_t id); +}; + +struct HOST_OBJ { + int32_t 
(*host_probe)(struct HOST_CFG *config, struct HOST_SYSREQ *sysReq); + + int32_t (*host_init)(struct HOST_CTRL *priv, struct HOST_CFG *config, + struct HOST_CALLBACKS *callbacks, struct device *pdev, bool isVhubHost); + + void (*host_destroy)(struct HOST_CTRL *priv); + + void (*host_start)(struct HOST_CTRL *priv); + + void (*host_stop)(struct HOST_CTRL *priv); + + void (*host_isr)(struct HOST_CTRL *priv); + + int32_t (*host_epDisable)(struct HOST_CTRL *priv, struct HOST_EP *ep); + + int32_t (*host_reqQueue)(struct HOST_CTRL *priv, struct HOST_REQ *req); + + int32_t (*host_reqDequeue)(struct HOST_CTRL *priv, + struct HOST_REQ *req, uint32_t status); + + int32_t (*host_vhubStatusData)(struct HOST_CTRL *priv, uint8_t *status); + + int32_t (*host_vhubControl)(struct HOST_CTRL *priv, + struct usb_ctrlrequest *setup, uint8_t *buff); + + int32_t (*host_getDevicePD)(struct HOST_CTRL *priv); + + int32_t (*host_epGetPrivateDataSize)(struct HOST_CTRL *priv); +}; + +struct HOST_CTRL { + struct device *dev; + struct HW_REGS *regs; + struct HOST_OBJ *hostDrv; + struct HOST_CFG hostCfg; + struct HOST_CALLBACKS hostCallbacks; + struct HostEp in[16]; + struct HostEp out[16]; + uint32_t portStatus; + struct list_head ctrlHEpQueue; + struct list_head isoInHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head isoOutHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head intInHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head intOutHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head bulkInHEpQueue[MAX_INSTANCE_EP_NUM]; + struct list_head bulkOutHEpQueue[MAX_INSTANCE_EP_NUM]; + uint8_t hwEpInCount; + uint8_t hwEpOutCount; + unsigned int speed; + enum HOST_OtgState otgState; + enum HOST_EP0_STAGE ep0State; + uint8_t vBusErrCnt; + uint8_t isRemoteWakeup; + uint8_t isSelfPowered; + uint8_t deviceAddress; + struct DMA_OBJ *dmaDrv; + void *dmaController; + struct DMA_CFG dmaCfg; + struct DMA_CALLBACKS dmaCallback; + uint8_t port_resetting; + struct HOST_USB_DEVICE *host_devices_table[MAX_SUPPORTED_DEVICES]; + struct CUSTOM_REGS *custom_regs; + struct VHUB_REGS *vhub_regs; +}; + +struct HOST_OBJ *HOST_GetInstance(void); + +#endif diff --git a/drivers/usb/phytium/hw-regs.h b/drivers/usb/phytium/hw-regs.h new file mode 100644 index 0000000000000000000000000000000000000000..8da9f8e9b92537f20c2be1c232849f331f32baf7 --- /dev/null +++ b/drivers/usb/phytium/hw-regs.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PHYTIUM_HW_REGS +#define __LINUX_PHYTIUM_HW_REGS + +#define USBIEN 0x198 +#define USBIEN_SUDAVIE BIT(0) +#define USBIEN_SOFIE BIT(1) +#define USBIEN_SUTOKIE BIT(2) +#define USBIEN_SUSPIE BIT(3) +#define USBIEN_URESIE BIT(4) +#define USBIEN_HSPEEDIE BIT(5) +#define USBIEN_LPMIE BIT(7) + +#define USBCS 0x1a3 +#define USBCS_LSMODE BIT(0) +#define USBCS_LPMNYET BIT(1) +#define USBCS_SIGSUME BIT(5) +#define USBCS_DISCON BIT(6) +#define USBCS_WAKESRC BIT(7) + +#define USBIR_SOF BIT(1) +#define USBIR_SUSP BIT(3) +#define USBIR_URES BIT(4) +#define USBIR_HSPEED BIT(5) + +#define USBIR_SUDAV BIT(0) +#define USBIR_SUTOK BIT(2) +#define USBIR_LPMIR BIT(7) + +#define OTGIRQ_IDLEIRQ BIT(0) +#define OTGIRQ_SRPDETIRQ BIT(1) +#define OTGIRQ_CONIRQ BIT(2) +#define OTGIRQ_LOCSOFIRQ BIT(2) +#define OTGIRQ_VBUSERRIRQ BIT(3) +#define OTGIRQ_PERIPHIRQ BIT(4) +#define OTGIRQ_IDCHANGEIRQ BIT(5) +#define OTGIRQ_HOSTDISCON BIT(6) +#define OTGIRQ_BSE0SRPIRQ BIT(7) + +#define OTGCTRL_BUSREQ BIT(0) +#define OTGCTRL_ABUSDROP BIT(1) +#define OTGCTRL_ASETBHNPEN BIT(2) +#define OTGCTRL_BHNPEN BIT(3) +#define OTGCTRL_SRPVBUSDETEN 
BIT(4) +#define OTGCTRL_SRPDATDETEN BIT(5) +#define OTGCTRL_FORCEBCONN BIT(7) + +#define OTGSTATUS_ID BIT(6) + +#define ENDPRST_EP 0x0f +#define ENDPRST_IO_TX BIT(4) +#define ENDPRST_TOGRST BIT(5) +#define ENDPRST_FIFORST BIT(6) +#define ENDPRST_TOGSETQ BIT(7) + +#define FIFOCTRL_EP 0x0f +#define FIFOCTRL_IO_TX BIT(4) +#define FIFOCTRL_FIFOAUTO BIT(5) + +#define SPEEDCTRL_LS BIT(0) +#define SPEEDCTRL_FS BIT(1) +#define SPEEDCTRL_HS BIT(2) +#define SPEEDCTRL_HSDISABLE BIT(7) + +#define CON_TYPE_CONTROL 0x00 +#define CON_TYPE_ISOC 0x04 +#define CON_TYPE_BULK 0x08 +#define CON_TYPE_INT 0x0C +#define CON_TYPE_ISOC_1_ISOD 0x00 +#define CON_TYPE_ISOC_2_ISOD 0x10 +#define CON_TYPE_ISOC_3_ISOD 0x20 +#define CON_STALL 0x40 +#define CON_VAL 0x80 + +#define ERR_TYPE 0x1c +#define ERR_COUNT 0x03 +#define ERR_RESEND BIT(6) +#define ERR_UNDERRIEN BIT(7) + +#define ERR_NONE 0 +#define ERR_CRC 1 +#define ERR_DATA_TOGGLE_MISMATCH 2 +#define ERR_STALL 3 +#define ERR_TIMEOUT 4 +#define ERR_PID 5 +#define ERR_TOO_LONG_PACKET 6 +#define ERR_DATA_UNDERRUN 7 + +#define EP0CS_HCSETTOGGLE BIT(6) +#define EP0CS_HCSET BIT(4) +#define EP0CS_RXBUSY_MASK BIT(3) +#define EP0CS_TXBUSY_MASK BIT(2) +#define EP0CS_STALL BIT(0) +#define EP0CS_HSNAK BIT(1) +#define EP0CS_DSTALL BIT(4) +#define EP0CS_CHGSET BIT(7) + +#define CS_ERR 0x01 +#define CS_BUSY 0x02 +#define CS_NPAK 0x0c +#define CS_NPAK_OFFSET 0x02 +#define CS_AUTO 0x10 + +#define CON_BUF_SINGLE 0x00 +#define CON_BUF_DOUBLE 0x01 +#define CON_BUF_TRIPLE 0x02 +#define CON_BUF_QUAD 0x03 +#define CON_BUF 0x03 + +struct HW_REGS { + uint8_t ep0Rxbc; /*address 0x00*/ + uint8_t ep0Txbc; /*address 0x01*/ + uint8_t ep0cs; /*address 0x02*/ + int8_t reserved0; /*address 0x03*/ + uint8_t lpmctrll; /*address 0x04*/ + uint8_t lpmctrlh; /*address 0x05*/ + uint8_t lpmclock; + uint8_t ep0fifoctrl; + struct ep { /*address 0x08*/ + uint16_t rxbc; //outbc (hcinbc) + uint8_t rxcon; + uint8_t rxcs; + uint16_t txbc; //inbc (hcoutbc + uint8_t txcon; + uint8_t txcs; + } ep[15]; + uint8_t reserved1[4]; + uint32_t fifodat[15]; /*address 0x84*/ + uint8_t ep0ctrl; /*address 0xC0*/ + uint8_t tx0err; /*address 0xC1*/ + uint8_t reserved2; + uint8_t rx0err; /*address 0xC3*/ + struct epExt { + uint8_t txctrl; + uint8_t txerr; + uint8_t rxctrl; + uint8_t rxerr; + } epExt[15]; + uint8_t ep0datatx[64]; /*address 0x100*/ + uint8_t ep0datarx[64]; /*address 0x140*/ + uint8_t setupdat[8]; /*address 0x180*/ + uint16_t txirq; /*address 0x188*/ + uint16_t rxirq; /*address 0x18A*/ + uint8_t usbirq; /*address 0x18C*/ + uint8_t reserved4; + uint16_t rxpngirq; /*address 0x18E*/ + uint16_t txfullirq; /*address 0x190*/ + uint16_t rxemptirq; /*address 0x192*/ + uint16_t txien; /*address 0x194*/ + uint16_t rxien; /*address 0x196*/ + uint8_t usbien; /*address 0x198*/ + uint8_t reserved6; + uint16_t rxpngien; /*address 0x19A*/ + uint16_t txfullien; /*address 0x19C*/ + uint16_t rxemptien; /*address 0x19E*/ + uint8_t usbivect; /*address 0x1A0*/ + uint8_t fifoivect; /*address 0x1A1*/ + uint8_t endprst; /*address 0x1A2*/ + uint8_t usbcs; /*address 0x1A3*/ + uint16_t frmnr; /*address 0x1A4*/ + uint8_t fnaddr; /*address 0x1A6*/ + uint8_t clkgate; /*address 0x1A7*/ + uint8_t fifoctrl; /*address 0x1A8*/ + uint8_t speedctrl; /*address 0x1A9*/ + uint8_t reserved8[1]; /*address 0x1AA*/ + uint8_t portctrl; /*address 0x1AB*/ + uint16_t hcfrmnr; /*address 0x1AC*/ + uint16_t hcfrmremain; /*address 0x1AE*/ + uint8_t reserved9[4]; /*address 0x1B0*/ + uint16_t rxerrirq; /*address 0x1B4*/ + uint16_t txerrirq; /*address 0x1B6*/ + uint16_t 
rxerrien; /*address 0x1B8*/ + uint16_t txerrien; /*address 0x1BA*/ + /*OTG extension*/ + uint8_t otgirq; /*address 0x1BC*/ + uint8_t otgstate; /*address 0x1BD*/ + uint8_t otgctrl; /*address 0x1BE*/ + uint8_t otgstatus; /*address 0x1BF*/ + uint8_t otgien; /*address 0x1C0*/ + uint8_t taaidlbdis; /*address 0x1C1*/ + uint8_t tawaitbcon; /*address 0x1C2*/ + uint8_t tbvbuspls; /*address 0x1C3*/ + uint8_t otg2ctrl; /*address 0x1C4*/ + uint8_t reserved10[2]; /*address 0x1C5*/ + uint8_t tbvbusdispls; /*address 0x1C7*/ + uint8_t traddr; /*address 0x1C8*/ + uint8_t trwdata; /*address 0x1C9*/ + uint8_t trrdata; /*address 0x1CA*/ + uint8_t trctrl; /*address 0x1CB*/ + uint16_t isoautoarm; /*address 0x1CC*/ + uint8_t adpbc1ien; /*address 0x1CE*/ + uint8_t adpbc2ien; /*address 0x1CF*/ + uint8_t adpbcctr0; /*address 0x1D0*/ + uint8_t adpbcctr1; /*address 0x1D1*/ + uint8_t adpbcctr2; /*address 0x1D2*/ + uint8_t adpbc1irq; /*address 0x1D3*/ + uint8_t adpbc0status; /*address 0x1D4*/ + uint8_t adpbc1status; /*address 0x1D5*/ + uint8_t adpbc2status; /*address 0x1D6*/ + uint8_t adpbc2irq; /*address 0x1D7*/ + uint16_t isodctrl; /*address 0x1D8*/ + uint8_t reserved11[2]; + uint16_t isoautodump; /*address 0x1DC*/ + uint8_t reserved12[2]; + uint8_t ep0maxpack; /*address 0x1E0*/ + uint8_t reserved13; + uint16_t rxmaxpack[15]; /*address 0x1E2*/ + struct rxsoftimer { /*address 0x200 to 0x23F*/ + uint16_t timer; + uint8_t reserved; + uint8_t ctrl; + } rxsoftimer[16]; + + struct txsoftimer { /*address 0x240 to 0x27F*/ + uint16_t timer; + uint8_t reserved; + uint8_t ctrl; + } txsoftimer[16]; + uint8_t reserved14[132]; + struct rxstaddr { /*address 0x304*/ + uint16_t addr; + uint16_t reserved; + } rxstaddr[15]; + uint8_t reserved15[4]; + struct txstaddr { /*address 0x344*/ + uint16_t addr; + uint16_t reserved; + } txstaddr[15]; + int8_t reserved16[4]; /*address 0x380*/ + struct irqmode { /*address 0x384*/ + int8_t inirqmode; + int8_t reserved21; + int8_t outirqmode; + int8_t reserved22; + } irqmode[15]; + /*The Microprocessor control*/ + uint8_t cpuctrl; /*address 0x3C0*/ + int8_t reserved17[15]; + /*The debug counters and workarounds*/ + uint8_t debug_rx_bcl; /*address 0x3D0*/ + uint8_t debug_rx_bch; /*address 0x3D1*/ + uint8_t debug_rx_status; /*address 0x3D2*/ + uint8_t debug_irq; /*address 0x3D3*/ + uint8_t debug_tx_bcl; /*address 0x3D4*/ + uint8_t debug_tx_bch; /*address 0x3D5*/ + uint8_t debug_tx_status; /*address 0x3D6*/ + uint8_t debug_ien; /*address 0x3D7*/ + uint8_t phywa_en; /*address 0x3D8*/ + /*endian*/ + uint8_t wa1_cnt; /*address 0x3D9*/ + int8_t reserved18[2]; /*address 0x3DA*/ + uint8_t endian_sfr_cs; /*address 0x3DC*/ + int8_t reserved19[2]; /*address 0x3DD*/ + uint8_t endian_sfr_s; /*address 0x3DF*/ + int8_t reserved20[2]; /*address 0x3E0*/ + uint16_t txmaxpack[15]; /*address 0x3E2*/ +}; + +struct CUSTOM_REGS { + uint32_t secure_ctrl; /*address 0x80000*/ + uint32_t secsid_atst; /*address 0x80004*/ + uint32_t nsaid_smmuid; /*address 0x80008*/ + uint32_t ace; /*address 0x8000c*/ + uint32_t wakeup; /*address 0x80010*/ + uint32_t debug; /*address 0x80014*/ +}; + +struct VHUB_REGS { + uint32_t gen_cfg; /*address 0x00*/ + uint32_t gen_st; /*address 0x04*/ + uint32_t bc_cfg; /*address 0x08*/ + uint32_t bc_st; /*address 0x0c*/ + uint32_t adp_cfg; /*address 0x10*/ + uint32_t adp_st; /*address 0x14*/ + uint32_t dbg_cfg; /*address 0x18*/ + uint32_t dbg_st; /*address 0x1c*/ + uint32_t utmip_cfg; /*address 0x20*/ + uint32_t utmip_st; /*address 0x24*/ +}; + +struct DMARegs { + uint32_t conf; /*address 0x400*/ + 
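/* Address annotations in this block are offsets within the controller's full MMIO window; phytium_host_init() points the DMA layer at base + 0x400, where these registers begin. */ +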
uint32_t sts; /*address 0x404*/ + uint32_t reserved5[5]; + uint32_t ep_sel; /*address 0x41C*/ + uint32_t traddr; /*address 0x420*/ + uint32_t ep_cfg; /*address 0x424*/ + uint32_t ep_cmd; /*address 0x428*/ + uint32_t ep_sts; /*address 0x42c*/ + uint32_t ep_sts_sid;/*address 0x430*/ + uint32_t ep_sts_en; /*address 0x434*/ + uint32_t drbl; /*address 0x438*/ + uint32_t ep_ien; /*address 0x43C*/ + uint32_t ep_ists; /*address 0x440*/ +}; + +#endif diff --git a/drivers/usb/phytium/pci.c b/drivers/usb/phytium/pci.c new file mode 100644 index 0000000000000000000000000000000000000000..964fd67bfc5321a2f5a3f0dfa47dbf5e1ff820a7 --- /dev/null +++ b/drivers/usb/phytium/pci.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/usb.h> +#include <linux/usb/gadget.h> +#include <linux/usb/otg.h> +#include "core.h" + +#define PHYTIUM_OTG_USB_LOADED 3 + +static bool phytium_hw_is_device(struct phytium_cusb *config) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return false; +} + +static bool phytium_hw_is_host(struct phytium_cusb *config) +{ + pr_info("%s %d\n", __func__, __LINE__); + + return true; +} + +static int phytium_get_dr_mode(struct phytium_cusb *config) +{ + enum usb_dr_mode mode; + + config->dr_mode = usb_get_dr_mode(config->dev); + if (config->dr_mode == USB_DR_MODE_UNKNOWN) + config->dr_mode = USB_DR_MODE_OTG; + + mode = config->dr_mode; + if (phytium_hw_is_device(config)) { + if (IS_ENABLED(CONFIG_USB_PHYTIUM_HOST)) { + dev_err(config->dev, "Controller does not support host mode.\n"); + return -EINVAL; + } + + mode = USB_DR_MODE_PERIPHERAL; + } else if (phytium_hw_is_host(config)) { + if (IS_ENABLED(CONFIG_USB_PHYTIUM_PERIPHERAL)) { + dev_err(config->dev, "Controller does not support device mode.\n"); + return -EINVAL; + } + mode = USB_DR_MODE_HOST; + } else { + if (IS_ENABLED(CONFIG_USB_PHYTIUM_HOST)) + mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_PHYTIUM_PERIPHERAL)) + mode = USB_DR_MODE_PERIPHERAL; + } + + if (mode != config->dr_mode) { + dev_warn(config->dev, "Configuration mismatch. dr_mode forced to %s\n", + mode == USB_DR_MODE_HOST ? 
"host" : "device"); + config->dr_mode = mode; + } + + return 0; +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct phytium_cusb *config; + int retval = 0; + + if (usb_disabled()) + return -ENODEV; + + retval = pcim_enable_device(pdev); + if (retval < 0) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return -ENODEV; + } + pci_set_master(pdev); + + config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + spin_lock_init(&config->lock); + config->dev = &pdev->dev; + + config->irq = pdev->irq; + if (config->irq <= 0) { + dev_err(config->dev, "getting usb irq failed\n"); + return config->irq; + } + + config->regs = pci_iomap(pdev, 0, 0); + if (IS_ERR(config->regs)) { + dev_err(config->dev, "map IOMEM resource failed\n"); + return -1; + } + + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev_err(&pdev->dev, "failed to set 64-bit dma\n"); + else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + dev_err(&pdev->dev, "failed to set 32-bit dma\n"); + + pci_enable_msi(pdev); + + phytium_get_dr_mode(config); + + phytium_core_reset(config, false); + + if (config->dr_mode == USB_DR_MODE_HOST || config->dr_mode == USB_DR_MODE_OTG) + phytium_host_init(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_init(config); + + dev_set_drvdata(config->dev, config); + + return 0; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_cusb *config = dev_get_drvdata(&pdev->dev); + + phytium_get_dr_mode(config); + if (config->dr_mode == USB_DR_MODE_HOST || config->dr_mode == USB_DR_MODE_OTG) + phytium_host_uninit(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_uninit(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + usb_del_gadget_udc(&config->gadget); + + dev_set_drvdata(&pdev->dev, NULL); + pr_info("%s %d\n", __func__, __LINE__); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct phytium_cusb *config; + + config = dev_get_drvdata(&pdev->dev); + + phytium_get_dr_mode(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || config->dr_mode == USB_DR_MODE_OTG) + usb_del_gadget_udc(&config->gadget); +} + +#ifdef CONFIG_PM +static int phytium_pci_resume(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct phytium_cusb *config; + int ret = 0; + + config = dev_get_drvdata(&pdev->dev); + + spin_lock_irqsave(&config->lock, flags); + ret = phytium_host_resume(config); + spin_unlock_irqrestore(&config->lock, flags); + + return ret; +} + +static int phytium_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + unsigned long flags = 0; + struct phytium_cusb *config; + int ret; + + config = dev_get_drvdata(&pdev->dev); + + spin_lock_irqsave(&config->lock, flags); + ret = phytium_host_suspend(config); + spin_unlock_irqrestore(&config->lock, flags); + + return 0; +} +#endif + +const struct pci_device_id phytium_pci_id_table[] = { + {0x10ee, 0x8012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {} +}; + +static struct pci_driver phytium_otg_driver = { + .name = "phytium_usb", + .id_table = phytium_pci_id_table, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, +#ifdef CONFIG_PM + .resume = phytium_pci_resume, + .suspend = phytium_pci_suspend, 
+#endif +}; + +module_pci_driver(phytium_otg_driver); + +MODULE_AUTHOR("Chen Zhenhua "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium usb pci wrapper"); diff --git a/drivers/usb/phytium/platform.c b/drivers/usb/phytium/platform.c new file mode 100644 index 0000000000000000000000000000000000000000..7d39e4b6034d191d0b306b7b15ceadfb6887b79e --- /dev/null +++ b/drivers/usb/phytium/platform.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/acpi.h> +#include <linux/usb/otg.h> +#include "core.h" +#include "hw-regs.h" + +#define PHYTIUM_OTG_USB_LOADED 3 +#define USB2_2_BASE_ADDRESS 0x31800000 + +static const struct of_device_id phytium_otg_of_match[] = { + { + .compatible = "phytium,usb2", + }, + {}, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_otg_acpi_match[] = { + { "PHYT0037", 0 }, + { } +}; +#endif + +static int phytium_get_dr_mode(struct phytium_cusb *config) +{ + config->dr_mode = usb_get_dr_mode(config->dev); + if (config->dr_mode == USB_DR_MODE_UNKNOWN) + config->dr_mode = USB_DR_MODE_PERIPHERAL; + + return 0; +} + +static irqreturn_t platform_usb_irq(int irq, void *dev_id) +{ + unsigned long flags; + uint8_t otgstate; + + struct phytium_cusb *config = (struct phytium_cusb *)dev_id; + struct GADGET_CTRL *gadget_ctrl = config->gadget_priv; + struct HOST_CTRL *host_ctrl = config->host_priv; + + if (gadget_ctrl || host_ctrl) { + if (host_ctrl) + otgstate = phytium_read8(&host_ctrl->regs->otgstate); + else + otgstate = phytium_read8(&gadget_ctrl->regs->otgstate); + + spin_lock_irqsave(&config->lock, flags); + /* only dispatch to a role whose private data actually exists */ + if (otgstate > HOST_OTG_STATE_A_WAIT_VFALL) { + if (gadget_ctrl) + config->gadget_obj->gadget_isr(config->gadget_priv); + } else if (host_ctrl) { + config->host_obj->host_isr(config->host_priv); + } + spin_unlock_irqrestore(&config->lock, flags); + } + + return IRQ_HANDLED; +} + +static int phytium_driver_probe(struct platform_device *pdev) +{ + struct phytium_cusb *config; + struct resource *res, *phy_res; + int retval = 0; + + config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + spin_lock_init(&config->lock); + config->dev = &pdev->dev; + config->isVhubHost = false; + + config->irq = platform_get_irq(pdev, 0); + if (config->irq <= 0) { + dev_err(config->dev, "getting usb irq failed\n"); + return config->irq < 0 ? config->irq : -ENODEV; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + /* devm_ioremap() returns NULL on failure, not an ERR_PTR() value */ + config->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!config->regs) { + dev_err(config->dev, "map IOMEM resource failed\n"); + return -ENOMEM; + } + + phy_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!phy_res) + return -ENODEV; + + config->phy_regs = devm_ioremap(&pdev->dev, phy_res->start, resource_size(phy_res)); + if (!config->phy_regs) { + dev_err(config->dev, "map IOMEM phy resource failed\n"); + return -ENOMEM; + } + + phytium_get_dr_mode(config); + + phytium_core_reset(config, false); + + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) { + if (res->start == USB2_2_BASE_ADDRESS) + config->isVhubHost = true; + + phytium_host_init(config); + } + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_init(config); + + dev_set_drvdata(&pdev->dev, config); + + if (config->irq > 0) { + retval = devm_request_irq(config->dev, config->irq, platform_usb_irq, + IRQF_SHARED, "phytium_otg", config); + if (retval != 0) { + dev_err(config->dev, "request irq %d err %d\n", config->irq, retval); + config->irq = 0; + } + } + + return 
retval; +} + +static int phytium_driver_remove(struct platform_device *dev) +{ + struct phytium_cusb *config = platform_get_drvdata(dev); + + if (!config) + return 0; + + phytium_get_dr_mode(config); + + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) + phytium_host_uninit(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + phytium_gadget_uninit(config); + + dev_set_drvdata(&dev->dev, NULL); + return 0; +} + +static void phytium_driver_shutdown(struct platform_device *dev) +{ + pr_info("%s %d\n", __func__, __LINE__); +} + +#ifdef CONFIG_PM +static int phytium_driver_suspend(struct device *dev) +{ + struct phytium_cusb *config; + int ret = 0; + + config = dev_get_drvdata(dev); + + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_host_suspend(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_gadget_suspend(config); + + return ret; +} + +static int phytium_driver_resume(struct device *dev) +{ + struct phytium_cusb *config; + int ret = 0; + + config = dev_get_drvdata(dev); + if (config->dr_mode == USB_DR_MODE_HOST || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_host_resume(config); + + if (config->dr_mode == USB_DR_MODE_PERIPHERAL || + config->dr_mode == USB_DR_MODE_OTG) + ret = phytium_gadget_resume(config); + + return ret; +} + +static const struct dev_pm_ops phytium_usb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_driver_suspend, phytium_driver_resume) +}; +#endif + +static struct platform_driver phytium_otg_driver = { + .driver = { + .name = "phytium-otg", + .of_match_table = of_match_ptr(phytium_otg_of_match), + .acpi_match_table = ACPI_PTR(phytium_otg_acpi_match), +#ifdef CONFIG_PM + .pm = &phytium_usb_pm_ops, +#endif + }, + .probe = phytium_driver_probe, + .remove = phytium_driver_remove, + .shutdown = phytium_driver_shutdown, +}; + +module_platform_driver(phytium_otg_driver); + +MODULE_AUTHOR("Chen Zhenhua "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium usb platform wrapper"); diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 00827d2897b534bcbefef21999bae9989aca5dd3..63be9ed503f915111d5e6686821e49e6cf500528 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig @@ -64,5 +64,15 @@ config HDQ_MASTER_OMAP Say Y here if you want support for the 1-wire or HDQ Interface on an OMAP processor. +config W1_MASTER_PHYTIUM + tristate "Phytium 1-wire driver" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + Say Y here if you want support for the 1-wire interface + on a Phytium SoC. + + This driver can also be built as a module. If so, the module + will be called phytium-w1. 
+ endmenu diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile index 18954cae42567283206b7eea52e7f07174a83e8f..e27abaa8f2a2fff673c51e04443da3735d96d92a 100644 --- a/drivers/w1/masters/Makefile +++ b/drivers/w1/masters/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o +obj-$(CONFIG_W1_MASTER_PHYTIUM) += phytium_w1.o diff --git a/drivers/w1/masters/phytium_w1.c b/drivers/w1/masters/phytium_w1.c new file mode 100644 index 0000000000000000000000000000000000000000..9a84802ac2fb7045d6defe935f23eb178d393edc --- /dev/null +++ b/drivers/w1/masters/phytium_w1.c @@ -0,0 +1,579 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/w1/masters/phytium_w1.c + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/acpi.h> +#include <linux/w1.h> + +#define PHY_W1M_CTL 0x08 + +/* Simplify mode */ +#define PHY_W1M_CMD 0x04 +#define PHY_W1M_PWM0_START_B 0x30 +#define PHY_W1M_PWM0_END_B 0x34 +#define PHY_W1M_PWM1_START_B 0x38 +#define PHY_W1M_PWM1_END_B 0x3c +#define PHY_W1M_SAMPLE_B 0x40 +#define PHY_W1M_INT_EN_B 0x64 +#define PHY_W1M_INT_STATUS_B 0x74 +#define PHY_W1M_DATA_REG 0x70 + +#define PHY_W1M_CMD_ROM_SEARCH 0xF0 +#define PHY_W1M_CMD_WRITE_BYTE 0x36 +#define PHY_W1M_CMD_RESET_BUS 0x37 +#define PHY_W1M_CMD_READ_BYTE 0x3B +#define PHY_W1M_SLAVE_ROM_ID 0x160 + +#define PHY_W1M_INT_EN_TXCOMPLETE BIT(6) +#define PHY_W1M_INT_EN_RXCOMPLETE BIT(7) + +#define PHY_W1M_INT_STATUS_TXCOMPLETE BIT(6) +#define PHY_W1M_INT_STATUS_RXCOMPLETE BIT(7) + +#define W1M_MOD_W1 1 +#define W1M_MOD_PECI 0 + +#define PHY_W1M_FLAG_CLEAR 0 +#define PHY_W1M_FLAG_SET 1 +#define PHY_W1M_TIMEOUT (HZ/5) + +#define PHY_W1M_MAX_USER 4 + +static DECLARE_WAIT_QUEUE_HEAD(w1m_wait_queue); + +struct w1m_data { + struct device *dev; + void __iomem *w1m_base; + /* lock status update */ + struct mutex w1m_mutex; + int w1m_usecount; + u8 w1m_irqstatus; + /* device lock */ + spinlock_t w1m_spinlock; + /* + * Used to control the call to phytium_w1m_get and phytium_w1m_put. + * Non-w1 protocol: write the CMD|REG_address first, followed by + * the data write or read. 
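+ * A read that follows such a write is treated as part of the same transaction; init_trans below counts the steps so the module/runtime-PM references taken in phytium_w1m_get() are released once the data phase completes.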
+ */ + int init_trans; +}; + +/* W1 register I/O routines */ +static inline u8 phytium_w1m_read(struct w1m_data *w1m_data, u32 offset) +{ + return readl(w1m_data->w1m_base + offset); +} + +static inline void phytium_w1m_write(struct w1m_data *w1m_data, u32 offset, u8 val) +{ + writel(val, w1m_data->w1m_base + offset); +} + +static inline u8 phytium_w1m_merge(struct w1m_data *w1m_data, u32 offset, + u8 val, u8 mask) +{ + u8 new_val = (__raw_readl(w1m_data->w1m_base + offset) & ~mask) + | (val & mask); + writel(new_val, w1m_data->w1m_base + offset); + + return new_val; +} + +static void w1m_disable_interrupt(struct w1m_data *w1m_data, u32 offset, + u32 mask) +{ + u32 ie; + + ie = readl(w1m_data->w1m_base + offset); + writel(ie & mask, w1m_data->w1m_base + offset); +} + +/* write out a byte and fill *status with W1M_INT_STATUS */ +static int phytium_write_byte(struct w1m_data *w1m_data, u8 val, u8 *status) +{ + int ret; + unsigned long irqflags; + + *status = 0; + + spin_lock_irqsave(&w1m_data->w1m_spinlock, irqflags); + /* clear interrupt flags via a dummy read */ + phytium_w1m_read(w1m_data, PHY_W1M_INT_STATUS_B); + /* ISR loads it with new INT_STATUS */ + w1m_data->w1m_irqstatus = 0; + spin_unlock_irqrestore(&w1m_data->w1m_spinlock, irqflags); + + phytium_w1m_merge(w1m_data, PHY_W1M_INT_EN_B, + PHY_W1M_INT_EN_TXCOMPLETE, + PHY_W1M_INT_EN_TXCOMPLETE); + + phytium_w1m_write(w1m_data, PHY_W1M_DATA_REG, val); + phytium_w1m_write(w1m_data, PHY_W1M_CMD, PHY_W1M_CMD_WRITE_BYTE); + + /* wait for the TXCOMPLETE bit */ + ret = wait_event_timeout(w1m_wait_queue, + w1m_data->w1m_irqstatus, PHY_W1M_TIMEOUT); + if (ret == 0) { + dev_err(w1m_data->dev, "TX wait elapsed\n"); + ret = -ETIMEDOUT; + goto out; + } + + *status = w1m_data->w1m_irqstatus; + /* check irqstatus */ + if (!(*status & PHY_W1M_INT_STATUS_TXCOMPLETE)) { + dev_err(w1m_data->dev, + "timeout waiting for TXCOMPLETE, %x\n", *status); + ret = -ETIMEDOUT; + } + +out: + return ret; +} + +static irqreturn_t w1m_isr(int irq, void *_w1m) +{ + struct w1m_data *w1m_data = _w1m; + unsigned long irqflags; + + spin_lock_irqsave(&w1m_data->w1m_spinlock, irqflags); + w1m_data->w1m_irqstatus = phytium_w1m_read(w1m_data, PHY_W1M_INT_STATUS_B); + spin_unlock_irqrestore(&w1m_data->w1m_spinlock, irqflags); + + phytium_w1m_write(w1m_data, PHY_W1M_INT_STATUS_B, 0x00); + if (w1m_data->w1m_irqstatus & + (PHY_W1M_INT_STATUS_TXCOMPLETE | PHY_W1M_INT_STATUS_RXCOMPLETE)) { + /* wake up sleeping process */ + wake_up(&w1m_wait_queue); + } + + return IRQ_HANDLED; +} + +static int phytium_read_byte(struct w1m_data *w1m_data, u8 *val) +{ + int ret = 0; + u8 status; + unsigned long irqflags; + + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) { + ret = -EINTR; + goto rtn; + } + + if (!w1m_data->w1m_usecount) { + ret = -EINVAL; + goto out; + } + + spin_lock_irqsave(&w1m_data->w1m_spinlock, irqflags); + /* clear interrupt flags via a dummy read */ + phytium_w1m_read(w1m_data, PHY_W1M_INT_STATUS_B); + /* ISR loads it with new INT_STATUS */ + w1m_data->w1m_irqstatus = 0; + spin_unlock_irqrestore(&w1m_data->w1m_spinlock, irqflags); + + phytium_w1m_merge(w1m_data, PHY_W1M_INT_EN_B, + PHY_W1M_INT_EN_RXCOMPLETE, + PHY_W1M_INT_EN_RXCOMPLETE); + + phytium_w1m_write(w1m_data, PHY_W1M_CMD, PHY_W1M_CMD_READ_BYTE); + + wait_event_timeout(w1m_wait_queue, + (w1m_data->w1m_irqstatus & PHY_W1M_INT_STATUS_RXCOMPLETE), + PHY_W1M_TIMEOUT); + + status = w1m_data->w1m_irqstatus; + /* check irqstatus */ + if (!(status & PHY_W1M_INT_STATUS_RXCOMPLETE)) { + dev_err(w1m_data->dev, "timeout waiting for 
RXCOMPLETE, %x", status); + ret = -ETIMEDOUT; + goto out; + } + + /* the data is ready. Read it in! */ + *val = phytium_w1m_read(w1m_data, PHY_W1M_DATA_REG); +out: + mutex_unlock(&w1m_data->w1m_mutex); +rtn: + return ret; +} + +static int phytium_w1m_get(struct w1m_data *w1m_data) +{ + int ret = 0; + + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) { + ret = -EINTR; + goto rtn; + } + + if (w1m_data->w1m_usecount == PHY_W1M_MAX_USER) { + dev_warn(w1m_data->dev, "attempt to exceed the max use count"); + ret = -EINVAL; + goto out; + } else { + w1m_data->w1m_usecount++; + try_module_get(THIS_MODULE); + if (w1m_data->w1m_usecount == 1) + pm_runtime_get_sync(w1m_data->dev); + } + +out: + mutex_unlock(&w1m_data->w1m_mutex); +rtn: + return ret; +} + +/* Disable clocks to the module */ +static int phytium_w1m_put(struct w1m_data *w1m_data) +{ + int ret = 0; + + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) + return -EINTR; + + if (w1m_data->w1m_usecount == 0) { + dev_warn(w1m_data->dev, + "attempt to decrement use count when it is zero"); + ret = -EINVAL; + } else { + w1m_data->w1m_usecount--; + module_put(THIS_MODULE); + if (w1m_data->w1m_usecount == 0) + pm_runtime_put_sync(w1m_data->dev); + } + mutex_unlock(&w1m_data->w1m_mutex); + + return ret; +} + +/* + * W1 triplet callback function - used for searching ROM addresses. + * Registered only when controller is in 1-wire mode. + */ +static u8 phytium_w1_triplet(void *_w1m, u8 bdir) +{ + u8 id_bit, comp_bit; + int err; + u8 ret = 0x3; /* no slaves responded */ + struct w1m_data *w1m_data = _w1m; + + phytium_w1m_get(_w1m); + + err = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (err < 0) { + dev_err(w1m_data->dev, "Could not acquire mutex\n"); + goto rtn; + } + + w1m_data->w1m_irqstatus = 0; + /* read id_bit */ + phytium_w1m_merge(w1m_data, PHY_W1M_INT_EN_B, + PHY_W1M_INT_EN_RXCOMPLETE, + PHY_W1M_INT_EN_RXCOMPLETE); + phytium_w1m_write(w1m_data, PHY_W1M_CMD, 0x3a); + + err = wait_event_timeout(w1m_wait_queue, + (w1m_data->w1m_irqstatus + & PHY_W1M_INT_STATUS_RXCOMPLETE), + PHY_W1M_TIMEOUT); + if (err == 0) { + dev_err(w1m_data->dev, "RX wait elapsed\n"); + goto out; + } + id_bit = (phytium_w1m_read(_w1m, PHY_W1M_DATA_REG) & 0x01); + + w1m_data->w1m_irqstatus = 0; + /* read comp_bit */ + phytium_w1m_merge(w1m_data, PHY_W1M_INT_EN_B, + PHY_W1M_INT_EN_RXCOMPLETE, + PHY_W1M_INT_EN_RXCOMPLETE); + phytium_w1m_write(w1m_data, PHY_W1M_CMD, 0x3a); + err = wait_event_timeout(w1m_wait_queue, + (w1m_data->w1m_irqstatus + & PHY_W1M_INT_STATUS_RXCOMPLETE), + PHY_W1M_TIMEOUT); + if (err == 0) { + dev_err(w1m_data->dev, "RX wait elapsed\n"); + goto out; + } + comp_bit = (phytium_w1m_read(_w1m, PHY_W1M_DATA_REG) & 0x01); + + if (id_bit && comp_bit) { + ret = 0x03; /* no slaves responded */ + goto out; + } + if (!id_bit && !comp_bit) { + /* Both bits are valid, take the direction given */ + ret = bdir ? 0x04 : 0; + } else { + /* Only one bit is valid, take that direction */ + bdir = id_bit; + ret = id_bit ? 
0x05 : 0x02; + } + + w1m_data->w1m_irqstatus = 0; + /* write bdir bit */ + phytium_w1m_merge(w1m_data, PHY_W1M_INT_EN_B, + PHY_W1M_INT_EN_TXCOMPLETE, + PHY_W1M_INT_EN_TXCOMPLETE); + + phytium_w1m_write(w1m_data, PHY_W1M_DATA_REG, bdir); + phytium_w1m_write(w1m_data, PHY_W1M_CMD, 0x35); + + err = wait_event_timeout(w1m_wait_queue, + (w1m_data->w1m_irqstatus + & PHY_W1M_INT_STATUS_TXCOMPLETE), + PHY_W1M_TIMEOUT); + if (err == 0) { + dev_err(w1m_data->dev, "TX wait elapsed\n"); + goto out; + } + +out: + mutex_unlock(&w1m_data->w1m_mutex); +rtn: + phytium_w1m_put(_w1m); + return ret; +} + +/* reset callback */ +static u8 phytium_w1_reset_bus(void *_w1m) +{ + phytium_w1m_get(_w1m); + phytium_w1m_write(_w1m, PHY_W1M_CMD, 0x37); + mdelay(1); + phytium_w1m_put(_w1m); + return 0; +} + +/* Read a byte of data from the device */ +static u8 phytium_w1_read_byte(void *_w1m) +{ + struct w1m_data *w1m_data = _w1m; + u8 val = 0; + int ret; + + /* First write to initialize the transfer */ + if (w1m_data->init_trans == 0) + phytium_w1m_get(w1m_data); + + w1m_data->init_trans++; + ret = phytium_read_byte(w1m_data, &val); + if (ret) { + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) { + dev_err(w1m_data->dev, "Could not acquire mutex\n"); + return -EINTR; + } + w1m_data->init_trans = 0; + mutex_unlock(&w1m_data->w1m_mutex); + phytium_w1m_put(w1m_data); + return -1; + } + + w1m_disable_interrupt(w1m_data, PHY_W1M_INT_EN_B, + ~((u32)PHY_W1M_INT_EN_RXCOMPLETE)); + + /* Write followed by a read, release the module */ + if (w1m_data->init_trans) { + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) { + dev_err(w1m_data->dev, "Could not acquire mutex\n"); + return -EINTR; + } + w1m_data->init_trans = 0; + mutex_unlock(&w1m_data->w1m_mutex); + phytium_w1m_put(w1m_data); + } + + return val; +} + +/* Write a byte of data to the device */ +static void phytium_w1_write_byte(void *_w1m, u8 byte) +{ + struct w1m_data *w1m_data = _w1m; + int ret; + u8 status; + + /* First write to initialize the transfer */ + if (w1m_data->init_trans == 0) + phytium_w1m_get(w1m_data); + + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) { + dev_err(w1m_data->dev, "Could not acquire mutex\n"); + return; + } + mutex_unlock(&w1m_data->w1m_mutex); + + ret = phytium_write_byte(w1m_data, byte, &status); + if (ret < 0) { + dev_err(w1m_data->dev, "TX failure:Ctrl status %x\n", status); + return; + } + + w1m_data->init_trans++; + w1m_disable_interrupt(w1m_data, PHY_W1M_INT_EN_B, + ~((u32)PHY_W1M_INT_EN_TXCOMPLETE)); + /* Second write, data transferred. 
Release the module */ + if (w1m_data->init_trans > 1) { + phytium_w1m_put(w1m_data); + ret = mutex_lock_interruptible(&w1m_data->w1m_mutex); + if (ret < 0) { + dev_err(w1m_data->dev, "Could not acquire mutex\n"); + return; + } + w1m_data->init_trans = 0; + mutex_unlock(&w1m_data->w1m_mutex); + } +} + +static struct w1_bus_master phytium_w1_master = { + .read_byte = phytium_w1_read_byte, + .write_byte = phytium_w1_write_byte, + .reset_bus = phytium_w1_reset_bus, +}; + +static int phytium_w1m_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct w1m_data *w1m_data; + struct resource *res; + int ret, irq; + + w1m_data = devm_kzalloc(dev, sizeof(*w1m_data), GFP_KERNEL); + if (!w1m_data) + return -ENOMEM; + + w1m_data->dev = dev; + platform_set_drvdata(pdev, w1m_data); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + w1m_data->w1m_base = devm_ioremap_resource(dev, res); + if (IS_ERR(w1m_data->w1m_base)) + return PTR_ERR(w1m_data->w1m_base); + + w1m_data->w1m_usecount = 0; + mutex_init(&w1m_data->w1m_mutex); + + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "pm_runtime_get_sync failed\n"); + pm_runtime_put_noidle(&pdev->dev); + goto err_w1; + } + + spin_lock_init(&w1m_data->w1m_spinlock); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq); + ret = irq; + goto err_irq; + } + + ret = devm_request_irq(dev, irq, w1m_isr, 0, "phytium-w1", w1m_data); + if (ret < 0) { + dev_err(&pdev->dev, "could not request irq\n"); + goto err_irq; + } + + pm_runtime_put_sync(&pdev->dev); + + phytium_w1_master.triplet = phytium_w1_triplet; + phytium_w1m_write(w1m_data, PHY_W1M_CTL, 0x10); + phytium_w1m_write(w1m_data, PHY_W1M_INT_EN_B, 0x00); + + phytium_w1_master.data = w1m_data; + + ret = w1_add_master_device(&phytium_w1_master); + if (ret) { + dev_err(&pdev->dev, "Failure in registering w1 master\n"); + goto err_w1; + } + + return 0; + +err_irq: + pm_runtime_put_sync(&pdev->dev); +err_w1: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int phytium_w1m_remove(struct platform_device *pdev) +{ + struct w1m_data *w1m_data = platform_get_drvdata(pdev); + + mutex_lock(&w1m_data->w1m_mutex); + + if (w1m_data->w1m_usecount) { + dev_warn(&pdev->dev, "removed when use count is not zero\n"); + mutex_unlock(&w1m_data->w1m_mutex); + return -EBUSY; + } + + mutex_unlock(&w1m_data->w1m_mutex); + + w1_remove_master_device(&phytium_w1_master); + + /* remove module dependency */ + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id phytium_w1m_dt_ids[] = { + { .compatible = "phytium,w1" }, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_w1m_dt_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_w1m_acpi_ids[] = { + { "PHYT0034", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_w1m_acpi_ids); +#endif + +static struct platform_driver phytium_w1m_driver = { + .probe = phytium_w1m_probe, + .remove = phytium_w1m_remove, + .driver = { + .name = "phytium-w1", + .of_match_table = phytium_w1m_dt_ids, + .acpi_match_table = ACPI_PTR(phytium_w1m_acpi_ids), + }, +}; +module_platform_driver(phytium_w1m_driver); + +MODULE_AUTHOR("Zhu Mingshuai "); +MODULE_DESCRIPTION("Phytium w1 bus master driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 
8fe59b7d8eec855d8eee56cb5949aefb8a179dd5..516782cff8d28008eb874b238aef0351ba84909c 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -145,7 +145,7 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd) ktime_t t = watchdog_next_keepalive(wdd); if (t > 0) - hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL); + hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL_HARD); } else { hrtimer_cancel(&wd_data->timer); } @@ -164,7 +164,7 @@ static int __watchdog_ping(struct watchdog_device *wdd) if (ktime_after(earliest_keepalive, now)) { hrtimer_start(&wd_data->timer, ktime_sub(earliest_keepalive, now), - HRTIMER_MODE_REL); + HRTIMER_MODE_REL_HARD); return 0; } @@ -959,7 +959,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); kthread_init_work(&wd_data->work, watchdog_ping_work); - hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); wd_data->timer.function = watchdog_timer_expired; if (wdd->id == 0) { @@ -1007,7 +1007,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) __module_get(wdd->ops->owner); get_device(&wd_data->dev); if (handle_boot_enabled) - hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL); + hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL_HARD); else pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n", wdd->id); diff --git a/fs/aio.c b/fs/aio.c index 9635c29b83da178db894d669753b2f875e48f420..6deff68b92c75b4fcd79ad66ef49d59eb00e7a2f 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -121,6 +121,7 @@ struct kioctx { long nr_pages; struct rcu_work free_rwork; /* see free_ioctx() */ + struct work_struct free_work; /* see free_ioctx() */ /* * signals when all in-flight requests are done @@ -608,9 +609,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - * now it's safe to cancel any that need to be. */ -static void free_ioctx_users(struct percpu_ref *ref) +static void free_ioctx_users_work(struct work_struct *work) { - struct kioctx *ctx = container_of(ref, struct kioctx, users); + struct kioctx *ctx = container_of(work, struct kioctx, free_work); struct aio_kiocb *req; spin_lock_irq(&ctx->ctx_lock); @@ -628,6 +629,14 @@ static void free_ioctx_users(struct percpu_ref *ref) percpu_ref_put(&ctx->reqs); } +static void free_ioctx_users(struct percpu_ref *ref) +{ + struct kioctx *ctx = container_of(ref, struct kioctx, users); + + INIT_WORK(&ctx->free_work, free_ioctx_users_work); + schedule_work(&ctx->free_work); +} + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) { unsigned i, new_nr; diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c index 70e9afe589fbf2b730c3a0d51ec4b8616f593e63..1a6b88ad4fe0b693077ab244634b6626bd494773 100644 --- a/fs/autofs/expire.c +++ b/fs/autofs/expire.c @@ -8,6 +8,7 @@ * option, any later version, incorporated herein by reference. 
*/ +#include #include "autofs_i.h" /* Check if a dentry can be expired */ @@ -153,7 +154,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev, parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { spin_unlock(&p->d_lock); - cpu_relax(); + cpu_chill(); goto relock; } spin_unlock(&p->d_lock); diff --git a/fs/buffer.c b/fs/buffer.c index 356e289d19f20a87d5c3eb07ed7b69d590e83532..ab18ec733ed3af1a658668c7d8e768d2fe40c0a7 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -274,8 +274,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) * decide that the page is now completely done. */ first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -288,8 +287,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); /* * If none of the buffers had errors and they are all @@ -301,9 +299,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } /* @@ -330,8 +326,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_write(bh); unlock_buffer(bh); @@ -343,15 +338,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); end_page_writeback(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } EXPORT_SYMBOL(end_buffer_async_write); @@ -3372,6 +3364,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); + buffer_head_init_locks(ret); preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 3925a7bfc74d61c6c85f8ab6537498f064512870..33f7723fb83e90030fd0c630896b969337903bfe 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, struct inode *inode; struct super_block *sb = parent->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); diff --git a/fs/dcache.c b/fs/dcache.c index 1897833a46685be6da4870a9cc130321d0e78f99..6e1c0b4c04f42fc07a6e4a071c37da1c188f34ca 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2406,9 +2406,10 @@ EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { + preempt_disable_rt(); for (;;) { - unsigned n = dir->i_dir_seq; - if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) + unsigned n = dir->__i_dir_seq; + if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n) return n; cpu_relax(); } @@ -2416,26 +2417,30 @@ static inline unsigned 
start_dir_add(struct inode *dir) static inline void end_dir_add(struct inode *dir, unsigned n) { - smp_store_release(&dir->i_dir_seq, n + 2); + smp_store_release(&dir->__i_dir_seq, n + 2); + preempt_enable_rt(); } static void d_wait_lookup(struct dentry *dentry) { - if (d_in_lookup(dentry)) { - DECLARE_WAITQUEUE(wait, current); - add_wait_queue(dentry->d_wait, &wait); - do { - set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(&dentry->d_lock); - schedule(); - spin_lock(&dentry->d_lock); - } while (d_in_lookup(dentry)); - } + struct swait_queue __wait; + + if (!d_in_lookup(dentry)) + return; + + INIT_LIST_HEAD(&__wait.task_list); + do { + prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&dentry->d_lock); + schedule(); + spin_lock(&dentry->d_lock); + } while (d_in_lookup(dentry)); + finish_swait(dentry->d_wait, &__wait); } struct dentry *d_alloc_parallel(struct dentry *parent, const struct qstr *name, - wait_queue_head_t *wq) + struct swait_queue_head *wq) { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); @@ -2449,7 +2454,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, retry: rcu_read_lock(); - seq = smp_load_acquire(&parent->d_inode->i_dir_seq); + seq = smp_load_acquire(&parent->d_inode->__i_dir_seq); r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { @@ -2477,7 +2482,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, } hlist_bl_lock(b); - if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { + if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) { hlist_bl_unlock(b); rcu_read_unlock(); goto retry; @@ -2550,7 +2555,7 @@ void __d_lookup_done(struct dentry *dentry) hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); - wake_up_all(dentry->d_wait); + swake_up_all(dentry->d_wait); dentry->d_wait = NULL; hlist_bl_unlock(b); INIT_HLIST_NODE(&dentry->d_u.d_alias); @@ -3077,6 +3082,8 @@ __setup("dhash_entries=", set_dhash_entries); static void __init dcache_init_early(void) { + unsigned int loop; + /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. 
*/ @@ -3093,11 +3100,16 @@ static void __init dcache_init_early(void) NULL, 0, 0); + + for (loop = 0; loop < (1U << d_hash_shift); loop++) + INIT_HLIST_BL_HEAD(dentry_hashtable + loop); + d_hash_shift = 32 - d_hash_shift; } static void __init dcache_init(void) { + unsigned int loop; /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature @@ -3121,6 +3133,10 @@ static void __init dcache_init(void) NULL, 0, 0); + + for (loop = 0; loop < (1U << d_hash_shift); loop++) + INIT_HLIST_BL_HEAD(dentry_hashtable + loop); + d_hash_shift = 32 - d_hash_shift; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a4a32b79e83215fb4f1683f0f8d5d44bf963794b..8569db5eceb0862cdb6a808c7c963d793d879585 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -569,12 +569,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) static void ep_poll_safewake(wait_queue_head_t *wq) { - int this_cpu = get_cpu(); + int this_cpu = get_cpu_light(); ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - put_cpu(); + put_cpu_light(); } #else diff --git a/fs/exec.c b/fs/exec.c index 28e3b5eb2f4a1f15877945c4418e5adf296be937..2c15772a1feb759e974100623b1f4fc1ecdc6123 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1028,6 +1028,7 @@ static int exec_mmap(struct mm_struct *mm) } } task_lock(tsk); + preempt_disable_rt(); local_irq_disable(); active_mm = tsk->active_mm; @@ -1047,6 +1048,7 @@ static int exec_mmap(struct mm_struct *mm) local_irq_enable(); tsk->mm->vmacache_seqnum = 0; vmacache_flush(tsk); + preempt_enable_rt(); task_unlock(tsk); if (old_mm) { up_read(&old_mm->mmap_sem); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 3de933354a08b1039cc1021304f0bd5bfe129965..838d9f4fc55427502616439c786b818a4b067684 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio) * We check all buffers in the page under BH_Uptodate_Lock * to avoid races with other end io clearing async_write flags */ - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &head->b_state); + flags = bh_uptodate_lock_irqsave(head); do { if (bh_offset(bh) < bio_start || bh_offset(bh) + bh->b_size > bio_end) { @@ -110,8 +109,7 @@ static void ext4_finish_bio(struct bio *bio) buffer_io_error(bh); } } while ((bh = bh->b_this_page) != head); - bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(head, flags); if (!under_io) { #ifdef CONFIG_EXT4_FS_ENCRYPTION if (data_page) diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 2ff05adfc22a44e5d9912230f9721034207373e7..84e57196a293045bfd8b16cc2b02558199994015 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -950,3 +950,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie, return -ESTALE; } EXPORT_SYMBOL(__fscache_check_consistency); + +void __init fscache_cookie_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fscache_cookie_hash); i++) + INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]); +} diff --git a/fs/fscache/main.c b/fs/fscache/main.c index aa49234e9520a71d97be73cc3d3584b3714f6e0c..95a0798de3811bcd7ca7576676862e57099c50d3 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -188,6 +188,7 @@ static int __init fscache_init(void) ret = -ENOMEM; goto error_cookie_jar; } + fscache_cookie_init(); fscache_root = kobject_create_and_add("fscache", kernel_kobj); if (!fscache_root) diff --git a/fs/fuse/dir.c 
b/fs/fuse/dir.c index 6098e0c7f87b37c7065b1ebd88696d94d60f74b8..9ce28840684ae3dc5a5b5d67f0530e3861ee106e 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1234,7 +1234,7 @@ static int fuse_direntplus_link(struct file *file, struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); if (!o->nodeid) { /* diff --git a/fs/inode.c b/fs/inode.c index c9eb5041ffae199f46c397280296258bd329efb1..e1fb0b743c96a9bc89d8ef6d217c73391b68a138 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -156,7 +156,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_bdev = NULL; inode->i_cdev = NULL; inode->i_link = NULL; - inode->i_dir_seq = 0; + inode->__i_dir_seq = 0; inode->i_rdev = 0; inode->dirtied_when = 0; diff --git a/fs/locks.c b/fs/locks.c index 28270e74be3428be2b518d4c26daa04e353cff39..552476d6f6bbde7aafb58a56bc0d9e4961df1b85 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) return -ENOMEM; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); if (request->fl_flags & FL_ACCESS) goto find_conflict; @@ -977,7 +977,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); if (new_fl) locks_free_lock(new_fl); locks_dispose_list(&dispose); @@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, new_fl2 = locks_alloc_lock(); } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); /* * New lock request. Walk all POSIX locks and look for conflicts. If @@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, } out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); /* * Free any unused locks. 
*/ @@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) return error; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); @@ -1514,13 +1514,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) locks_insert_block(fl, new_fl); trace_break_lease_block(inode, new_fl); spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); error = wait_event_interruptible_timeout(new_fl->fl_wait, !new_fl->fl_next, break_time); - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); trace_break_lease_unblock(inode, new_fl); locks_delete_block(new_fl); @@ -1537,7 +1537,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) } out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); locks_free_lock(new_fl); return error; @@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp) ctx = smp_load_acquire(&inode->i_flctx); if (ctx && !list_empty_careful(&ctx->flc_lease)) { - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { @@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp) break; } spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); } @@ -1693,7 +1693,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr return -EINVAL; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); error = check_conflicting_open(dentry, arg, lease->fl_flags); @@ -1764,7 +1764,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr lease->fl_lmops->lm_setup(lease, priv); out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); if (is_deleg) inode_unlock(inode); @@ -1787,7 +1787,7 @@ static int generic_delete_lease(struct file *filp, void *owner) return error; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { if (fl->fl_file == filp && @@ -1800,7 +1800,7 @@ static int generic_delete_lease(struct file *filp, void *owner) if (victim) error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); return error; } @@ -2531,13 +2531,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx) if (list_empty(&ctx->flc_lease)) return; - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) if (filp == fl->fl_file) lease_modify(fl, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); } diff --git a/fs/namei.c b/fs/namei.c index 327844fedf3d2e44bca1f5e986442f3993fb3e39..5114c81118077e104fb95991e1145b8054487411 100644 
--- a/fs/namei.c +++ b/fs/namei.c @@ -1646,7 +1646,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, { struct dentry *dentry, *old; struct inode *inode = dir->d_inode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) @@ -3136,7 +3136,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); if (unlikely(IS_DEADDIR(dir_inode))) return -ENOENT; diff --git a/fs/namespace.c b/fs/namespace.c index 396ff1bcfdadec6e580e8a28f0e9a6e9f15e9d29..2c0ff275e97805e3d68b749bc66f59abf81721dc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -14,6 +14,7 @@ #include #include #include +#include <linux/delay.h> #include #include #include @@ -327,8 +328,11 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); - while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) - cpu_relax(); + while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { + preempt_enable(); + cpu_chill(); + preempt_disable(); + } /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index b0c0c2fc2fbac8e353432225e032ff9fa4b65681..26565ba05dc1a027cd334f471bd85f5413a8d4e4 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode, sp = state->owner; /* Block nfs4_proc_unlck */ mutex_lock(&sp->so_delegreturn_mutex); - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); + seq = read_seqbegin(&sp->so_reclaim_seqlock); err = nfs4_open_delegation_recall(ctx, state, stateid); if (!err) err = nfs_delegation_claim_locks(state, stateid); - if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) + if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq)) err = -EAGAIN; mutex_unlock(&sp->so_delegreturn_mutex); put_nfs_open_context(ctx); diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 10bc04af2882441466e21097ab70d3a2caad7b67..d8376dae3f7d4d2faca1c7821b5219768d96e8a8 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -457,7 +457,7 @@ static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) { struct qstr filename = QSTR_INIT(entry->name, entry->len); - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); @@ -1519,7 +1519,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned open_flags, umode_t mode) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); struct nfs_open_context *ctx; struct dentry *res; struct iattr attr = { .ia_valid = ATTR_OPEN }; @@ -1857,7 +1857,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { +#ifdef CONFIG_PREEMPT_RT_BASE + down(&NFS_I(d_inode(dentry))->rmdir_sem); +#else down_write(&NFS_I(d_inode(dentry))->rmdir_sem); +#endif error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { @@ -1867,7 +1871,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) case -ENOENT: nfs_dentry_handle_enoent(dentry); } +#ifdef CONFIG_PREEMPT_RT_BASE +
up(&NFS_I(d_inode(dentry))->rmdir_sem); +#else up_write(&NFS_I(d_inode(dentry))->rmdir_sem); +#endif } else error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); trace_nfs_rmdir_exit(dir, dentry, error); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 592b95ab378bdcb8498a2c181d3f9bd7da3b9db7..4fc14fb5006883f06f2bcb1d697e6ad75874d39a 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -2105,7 +2105,11 @@ static void init_once(void *foo) atomic_long_set(&nfsi->nrequests, 0); atomic_long_set(&nfsi->commit_info.ncommit, 0); atomic_set(&nfsi->commit_info.rpcs_out, 0); +#ifdef CONFIG_PREEMPT_RT_BASE + sema_init(&nfsi->rmdir_sem, 1); +#else init_rwsem(&nfsi->rmdir_sem); +#endif mutex_init(&nfsi->commit_mutex); nfs4_init_once(nfsi); } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 5ac7bf24c507b822797a1c760d93bd4772a202c0..b15a7a2f64fea36b707a5dc86748fcd49cc96fd6 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -114,7 +114,7 @@ struct nfs4_state_owner { unsigned long so_flags; struct list_head so_states; struct nfs_seqid_counter so_seqid; - seqcount_t so_reclaim_seqcount; + seqlock_t so_reclaim_seqlock; struct mutex so_delegreturn_mutex; }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 759c834b60fd6f013ee13b3fb2a1d7244009c030..256a4b0c4407aafa73fceea5aaaea717f2360b1e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2874,7 +2874,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, unsigned int seq; int ret; - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); + seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); ret = _nfs4_proc_open(opendata, ctx); if (ret != 0) @@ -2915,7 +2915,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (d_inode(dentry) == state->inode) { nfs_inode_attach_open_context(ctx); - if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) + if (read_seqretry(&sp->so_reclaim_seqlock, seq)) nfs4_schedule_stateid_recovery(server, state); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 30576a10a1f4caf0d914c3b6f1401f57bb4aceaf..e0b94cbce3bffb183ba7852f42f57616b33db5a9 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -516,7 +516,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, nfs4_init_seqid_counter(&sp->so_seqid); atomic_set(&sp->so_count, 1); INIT_LIST_HEAD(&sp->so_lru); - seqcount_init(&sp->so_reclaim_seqcount); + seqlock_init(&sp->so_reclaim_seqlock); mutex_init(&sp->so_delegreturn_mutex); return sp; } @@ -1584,8 +1584,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs * recovering after a network partition or a reboot from a * server that doesn't support a grace period. 
*/ +#ifdef CONFIG_PREEMPT_RT_FULL + write_seqlock(&sp->so_reclaim_seqlock); +#else + write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); +#endif spin_lock(&sp->so_lock); - raw_write_seqcount_begin(&sp->so_reclaim_seqcount); restart: list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) @@ -1672,14 +1676,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs spin_lock(&sp->so_lock); goto restart; } - raw_write_seqcount_end(&sp->so_reclaim_seqcount); spin_unlock(&sp->so_lock); +#ifdef CONFIG_PREEMPT_RT_FULL + write_sequnlock(&sp->so_reclaim_seqlock); +#else + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); +#endif return 0; out_err: nfs4_put_open_state(state); - spin_lock(&sp->so_lock); - raw_write_seqcount_end(&sp->so_reclaim_seqcount); - spin_unlock(&sp->so_lock); +#ifdef CONFIG_PREEMPT_RT_FULL + write_sequnlock(&sp->so_reclaim_seqlock); +#else + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); +#endif return status; } diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index fd61bf0fce63acf21c3219196a463735f3032f83..839bfa76f41ecc396a0b881d367eff6084882924 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include @@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) rpc_restart_call_prepare(task); } +#ifdef CONFIG_PREEMPT_RT_BASE +static void nfs_down_anon(struct semaphore *sema) +{ + down(sema); +} + +static void nfs_up_anon(struct semaphore *sema) +{ + up(sema); +} + +#else +static void nfs_down_anon(struct rw_semaphore *rwsem) +{ + down_read_non_owner(rwsem); +} + +static void nfs_up_anon(struct rw_semaphore *rwsem) +{ + up_read_non_owner(rwsem); +} +#endif + /** * nfs_async_unlink_release - Release the sillydelete data. * @task: rpc_task of the sillydelete @@ -65,7 +88,7 @@ static void nfs_async_unlink_release(void *calldata) struct dentry *dentry = data->dentry; struct super_block *sb = dentry->d_sb; - up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); + nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); d_lookup_done(dentry); nfs_free_unlinkdata(data); dput(dentry); @@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf struct inode *dir = d_inode(dentry->d_parent); struct dentry *alias; - down_read_non_owner(&NFS_I(dir)->rmdir_sem); + nfs_down_anon(&NFS_I(dir)->rmdir_sem); alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); if (IS_ERR(alias)) { - up_read_non_owner(&NFS_I(dir)->rmdir_sem); + nfs_up_anon(&NFS_I(dir)->rmdir_sem); return 0; } if (!d_in_lookup(alias)) { @@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf ret = 0; spin_unlock(&alias->d_lock); dput(alias); - up_read_non_owner(&NFS_I(dir)->rmdir_sem); + nfs_up_anon(&NFS_I(dir)->rmdir_sem); /* * If we'd displaced old cached devname, free it. 
At that * point dentry is definitely not a root, so we won't need @@ -183,7 +206,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name) goto out_free_name; } data->res.dir_attr = &data->dir_attr; - init_waitqueue_head(&data->wq); + init_swait_queue_head(&data->wq); status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 8946130c87ad7d10b3057ba826c4d5141dca7e91..71d0b3ba70f86edb4c1f823779816b13912a0a2d 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -106,8 +106,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) "0x%llx.", (unsigned long long)bh->b_blocknr); } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -122,8 +121,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); /* * If none of the buffers had errors then we can set the page uptodate, * but we first have to perform the post read mst fixups, if the @@ -156,9 +154,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) unlock_page(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } /** diff --git a/fs/proc/array.c b/fs/proc/array.c index 9eb99a43f849a045d60b6f196d22e665a0e73e20..e4d0cfebaac5b100007c0fca09ea79bdf35d659a 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -381,9 +381,9 @@ static inline void task_context_switch_counts(struct seq_file *m, static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) { seq_printf(m, "Cpus_allowed:\t%*pb\n", - cpumask_pr_args(&task->cpus_allowed)); + cpumask_pr_args(task->cpus_ptr)); seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", - cpumask_pr_args(&task->cpus_allowed)); + cpumask_pr_args(task->cpus_ptr)); } static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) diff --git a/fs/proc/base.c b/fs/proc/base.c index e3f10c110b74efd6d6894d22552288d859cd6545..8806344af2415a9d8bb05a5b4f831cc442c12eac 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -95,6 +95,7 @@ #include #include #include +#include #include "internal.h" #include "fd.h" @@ -1889,7 +1890,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); if (!child) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index c95f32b83a942c3b39350bc76bde54800c4d53fe..75f500cb7e745f818f697f7f1d71d17804529cc8 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -681,7 +681,7 @@ static bool proc_sys_fill_cache(struct file *file, child = d_lookup(dir, &qname); if (!child) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); if (IS_ERR(child)) return false; diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c index 23a9c28ad8ea68e6e1b4679ccbbda74bfafcc115..6a73c4fa88e78cfcd52c7e994e762c3a721f88ee 100644 --- a/fs/squashfs/decompressor_multi_percpu.c +++ 
b/fs/squashfs/decompressor_multi_percpu.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "squashfs_fs.h" #include "squashfs_fs_sb.h" @@ -25,6 +26,8 @@ struct squashfs_stream { void *stream; }; +static DEFINE_LOCAL_IRQ_LOCK(stream_lock); + void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, void *comp_opts) { @@ -79,10 +82,15 @@ int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, { struct squashfs_stream __percpu *percpu = (struct squashfs_stream __percpu *) msblk->stream; - struct squashfs_stream *stream = get_cpu_ptr(percpu); - int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, - offset, length, output); - put_cpu_ptr(stream); + struct squashfs_stream *stream; + int res; + + stream = get_locked_ptr(stream_lock, percpu); + + res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, + offset, length, output); + + put_locked_ptr(stream_lock, stream); if (res < 0) ERROR("%s decompression failed, data probably corrupt\n", diff --git a/fs/timerfd.c b/fs/timerfd.c index d69ad801eb8039af7f19054d32791d48b40589aa..f845093466be1fd3c6a5afae135c8bd7586c41a6 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -471,7 +471,11 @@ static int do_timerfd_settime(int ufd, int flags, break; } spin_unlock_irq(&ctx->wqh.lock); - cpu_relax(); + + if (isalarm(ctx)) + hrtimer_grab_expiry_lock(&ctx->t.alarm.timer); + else + hrtimer_grab_expiry_lock(&ctx->t.tmr); } /* diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 23c8efc967af2cb8e1606ac7929da84532906f03..3f79609511dabdc7d60c92442552595cf917bb3a 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -56,7 +56,7 @@ struct userfaultfd_ctx { /* waitqueue head for events */ wait_queue_head_t event_wqh; /* a refile sequence protected by fault_pending_wqh lock */ - struct seqcount refile_seq; + seqlock_t refile_seq; /* pseudo fd refcounting */ atomic_t refcount; /* userfaultfd syscall flags */ @@ -1059,7 +1059,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, * waitqueue could become empty if this is the * only userfault. */ - write_seqcount_begin(&ctx->refile_seq); + write_seqlock(&ctx->refile_seq); /* * The fault_pending_wqh.lock prevents the uwq @@ -1085,7 +1085,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, list_del(&uwq->wq.entry); add_wait_queue(&ctx->fault_wqh, &uwq->wq); - write_seqcount_end(&ctx->refile_seq); + write_sequnlock(&ctx->refile_seq); /* careful to always initialize msg if ret == 0 */ *msg = uwq->msg; @@ -1258,11 +1258,11 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, * sure we've userfaults to wake. 
*/ do { - seq = read_seqcount_begin(&ctx->refile_seq); + seq = read_seqbegin(&ctx->refile_seq); need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || waitqueue_active(&ctx->fault_wqh); cond_resched(); - } while (read_seqcount_retry(&ctx->refile_seq, seq)); + } while (read_seqretry(&ctx->refile_seq, seq)); if (need_wakeup) __wake_userfault(ctx, range); } @@ -1936,7 +1936,7 @@ static void init_once_userfaultfd_ctx(void *mem) init_waitqueue_head(&ctx->fault_wqh); init_waitqueue_head(&ctx->event_wqh); init_waitqueue_head(&ctx->fd_wqh); - seqcount_init(&ctx->refile_seq); + seqlock_init(&ctx->refile_seq); } SYSCALL_DEFINE1(userfaultfd, int, flags) diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 14499757338f65416835330254b8c90a06918d64..c524216e036393cc9df66201552e9bcca5f0f15a 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -81,7 +81,7 @@ int acpi_irq_penalty_init(void); int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, - int *polarity, char **name); + int *polarity, char **name, struct fwnode_handle **rs_fwnode); int acpi_pci_link_free_irq(acpi_handle handle); /* ACPI PCI Device Binding (pci_bind.c) */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 1817a8415a5e82a7bc526a38361cea88566e62bb..942d64c0476e746247e7c3c1a356d82ff2f873a2 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -5,6 +5,7 @@ #include #include #include +#include #ifdef CONFIG_SMP diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1a37748766b70660faea7792294931de78398ce5..b5795704051814d667a31e6a5e71af78777bb4da 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -23,6 +23,7 @@ #include #include /* for struct resource */ +#include #include #include #include @@ -327,6 +328,22 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *fwnode); +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data); + +#ifdef CONFIG_ACPI_GENERIC_GSI +struct fwnode_handle *acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source); +#else +static inline +struct fwnode_handle *acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) +{ + return NULL; +} +#endif + #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 8f1be8b49391099539b57a6640bb5ea726ca955a..53f63f18ecdfe78681c3da2d5a5c094ec584d2a0 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -14,7 +14,7 @@ * Nauman Rafique */ -#include +#include #include #include #include diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2885dce1ad496368e9f931ebba86ef69b90df05c..8dbb9ecf9993b476d1b3002f5c29a21a9a5380d4 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -256,7 +256,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } - +void __blk_mq_complete_request_remote_work(struct work_struct *work); int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 
56fe682d9beb4286fdef441579408e24f0157396..c01e5d9597f994480a468e554f0a6ed4d8dacd22 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -157,6 +158,9 @@ enum mq_rq_state { */ struct request { struct request_queue *q; +#ifdef CONFIG_PREEMPT_RT_FULL + struct work_struct work; +#endif struct blk_mq_ctx *mq_ctx; int cpu; @@ -661,6 +665,7 @@ struct request_queue { #endif struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; + struct work_struct mq_pcpu_wake; struct percpu_ref q_usage_counter; struct list_head all_q_node; diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index a19519f4241dce4f18b8c338648a70af75392994..40dd5ef9c154b519cd48f3a89553477ce4275aad 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -4,6 +4,39 @@ #include +#ifdef CONFIG_PREEMPT_RT_FULL + +extern void __local_bh_disable(void); +extern void _local_bh_enable(void); +extern void __local_bh_enable(void); + +static inline void local_bh_disable(void) +{ + __local_bh_disable(); +} + +static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) +{ + __local_bh_disable(); +} + +static inline void local_bh_enable(void) +{ + __local_bh_enable(); +} + +static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) +{ + __local_bh_enable(); +} + +static inline void local_bh_enable_ip(unsigned long ip) +{ + __local_bh_enable(); +} + +#else + #ifdef CONFIG_TRACE_IRQFLAGS extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); #else @@ -31,5 +64,6 @@ static inline void local_bh_enable(void) { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } +#endif #endif /* _LINUX_BH_H */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9168fc33a4f7843ff760d948cf7a892e4defffce..703bf3335a3da16ac94d347643ea781b31802667 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -76,8 +76,50 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ atomic_t b_count; /* users using this buffer_head */ +#ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t b_uptodate_lock; +#if IS_ENABLED(CONFIG_JBD2) + spinlock_t b_state_lock; + spinlock_t b_journal_head_lock; +#endif +#endif }; +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) +{ + unsigned long flags; + +#ifndef CONFIG_PREEMPT_RT_BASE + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); +#else + spin_lock_irqsave(&bh->b_uptodate_lock, flags); +#endif + return flags; +} + +static inline void +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) +{ +#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); + local_irq_restore(flags); +#else + spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); +#endif +} + +static inline void buffer_head_init_locks(struct buffer_head *bh) +{ +#ifdef CONFIG_PREEMPT_RT_BASE + spin_lock_init(&bh->b_uptodate_lock); +#if IS_ENABLED(CONFIG_JBD2) + spin_lock_init(&bh->b_state_lock); + spin_lock_init(&bh->b_journal_head_lock); +#endif +#endif +} + /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. 
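The buffer_head.h hunk above is the heart of the BH_Uptodate_Lock conversion: on PREEMPT_RT_BASE the bit spinlock packed into b_state is replaced by a real spinlock_t that may sleep, while non-RT builds keep the original local_irq_save() plus bit_spin_lock() sequence. A minimal sketch of how an end_io-style handler uses the new helpers, mirroring the fs/buffer.c changes earlier in this series (the function name and the exact work done inside the critical section are illustrative only, not part of the patch):

    /* Sketch: serialize per-page buffer state updates with the new API. */
    static void example_end_io(struct buffer_head *bh, int uptodate)
    {
            struct buffer_head *first = page_buffers(bh->b_page);
            struct buffer_head *tmp;
            unsigned long flags;

            if (uptodate)
                    set_buffer_uptodate(bh);

            /* Expands to local_irq_save() + bit_spin_lock(BH_Uptodate_Lock)
             * on mainline, or spin_lock_irqsave(&first->b_uptodate_lock)
             * on PREEMPT_RT_BASE kernels. */
            flags = bh_uptodate_lock_irqsave(first);

            clear_buffer_async_read(bh);
            for (tmp = bh->b_this_page; tmp != bh; tmp = tmp->b_this_page) {
                    if (buffer_async_read(tmp)) {
                            /* Another read on this page is still in flight;
                             * drop the lock and let its end_io finish up. */
                            bh_uptodate_unlock_irqrestore(first, flags);
                            return;
                    }
            }

            bh_uptodate_unlock_irqrestore(first, flags);
    }

Note that buffer_head_init_locks(), called from alloc_buffer_head() in the fs/buffer.c hunk, must have initialized b_uptodate_lock before the helpers are used on RT.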
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index f92264d1ed4fb9f82ae139cec9ec92349fd08f90..cc912f3dc20321bc8b17f348e9c5e7ab83ddbd0d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -20,6 +20,7 @@ #include #include #include +#include <linux/swork.h> #ifdef CONFIG_CGROUPS @@ -157,6 +158,7 @@ struct cgroup_subsys_state { /* percpu_ref killing and RCU release */ struct work_struct destroy_work; + struct swork_event destroy_swork; struct rcu_work destroy_rwork; /* diff --git a/include/linux/completion.h b/include/linux/completion.h index 519e94915d1850628ba3a79f7341c29ce3a68f40..bf8e77001f18f162b65ab5eb2bdcd014f37a87aa 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -9,7 +9,7 @@ * See kernel/sched/completion.c for details. */ -#include <linux/wait.h> +#include <linux/swait.h> /* * struct completion - structure used to maintain state for a "completion" @@ -25,7 +25,7 @@ */ struct completion { unsigned int done; - wait_queue_head_t wait; + struct swait_queue_head wait; }; #define init_completion_map(x, m) __init_completion(x) @@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} #define COMPLETION_INITIALIZER(work) \ - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ (*({ init_completion_map(&(work), &(map)); &(work); })) @@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {} static inline void __init_completion(struct completion *x) { x->done = 0; - init_waitqueue_head(&x->wait); + init_swait_queue_head(&x->wait); } /** diff --git a/include/linux/cpu.h b/include/linux/cpu.h index aab4273810e378c65cd1036c4ee487d4257ba2b5..87347ccbba0c207ff5ccfc6cdd5b8f5641faf907 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -129,6 +129,7 @@ static inline int cpus_read_trylock(void) { return true; } static inline void lockdep_assert_cpus_held(void) { } static inline void cpu_hotplug_disable(void) { } static inline void cpu_hotplug_enable(void) { } + #endif /* !CONFIG_HOTPLUG_CPU */ /* Wrappers which go away once all code is converted */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 02b1b40fea5b11b6b062ee84994dbd5e7509cb93..50cbde278301a598325acaa0ce6ab79a2be35cf2 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -105,7 +105,7 @@ struct dentry { union { struct list_head d_lru; /* LRU list */ - wait_queue_head_t *d_wait; /* in-lookup ones only */ + struct swait_queue_head *d_wait; /* in-lookup ones only */ }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ @@ -236,7 +236,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, - wait_queue_head_t *); + struct swait_queue_head *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); diff --git a/include/linux/delay.h b/include/linux/delay.h index b78bab4395d806d1b9cd4607107779e4388cb8c2..7c4bc414a504e60e977a805f075e0ef9fc6b50c9 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -64,4 +64,10 @@
static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } +#ifdef CONFIG_PREEMPT_RT_FULL +extern void cpu_chill(void); +#else +# define cpu_chill() cpu_relax() +#endif + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 9a5d4b499271662b99041fcaf965e86b771bf5f1..3ec7b52f8994424c20c2807abdb69783c2edc575 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -566,7 +566,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, unsigned long *data_size, void *data); typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor); -typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, +typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data); typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); @@ -672,6 +672,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) +#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) typedef struct { efi_guid_t guid; @@ -957,6 +958,7 @@ extern struct efi { unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ unsigned long tpm_log; /* TPM2 Event Log table */ + unsigned long mem_reserve; /* Linux EFI memreserve table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -1045,6 +1047,7 @@ extern int __init efi_uart_console_only (void); extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_mem_reserve(phys_addr_t addr, u64 size); +extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_reserve_boot_services(void); @@ -1707,4 +1710,20 @@ extern struct efi_runtime_work efi_rts_work; /* Workqueue to queue EFI Runtime Services */ extern struct workqueue_struct *efi_rts_wq; +struct linux_efi_memreserve { + int size; // allocated size of the array + atomic_t count; // number of entries used + phys_addr_t next; // pa of next struct instance + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; +}; + +#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ + (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + +#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ + / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 86f884e78b6b9f1441848218ac6186c45f98dbe2..029870aca4ffc5ed86b1e5f9b38bc2d8ff9638b4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -679,7 +679,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; - unsigned i_dir_seq; + unsigned __i_dir_seq; }; __u32 i_generation; diff --git a/include/linux/fscache.h b/include/linux/fscache.h 
index 84b90a79d75a0af1e04aa9d74364c8bec8ee5d79..87a9330eafa20fc4120c41747c92f267f3fad2d2 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -230,6 +230,7 @@ extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, bool (*)(void *), void *); +extern void fscache_cookie_init(void); /** * fscache_register_netfs - Register a filesystem as desiring caching services diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 0690679832d45fda5c4c945dc5e96f980eae5ca1..eaa2ef9bc10e1ef13951a726b7e8de485e108d94 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -66,7 +67,7 @@ static inline void kunmap(struct page *page) static inline void *kmap_atomic(struct page *page) { - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); return page_address(page); } @@ -75,7 +76,7 @@ static inline void *kmap_atomic(struct page *page) static inline void __kunmap_atomic(void *addr) { pagefault_enable(); - preempt_enable(); + preempt_enable_nort(); } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) @@ -87,32 +88,51 @@ static inline void __kunmap_atomic(void *addr) #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) +#ifndef CONFIG_PREEMPT_RT_FULL DECLARE_PER_CPU(int, __kmap_atomic_idx); +#endif static inline int kmap_atomic_idx_push(void) { +#ifndef CONFIG_PREEMPT_RT_FULL int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; -#ifdef CONFIG_DEBUG_HIGHMEM +# ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(in_irq() && !irqs_disabled()); BUG_ON(idx >= KM_TYPE_NR); -#endif +# endif return idx; +#else + current->kmap_idx++; + BUG_ON(current->kmap_idx > KM_TYPE_NR); + return current->kmap_idx - 1; +#endif } static inline int kmap_atomic_idx(void) { +#ifndef CONFIG_PREEMPT_RT_FULL return __this_cpu_read(__kmap_atomic_idx) - 1; +#else + return current->kmap_idx - 1; +#endif } static inline void kmap_atomic_idx_pop(void) { -#ifdef CONFIG_DEBUG_HIGHMEM +#ifndef CONFIG_PREEMPT_RT_FULL +# ifdef CONFIG_DEBUG_HIGHMEM int idx = __this_cpu_dec_return(__kmap_atomic_idx); BUG_ON(idx < 0); -#else +# else __this_cpu_dec(__kmap_atomic_idx); +# endif +#else + current->kmap_idx--; +# ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(current->kmap_idx < 0); +# endif #endif } diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 542b4fa2cda9b0051d9a6fe11a7d642e2d2bbc59..aee31b1f0cc338f83a26a02f7f2396521824160b 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -41,6 +41,7 @@ enum hrtimer_mode { HRTIMER_MODE_REL = 0x01, HRTIMER_MODE_PINNED = 0x02, HRTIMER_MODE_SOFT = 0x04, + HRTIMER_MODE_HARD = 0x08, HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, @@ -51,6 +52,11 @@ enum hrtimer_mode { HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, + + HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, }; /* @@ -186,6 +192,8 @@ enum hrtimer_base_type { * @nr_retries: Total number of hrtimer interrupt retries * @nr_hangs: 
Total number of hrtimer interrupt hangs * @max_hang_time: Maximum time spent in hrtimer_interrupt + * @softirq_expiry_lock: Lock which is taken while softirq based hrtimers are + * expired * @expires_next: absolute time of the next event, is required for remote * hrtimer enqueue; it is the total first expiry time (hard * and soft hrtimer are taken into account) @@ -213,6 +221,7 @@ struct hrtimer_cpu_base { unsigned short nr_hangs; unsigned int max_hang_time; #endif + spinlock_t softirq_expiry_lock; ktime_t expires_next; struct hrtimer *next_timer; ktime_t softirq_expires_next; @@ -364,10 +373,17 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device); /* Initialize timers: */ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode, + struct task_struct *task); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); +extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode, + struct task_struct *task); extern void destroy_hrtimer_on_stack(struct hrtimer *timer); #else @@ -377,6 +393,15 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer, { hrtimer_init(timer, which_clock, mode); } + +static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode, + struct task_struct *task) +{ + hrtimer_init_sleeper(sl, clock_id, mode, task); +} + static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } #endif @@ -400,6 +425,7 @@ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); +extern void hrtimer_grab_expiry_lock(const struct hrtimer *timer); static inline void hrtimer_start_expires(struct hrtimer *timer, enum hrtimer_mode mode) @@ -486,9 +512,6 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp, const enum hrtimer_mode mode, const clockid_t clockid); -extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, - struct task_struct *tsk); - extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode); extern int schedule_hrtimeout_range_clock(ktime_t *expires, diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h new file mode 100644 index 0000000000000000000000000000000000000000..73b0982cc5193c2926fc733dd588195dfec57d9f --- /dev/null +++ b/include/linux/i3c/ccc.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon + */ + +#ifndef I3C_CCC_H +#define I3C_CCC_H + +#include <linux/bitops.h> +#include <linux/i3c/device.h> + +/* I3C CCC (Common Command Codes) related definitions */ +#define I3C_CCC_DIRECT BIT(7) + +#define I3C_CCC_ID(id, broadcast) \ + ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT)) + +/* Commands valid in both broadcast and unicast modes */ +#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast) +#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast) +#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast) +#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast) +#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast) +#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast) +#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ?
0x28 : 0x98) +#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0)) + +/* Broadcast-only commands */ +#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true) +#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true) +#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true) +#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true) + +/* Unicast-only commands */ +#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false) +#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false) +#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false) +#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false) +#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false) +#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false) +#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false) +#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false) +#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false) +#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false) +#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false) +#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false) +#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false) + +#define I3C_CCC_EVENT_SIR BIT(0) +#define I3C_CCC_EVENT_MR BIT(1) +#define I3C_CCC_EVENT_HJ BIT(3) + +/** + * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC + * + * @events: bitmask of I3C_CCC_EVENT_xxx events. + * + * Depending on the CCC command, the specific events coming from all devices + * (broadcast version) or a specific device (unicast version) will be + * enabled (ENEC) or disabled (DISEC). + */ +struct i3c_ccc_events { + u8 events; +}; + +/** + * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC + * + * @len: maximum write length in bytes + * + * The maximum write length is only applicable to SDR private messages or + * extended Write CCCs (like SETXTIME). + */ +struct i3c_ccc_mwl { + __be16 len; +}; + +/** + * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC + * + * @read_len: maximum read length in bytes + * @ibi_len: maximum IBI payload length + * + * The maximum read length is only applicable to SDR private messages or + * extended Read CCCs (like GETXTIME). + * The IBI length is only valid if the I3C slave is IBI capable + * (%I3C_BCR_IBI_REQ_CAP is set). + */ +struct i3c_ccc_mrl { + __be16 read_len; + u8 ibi_len; +} __packed; + +/** + * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS + * + * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is + * describing an I2C slave. + * @dcr: DCR value (not applicable to entries describing I2C devices) + * @lvr: LVR value (not applicable to entries describing I3C devices) + * @bcr: BCR value or 0 if this entry is describing an I2C slave + * @static_addr: static address or 0 if the device does not have a static + * address + * + * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc + * descriptors (one entry per I3C/I2C dev controlled by the master). + */ +struct i3c_ccc_dev_desc { + u8 dyn_addr; + union { + u8 dcr; + u8 lvr; + }; + u8 bcr; + u8 static_addr; +}; + +/** + * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC + * + * @count: number of dev descriptors + * @master: descriptor describing the current master + * @slaves: array of descriptors describing slaves controlled by the + * current master + * + * Information passed to the broadcast DEFSLVS to propagate device + * information to all masters currently acting as slaves on the bus. + * This is only meaningful if you have more than one master.
+ */ +struct i3c_ccc_defslvs { + u8 count; + struct i3c_ccc_dev_desc master; + struct i3c_ccc_dev_desc slaves[0]; +} __packed; + +/** + * enum i3c_ccc_test_mode - enum listing all available test modes + * + * @I3C_CCC_EXIT_TEST_MODE: exit test mode + * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode + */ +enum i3c_ccc_test_mode { + I3C_CCC_EXIT_TEST_MODE, + I3C_CCC_VENDOR_TEST_MODE, +}; + +/** + * struct i3c_ccc_enttm - payload passed to ENTTM CCC + * + * @mode: one of the &enum i3c_ccc_test_mode modes + * + * Information passed to the ENTTM CCC to instruct an I3C device to enter a + * specific test mode. + */ +struct i3c_ccc_enttm { + u8 mode; +}; + +/** + * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs + * + * @addr: dynamic address to assign to an I3C device + * + * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the + * dynamic address of an I3C device. + */ +struct i3c_ccc_setda { + u8 addr; +}; + +/** + * struct i3c_ccc_getpid - payload passed to GETPID CCC + * + * @pid: 48 bits PID in big endian + */ +struct i3c_ccc_getpid { + u8 pid[6]; +}; + +/** + * struct i3c_ccc_getbcr - payload passed to GETBCR CCC + * + * @bcr: BCR (Bus Characteristic Register) value + */ +struct i3c_ccc_getbcr { + u8 bcr; +}; + +/** + * struct i3c_ccc_getdcr - payload passed to GETDCR CCC + * + * @dcr: DCR (Device Characteristic Register) value + */ +struct i3c_ccc_getdcr { + u8 dcr; +}; + +#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0)) +#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5) +#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \ + (((status) & GENMASK(7, 6)) >> 6) + +/** + * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC + * + * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more + * information). 
+ */ +struct i3c_ccc_getstatus { + __be16 status; +}; + +/** + * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC + * + * @newmaster: address of the master taking bus ownership + */ +struct i3c_ccc_getaccmst { + u8 newmaster; +}; + +/** + * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor + * + * @addr: dynamic address of the bridged device + * @id: ID of the slave device behind the bridge + */ +struct i3c_ccc_bridged_slave_desc { + u8 addr; + __be16 id; +} __packed; + +/** + * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC + * + * @count: number of bridged slaves + * @bslaves: bridged slave descriptors + */ +struct i3c_ccc_setbrgtgt { + u8 count; + struct i3c_ccc_bridged_slave_desc bslaves[0]; +} __packed; + +/** + * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers + */ +enum i3c_sdr_max_data_rate { + I3C_SDR0_FSCL_MAX, + I3C_SDR1_FSCL_8MHZ, + I3C_SDR2_FSCL_6MHZ, + I3C_SDR3_FSCL_4MHZ, + I3C_SDR4_FSCL_2MHZ, +}; + +/** + * enum i3c_tsco - clock to data turn-around + */ +enum i3c_tsco { + I3C_TSCO_8NS, + I3C_TSCO_9NS, + I3C_TSCO_10NS, + I3C_TSCO_11NS, + I3C_TSCO_12NS, +}; + +#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0) +#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK) + +/** + * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC + * + * @maxwr: write limitations + * @maxrd: read limitations + * @maxrdturn: maximum read turn-around expressed in micro-seconds and + * little-endian formatted + */ +struct i3c_ccc_getmxds { + u8 maxwr; + u8 maxrd; + u8 maxrdturn[3]; +} __packed; + +#define I3C_CCC_HDR_MODE(mode) BIT(mode) + +/** + * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC + * + * @modes: bitmap of supported HDR modes + */ +struct i3c_ccc_gethdrcap { + u8 modes; +} __packed; + +/** + * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands + */ +enum i3c_ccc_setxtime_subcmd { + I3C_CCC_SETXTIME_ST = 0x7f, + I3C_CCC_SETXTIME_DT = 0xbf, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb, + I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd, + I3C_CCC_SETXTIME_TPH = 0x3f, + I3C_CCC_SETXTIME_TU = 0x9f, + I3C_CCC_SETXTIME_ODR = 0x8f, +}; + +/** + * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC + * + * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd + * @data: sub-command payload. Amount of data is determined by + * &i3c_ccc_setxtime->subcmd + */ +struct i3c_ccc_setxtime { + u8 subcmd; + u8 data[0]; +} __packed; + +#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0) +#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1) +#define I3C_CCC_GETXTIME_OVERFLOW BIT(7) + +/** + * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC + * + * @supported_modes: bitmap describing supported XTIME modes + * @state: current status (enabled mode and overflow status) + * @frequency: slave's internal oscillator frequency in 500KHz steps + * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps + */ +struct i3c_ccc_getxtime { + u8 supported_modes; + u8 state; + u8 frequency; + u8 inaccuracy; +} __packed; + +/** + * struct i3c_ccc_cmd_payload - CCC payload + * + * @len: payload length + * @data: payload data.
This buffer must be DMA-able + */ +struct i3c_ccc_cmd_payload { + u16 len; + void *data; +}; + +/** + * struct i3c_ccc_cmd_dest - CCC command destination + * + * @addr: can be an I3C device address or the broadcast address if this is a + * broadcast CCC + * @payload: payload to be sent to this device or broadcasted + */ +struct i3c_ccc_cmd_dest { + u8 addr; + struct i3c_ccc_cmd_payload payload; +}; + +/** + * struct i3c_ccc_cmd - CCC command + * + * @rnw: true if the CCC should retrieve data from the device. Only valid for + * unicast commands + * @id: CCC command id + * @ndests: number of destinations. Should always be one for broadcast commands + * @dests: array of destinations and associated payload for this CCC. Most of + * the time, only one destination is provided + * @err: I3C error code + */ +struct i3c_ccc_cmd { + u8 rnw; + u8 id; + unsigned int ndests; + struct i3c_ccc_cmd_dest *dests; + enum i3c_error_code err; +}; + +#endif /* I3C_CCC_H */ diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h new file mode 100644 index 0000000000000000000000000000000000000000..de102e4418ab4118f00cf5fd5b6adda4c67eb73e --- /dev/null +++ b/include/linux/i3c/device.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon + */ + +#ifndef I3C_DEV_H +#define I3C_DEV_H + +#include +#include +#include +#include +#include +#include + +/** + * enum i3c_error_code - I3C error codes + * + * These are the standard error codes as defined by the I3C specification. + * When -EIO is returned by the i3c_device_do_priv_xfers() or + * i3c_device_send_hdr_cmds() one can check the error code in + * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of + * what went wrong. + * + * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C + * related + * @I3C_ERROR_M0: M0 error + * @I3C_ERROR_M1: M1 error + * @I3C_ERROR_M2: M2 error + */ +enum i3c_error_code { + I3C_ERROR_UNKNOWN = 0, + I3C_ERROR_M0 = 1, + I3C_ERROR_M1, + I3C_ERROR_M2, +}; + +/** + * enum i3c_hdr_mode - HDR mode ids + * @I3C_HDR_DDR: DDR mode + * @I3C_HDR_TSP: TSP mode + * @I3C_HDR_TSL: TSL mode + */ +enum i3c_hdr_mode { + I3C_HDR_DDR, + I3C_HDR_TSP, + I3C_HDR_TSL, +}; + +/** + * struct i3c_priv_xfer - I3C SDR private transfer + * @rnw: encodes the transfer direction. true for a read, false for a write + * @len: transfer length in bytes of the transfer + * @data: input/output buffer + * @data.in: input buffer. Must point to a DMA-able buffer + * @data.out: output buffer. 
Must point to a DMA-able buffer + * @err: I3C error code + */ +struct i3c_priv_xfer { + u8 rnw; + u16 len; + union { + void *in; + const void *out; + } data; + enum i3c_error_code err; +}; + +/** + * enum i3c_dcr - I3C DCR values + * @I3C_DCR_GENERIC_DEVICE: generic I3C device + */ +enum i3c_dcr { + I3C_DCR_GENERIC_DEVICE = 0, +}; + +#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33) +#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32))) +#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0)) +#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16) +#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12) +#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0)) + +#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6)) +#define I3C_BCR_I3C_SLAVE (0 << 6) +#define I3C_BCR_I3C_MASTER (1 << 6) +#define I3C_BCR_HDR_CAP BIT(5) +#define I3C_BCR_BRIDGE BIT(4) +#define I3C_BCR_OFFLINE_CAP BIT(3) +#define I3C_BCR_IBI_PAYLOAD BIT(2) +#define I3C_BCR_IBI_REQ_CAP BIT(1) +#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0) + +/** + * struct i3c_device_info - I3C device information + * @pid: Provisional ID + * @bcr: Bus Characteristic Register + * @dcr: Device Characteristic Register + * @static_addr: static/I2C address + * @dyn_addr: dynamic address + * @hdr_cap: supported HDR modes + * @max_read_ds: max read speed information + * @max_write_ds: max write speed information + * @max_ibi_len: max IBI payload length + * @max_read_turnaround: max read turn-around time in micro-seconds + * @max_read_len: max private SDR read length in bytes + * @max_write_len: max private SDR write length in bytes + * + * These are all basic pieces of information that should be advertised by an + * I3C device. + * Some of them are optional depending on the device type and device + * capabilities. + * For each I3C slave attached to a master with + * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command + * to retrieve these data. + */ +struct i3c_device_info { + u64 pid; + u8 bcr; + u8 dcr; + u8 static_addr; + u8 dyn_addr; + u8 hdr_cap; + u8 max_read_ds; + u8 max_write_ds; + u8 max_ibi_len; + u32 max_read_turnaround; + u16 max_read_len; + u16 max_write_len; +}; + +/* + * I3C device internals are kept hidden from I3C device users. It's just + * simpler to refactor things when everything goes through getter/setters, and + * I3C device drivers should not have to worry about internal representation + * anyway. + */ +struct i3c_device; + +/* These macros should be used to fill i3c_device_id entries. */ +#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART) + +#define I3C_DEVICE(_manufid, _partid, _drvdata) \ + { \ + .match_flags = I3C_MATCH_MANUF_AND_PART, \ + .manuf_id = _manufid, \ + .part_id = _partid, \ + .data = _drvdata, \ + } + +#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \ + { \ + .match_flags = I3C_MATCH_MANUF_AND_PART | \ + I3C_MATCH_EXTRA_INFO, \ + .manuf_id = _manufid, \ + .part_id = _partid, \ + .extra_info = _info, \ + .data = _drvdata, \ + } + +#define I3C_CLASS(_dcr, _drvdata) \ + { \ + .match_flags = I3C_MATCH_DCR, \ + .dcr = _dcr, \ + } + +/** + * struct i3c_driver - I3C device driver + * @driver: inherit from device_driver + * @probe: I3C device probe method + * @remove: I3C device remove method + * @id_table: I3C device match table.
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ int (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled, in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister)
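For illustration, a minimal I3C-only driver skeleton built on module_i3c_driver() might look like this (sketch; every my_* name is hypothetical, and my_i3c_ids is the table from the previous sketch):

static int my_probe(struct i3c_device *i3cdev)
{
        /* read the device info, allocate driver state, ... */
        return 0;
}

static int my_remove(struct i3c_device *i3cdev)
{
        return 0;
}

static struct i3c_driver my_driver = {
        .driver = {
                .name = "my-i3c-driver",
        },
        .probe = my_probe,
        .remove = my_remove,
        .id_table = my_i3c_ids,
};
module_i3c_driver(my_driver);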
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+
+void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
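A sketch of how a device driver could chain two SDR transfers into one transaction with i3c_device_do_priv_xfers() (hypothetical helper; as documented above, both buffers must point to DMA-able memory):

static int my_read_reg(struct i3c_device *i3cdev, u8 *cmd, void *buf, u16 len)
{
        struct i3c_priv_xfer xfers[2] = {
                { .rnw = false, .len = 1, .data.out = cmd },
                { .rnw = true, .len = len, .data.in = buf },
        };

        return i3c_device_do_priv_xfers(i3cdev, xfers, 2);
}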
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler is
+ * called in a workqueue context. It is allowed to sleep and send new
+ * messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying the
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
+
+#endif /* I3C_DEV_H */
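A sketch of the request/enable sequence described above, with a hypothetical handler (the handler runs in workqueue context and may sleep):

static void my_ibi_handler(struct i3c_device *dev,
                           const struct i3c_ibi_payload *payload)
{
        /* consume payload->data / payload->len */
}

static int my_setup_ibi(struct i3c_device *dev)
{
        static const struct i3c_ibi_setup req = {
                .max_payload_len = 2,
                .num_slots = 10,
                .handler = my_ibi_handler,
        };
        int ret;

        ret = i3c_device_request_ibi(dev, &req);
        if (ret)
                return ret;

        ret = i3c_device_enable_ibi(dev);
        if (ret)
                i3c_device_free_ibi(dev);

        return ret;
}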
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 0000000000000000000000000000000000000000..9cb39d901cd5f95070adc82609f50134ff9d8937
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,655 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i2c_device;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure describes common I3C/I2C device information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(6, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @boardinfo: pointer to the boardinfo attached to this I2C device
+ * @dev: I2C device object registered to the I2C framework
+ * @addr: I2C device address
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct_i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct_i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct_i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ const struct i2c_dev_boardinfo *boardinfo;
+ struct i2c_client *dev;
+ u16 addr;
+ u8 lvr;
+};
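As a sketch of the attach flow, a controller driver could keep per-device state in the master_priv field through the i2c_dev_{set,get}_master_data() helpers defined later in this header (all my_* names are hypothetical):

struct my_i2c_slot {
        int hw_index;           /* controller-specific state */
};

static int my_attach_i2c_dev(struct i2c_dev_desc *dev)
{
        struct my_i2c_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);

        if (!slot)
                return -ENOMEM;

        i2c_dev_set_master_data(dev, slot);
        return 0;
}

static void my_detach_i2c_dev(struct i2c_dev_desc *dev)
{
        kfree(i2c_dev_get_master_data(dev));
        i2c_dev_set_master_data(dev, NULL);
}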
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * this value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct_i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it's been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct_i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct_i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of
+ * linking this object to the relevant &struct_i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
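A sketch of the IBI slot flow described in the struct i3c_ibi_slot documentation, as seen from a controller's IRQ path (my_* names are hypothetical; the slot helpers are declared at the end of this header):

static void my_handle_ibi(struct i3c_dev_desc *dev, const u8 *buf,
                          unsigned int len)
{
        struct i3c_ibi_slot *slot = i3c_master_get_free_ibi_slot(dev);

        if (!slot)
                return;         /* out of slots: the IBI is lost */

        memcpy(slot->data, buf, len);
        slot->len = len;
        i3c_master_queue_ibi(dev, slot);
}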
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * I3C masters can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_RATE 400000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_LIMITED: I2C devices without 50ns spike filter are
+ * present on the bus. However they allow
+ * compliance up to the maximum SDR SCL clock
+ * frequency.
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_LIMITED,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 2 bits per slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. It is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interlaced
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
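To clarify the 2-bit addrslots encoding, here is a sketch of a lookup helper (the core keeps an equivalent helper private; nothing like it is exported by this header):

static enum i3c_addr_slot_status
my_addr_slot_status(const struct i3c_bus *bus, u16 addr)
{
        int bitpos = addr * 2;
        unsigned long status;

        if (addr > I2C_MAX_ADDR)
                return I3C_ADDR_SLOT_RSVD;

        status = bus->addrslots[bitpos / BITS_PER_LONG];
        status >>= bitpos % BITS_PER_LONG;

        return status & I3C_ADDR_SLOT_STATUS_MASK;
}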
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * All devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional, if not provided the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command.
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers.
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support fewer IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration.
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI.
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+};
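Putting the mandatory/optional rules above together, a controller driver's ops table might be populated like this (sketch only; every my_* hook stands for a hypothetical driver function implemented elsewhere):

static const struct i3c_master_controller_ops my_master_ops = {
        /* mandatory hooks */
        .bus_init       = my_bus_init,
        .do_daa         = my_do_daa,
        .send_ccc_cmd   = my_send_ccc_cmd,
        .priv_xfers     = my_priv_xfers,
        .i2c_xfers      = my_i2c_xfers,
        /* optional IBI support; once ->request_ibi is set, the other
         * IBI hooks become mandatory as documented above */
        .request_ibi    = my_request_ibi,
        .free_ibi       = my_free_ibi,
        .enable_ibi     = my_enable_ibi,
        .disable_ibi    = my_disable_ibi,
        .recycle_ibi_slot = my_recycle_ibi_slot,
};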
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. A typical example is Hot Join processing, which
+ * requires taking the bus lock in maintenance mode, which, in turn,
+ * can only be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of the &struct i3c_master_controller
+ * fields should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
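Usage sketch for the iteration macros (the I3C variant is defined just below); the caller is assumed to hold the bus lock:

static void my_dump_bus(struct i3c_bus *bus)
{
        struct i2c_dev_desc *i2cdev;
        struct i3c_dev_desc *i3cdev;

        i3c_bus_for_each_i2cdev(bus, i2cdev)
                pr_info("I2C dev at %#x, lvr %#x\n", i2cdev->addr, i2cdev->lvr);

        i3c_bus_for_each_i3cdev(bus, i3cdev)
                pr_info("I3C dev at %#x\n", i3cdev->info.dyn_addr);
}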
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+int i3c_master_unregister(struct i3c_master_controller *master);
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */ +static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev, + void *data) +{ + dev->common.master_priv = data; +} + +/** + * i3c_dev_get_master() - get master used to communicate with a device + * @dev: I3C dev + * + * Return: the master controller driving @dev + */ +static inline struct i3c_master_controller * +i3c_dev_get_master(struct i3c_dev_desc *dev) +{ + return dev->common.master; +} + +/** + * i2c_dev_get_master() - get master used to communicate with a device + * @dev: I2C dev + * + * Return: the master controller driving @dev + */ +static inline struct i3c_master_controller * +i2c_dev_get_master(struct i2c_dev_desc *dev) +{ + return dev->common.master; +} + +/** + * i3c_master_get_bus() - get the bus attached to a master + * @master: master object + * + * Return: the I3C bus @master is connected to + */ +static inline struct i3c_bus * +i3c_master_get_bus(struct i3c_master_controller *master) +{ + return &master->bus; +} + +struct i3c_generic_ibi_pool; + +struct i3c_generic_ibi_pool * +i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev, + const struct i3c_ibi_setup *req); +void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool); + +struct i3c_ibi_slot * +i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool); +void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool, + struct i3c_ibi_slot *slot); + +void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); + +struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev); + +#endif /* I3C_MASTER_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index b6c6151c7446f0c14dfaf869cdeb6c0e3a01cb7f..81c9df5c04fa504f88a5f7fb9f11bd135a3f6d07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -169,10 +169,7 @@ static inline bool idr_is_empty(const struct idr *idr) * Each idr_preload() should be matched with an invocation of this * function. See idr_preload() for details. */ -static inline void idr_preload_end(void) -{ - preempt_enable(); -} +void idr_preload_end(void); /** * idr_for_each_entry() - Iterate over an IDR's elements of a given type. diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index eeceac3376fc83ec0029b4397e9a4abecddd4021..a3b5edb26bc51dc8c000928682288c6b537436db 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -61,6 +61,7 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. 
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -74,6 +75,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_NO_SOFTIRQ_CALL 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -427,7 +429,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING +# ifdef CONFIG_PREEMPT_RT_BASE +# define force_irqthreads (true) +# else extern bool force_irqthreads; +# endif #else #define force_irqthreads (0) #endif @@ -493,9 +499,10 @@ struct softirq_action void (*action)(struct softirq_action *); }; +#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); - +static inline void thread_do_softirq(void) { do_softirq(); } #ifdef __ARCH_HAS_DO_SOFTIRQ void do_softirq_own_stack(void); #else @@ -504,13 +511,25 @@ static inline void do_softirq_own_stack(void) __do_softirq(); } #endif +#else +extern void thread_do_softirq(void); +#endif extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); +#ifdef CONFIG_PREEMPT_RT_FULL +extern void __raise_softirq_irqoff_ksoft(unsigned int nr); +#else +static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) +{ + __raise_softirq_irqoff(nr); +} +#endif extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +extern void softirq_check_pending_idle(void); DECLARE_PER_CPU(struct task_struct *, ksoftirqd); @@ -532,8 +551,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its execution is still not started, it will be executed only once. - * If this tasklet is already running on another CPU (or schedule is called - from tasklet itself), it is rescheduled for later. + * If this tasklet is already running on another CPU, it is rescheduled + for later. + * Schedule must not be called from the tasklet itself (a lockup occurs) * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. 
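For reference, the tasklet usage pattern these rules apply to is the classic one (illustrative sketch, not part of the patch; note the RT rule above about not rescheduling a tasklet from itself):

static void my_tasklet_fn(unsigned long data)
{
        /* serialized wrt itself; per the rules above, do not call
         * tasklet_schedule() on this tasklet from within its own handler */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        tasklet_schedule(&my_tasklet);  /* defer work out of hard IRQ context */
        return IRQ_HANDLED;
}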
@@ -558,27 +578,39 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ + TASKLET_STATE_PENDING, /* Tasklet is pending */ + TASKLET_STATE_CHAINED /* Tasklet is chained */ }; -#ifdef CONFIG_SMP +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) +#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED) +#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED) + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } +static inline int tasklet_tryunlock(struct tasklet_struct *t) +{ + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; +} + static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_atomic(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} +extern void tasklet_unlock_wait(struct tasklet_struct *t); + #else #define tasklet_trylock(t) 1 +#define tasklet_tryunlock(t) 1 #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif @@ -612,17 +644,18 @@ static inline void tasklet_disable(struct tasklet_struct *t) smp_mb(); } -static inline void tasklet_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic(); - atomic_dec(&t->count); -} - +extern void tasklet_enable(struct tasklet_struct *t); extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); +#ifdef CONFIG_PREEMPT_RT_FULL +extern void softirq_early_init(void); +#else +static inline void softirq_early_init(void) { } +#endif + struct tasklet_hrtimer { struct hrtimer timer; struct tasklet_struct tasklet; diff --git a/include/linux/irq.h b/include/linux/irq.h index 9504267414a452f6a542c115cdb7ae2a23d720f4..568fa79c36b1649b233f16f5a12c8dc968c44342 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -69,6 +69,7 @@ enum irqchip_irq_state; * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. + * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) * IRQ_DISABLE_UNLAZY - Disable lazy irq disable */ enum { @@ -96,13 +97,14 @@ enum { IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), + IRQ_NO_SOFTIRQ_CALL = (1 << 20), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -212,6 +214,8 @@ struct irq_data { * required * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call * irq_chip::irq_set_affinity() when deactivated. 
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked + * from actual interrupt context. */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -235,6 +239,7 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; @@ -305,6 +310,16 @@ static inline bool irqd_is_single_target(struct irq_data *d) return __irqd_to_state(d) & IRQD_SINGLE_TARGET; } +static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; +} + +static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; +} + static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index b11fcdfd077076e9593e9af3ea64f443d34572a2..0c50559987c55c4dd2aeb67d2d14f973d3831cb6 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -18,6 +18,8 @@ /* Doesn't want IPI, wait for tick: */ #define IRQ_WORK_LAZY BIT(2) +/* Run hard IRQ context, even on RT */ +#define IRQ_WORK_HARD_IRQ BIT(3) #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) @@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } #endif +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) +void irq_work_tick_soft(void); +#else +static inline void irq_work_tick_soft(void) { } +#endif + #endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 1d21e98d68549440aaaaff08a0e5919eee9232c1..1f33daa5c674e7bfa81a8288d2bc2da1285d7c3e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -585,8 +585,10 @@ struct rdists { void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; + bool lpi_enabled; } __percpu *rdist; - struct page *prop_page; + phys_addr_t prop_table_pa; + void *prop_table_va; u64 flags; u32 gicd_typer; bool has_vlpis; diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 875c41b23f2031d6d8e9bd21b682397097b26ad4..ff5eb8d1ede4b48e47ff2a92b52285b076676290 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -71,6 +71,7 @@ struct irq_desc { unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; + u64 random_ip; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 21619c92c3770ca0cfc1f3381bee2fe8f065da88..b20eeb25e9fae858b2e2d0d279a2c88aafbf5aed 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -43,14 +43,6 @@ do { \ do { \ current->hardirq_context--; \ } while (0) -# define lockdep_softirq_enter() \ -do { \ - current->softirq_context++; \ -} while (0) -# define lockdep_softirq_exit() \ -do { \ - current->softirq_context--; \ -} while (0) #else # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) @@ -64,6 +56,21 @@ do { \ # define lockdep_softirq_exit() do { } while (0) #endif +#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) +# define lockdep_softirq_enter() \ +do { \ + current->softirq_context++; \ +} while (0) +# define lockdep_softirq_exit() \ +do { \ + current->softirq_context--; \ +} while (0) + +#else +# define 
lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) +#endif + #if defined(CONFIG_IRQSOFF_TRACER) || \ defined(CONFIG_PREEMPT_TRACER) extern void stop_critical_timings(void); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 268f3000d1b34d0e365f2eca252594d3a17505bb..8f5d6ecb802ee1400035522d4705e94e354c8830 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) static inline void jbd_lock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_State, &bh->b_state); +#else + spin_lock(&bh->b_state_lock); +#endif } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_trylock(BH_State, &bh->b_state); +#else + return spin_trylock(&bh->b_state_lock); +#endif } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_is_locked(BH_State, &bh->b_state); +#else + return spin_is_locked(&bh->b_state_lock); +#endif } static inline void jbd_unlock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_State, &bh->b_state); +#else + spin_unlock(&bh->b_state_lock); +#endif } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_JournalHead, &bh->b_state); +#else + spin_lock(&bh->b_journal_head_lock); +#endif } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_JournalHead, &bh->b_state); +#else + spin_unlock(&bh->b_journal_head_lock); +#endif } #define J_ASSERT(assert) BUG_ON(!(assert)) diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 68bd88223417c058d7a319790993fe8691800e68..e033b25b0b72549944c866f1d56666bfd6ccdfc4 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, extern __printf(1, 2) int kdb_printf(const char *, ...); typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); +#define in_kdb_printk() (kdb_trap_printk) extern void kdb_init(int level); /* Access to kdb specific polling devices */ @@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *, extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } +#define in_kdb_printk() (0) static inline void kdb_init(int level) {} static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return 0; } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 50733abbe548ef31b9cda63c8e00d0e4add5d354..64ef575435bb255b0fc1d4a80875831d94165a87 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -260,6 +260,9 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) + +# define might_sleep_no_state_check() \ + do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) #else static inline void ___might_sleep(const char *file, int line, @@ -267,6 +270,7 @@ extern int _cond_resched(void); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) +# define might_sleep_no_state_check() do { might_resched(); } while (0) # define sched_annotate_sleep() do { } while (0) #endif diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 72308c38e06c48f237152549498efeae6e122894..0e3b9b528c9e1573d5cbb6bf778a611c96dfc524 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -88,7 +88,7 @@ enum { struct kthread_worker { unsigned int flags; - spinlock_t lock; + raw_spinlock_t lock; struct list_head work_list; struct list_head delayed_work_list; struct task_struct *task; @@ -109,7 +109,7 @@ struct kthread_delayed_work { }; #define KTHREAD_WORKER_INIT(worker) { \ - .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ + .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ .work_list = LIST_HEAD_INIT((worker).work_list), \ .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ } diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 3fc2cc57ba1bc6d5badfd24ef67982683457507e..0b5de7d9ffcf8ebaf1d30f0f8cf3759d675ccc59 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -3,6 +3,7 @@ #define _LINUX_LIST_BL_H #include +#include #include /* @@ -33,13 +34,24 @@ struct hlist_bl_head { struct hlist_bl_node *first; +#ifdef CONFIG_PREEMPT_RT_BASE + raw_spinlock_t lock; +#endif }; struct hlist_bl_node { struct hlist_bl_node *next, **pprev; }; -#define INIT_HLIST_BL_HEAD(ptr) \ - ((ptr)->first = NULL) + +#ifdef CONFIG_PREEMPT_RT_BASE +#define INIT_HLIST_BL_HEAD(h) \ +do { \ + (h)->first = NULL; \ + raw_spin_lock_init(&(h)->lock); \ +} while (0) +#else +#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL +#endif static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) { @@ -119,12 +131,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n) static inline void hlist_bl_lock(struct hlist_bl_head *b) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(0, (unsigned long *)b); +#else + raw_spin_lock(&b->lock); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __set_bit(0, (unsigned long *)b); +#endif +#endif } static inline void hlist_bl_unlock(struct hlist_bl_head *b) { +#ifndef CONFIG_PREEMPT_RT_BASE __bit_spin_unlock(0, (unsigned long *)b); +#else +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __clear_bit(0, (unsigned long *)b); +#endif + raw_spin_unlock(&b->lock); +#endif } static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) diff --git a/include/linux/locallock.h b/include/linux/locallock.h new file mode 100644 index 
0000000000000000000000000000000000000000..81c89d87723b5210e3ccf93355ba56af78ce1ccb --- /dev/null +++ b/include/linux/locallock.h @@ -0,0 +1,282 @@ +#ifndef _LINUX_LOCALLOCK_H +#define _LINUX_LOCALLOCK_H + +#include +#include +#include + +#ifdef CONFIG_PREEMPT_RT_BASE + +#ifdef CONFIG_DEBUG_SPINLOCK +# define LL_WARN(cond) WARN_ON(cond) +#else +# define LL_WARN(cond) do { } while (0) +#endif + +/* + * per cpu lock based substitute for local_irq_*() + */ +struct local_irq_lock { + spinlock_t lock; + struct task_struct *owner; + int nestcnt; + unsigned long flags; +}; + +#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ + DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ + .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } + +#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ + DECLARE_PER_CPU(struct local_irq_lock, lvar) + +#define local_irq_lock_init(lvar) \ + do { \ + int __cpu; \ + for_each_possible_cpu(__cpu) \ + spin_lock_init(&per_cpu(lvar, __cpu).lock); \ + } while (0) + +static inline void __local_lock(struct local_irq_lock *lv) +{ + if (lv->owner != current) { + spin_lock(&lv->lock); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + } + lv->nestcnt++; +} + +#define local_lock(lvar) \ + do { __local_lock(&get_local_var(lvar)); } while (0) + +#define local_lock_on(lvar, cpu) \ + do { __local_lock(&per_cpu(lvar, cpu)); } while (0) + +static inline int __local_trylock(struct local_irq_lock *lv) +{ + if (lv->owner != current && spin_trylock(&lv->lock)) { + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + lv->nestcnt = 1; + return 1; + } else if (lv->owner == current) { + lv->nestcnt++; + return 1; + } + return 0; +} + +#define local_trylock(lvar) \ + ({ \ + int __locked; \ + __locked = __local_trylock(&get_local_var(lvar)); \ + if (!__locked) \ + put_local_var(lvar); \ + __locked; \ + }) + +static inline void __local_unlock(struct local_irq_lock *lv) +{ + LL_WARN(lv->nestcnt == 0); + LL_WARN(lv->owner != current); + if (--lv->nestcnt) + return; + + lv->owner = NULL; + spin_unlock(&lv->lock); +} + +#define local_unlock(lvar) \ + do { \ + __local_unlock(this_cpu_ptr(&lvar)); \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_on(lvar, cpu) \ + do { __local_unlock(&per_cpu(lvar, cpu)); } while (0) + +static inline void __local_lock_irq(struct local_irq_lock *lv) +{ + spin_lock_irqsave(&lv->lock, lv->flags); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + lv->nestcnt = 1; +} + +#define local_lock_irq(lvar) \ + do { __local_lock_irq(&get_local_var(lvar)); } while (0) + +#define local_lock_irq_on(lvar, cpu) \ + do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) + +static inline void __local_unlock_irq(struct local_irq_lock *lv) +{ + LL_WARN(!lv->nestcnt); + LL_WARN(lv->owner != current); + lv->owner = NULL; + lv->nestcnt = 0; + spin_unlock_irq(&lv->lock); +} + +#define local_unlock_irq(lvar) \ + do { \ + __local_unlock_irq(this_cpu_ptr(&lvar)); \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_irq_on(lvar, cpu) \ + do { \ + __local_unlock_irq(&per_cpu(lvar, cpu)); \ + } while (0) + +static inline int __local_lock_irqsave(struct local_irq_lock *lv) +{ + if (lv->owner != current) { + __local_lock_irq(lv); + return 0; + } else { + lv->nestcnt++; + return 1; + } +} + +#define local_lock_irqsave(lvar, _flags) \ + do { \ + if (__local_lock_irqsave(&get_local_var(lvar))) \ + put_local_var(lvar); \ + _flags = __this_cpu_read(lvar.flags); \ + } while (0) + +#define local_lock_irqsave_on(lvar, _flags, cpu) \ + do { \ + 
__local_lock_irqsave(&per_cpu(lvar, cpu)); \ + _flags = per_cpu(lvar, cpu).flags; \ + } while (0) + +static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, + unsigned long flags) +{ + LL_WARN(!lv->nestcnt); + LL_WARN(lv->owner != current); + if (--lv->nestcnt) + return 0; + + lv->owner = NULL; + spin_unlock_irqrestore(&lv->lock, lv->flags); + return 1; +} + +#define local_unlock_irqrestore(lvar, flags) \ + do { \ + if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_irqrestore_on(lvar, flags, cpu) \ + do { \ + __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ + } while (0) + +#define local_spin_trylock_irq(lvar, lock) \ + ({ \ + int __locked; \ + local_lock_irq(lvar); \ + __locked = spin_trylock(lock); \ + if (!__locked) \ + local_unlock_irq(lvar); \ + __locked; \ + }) + +#define local_spin_lock_irq(lvar, lock) \ + do { \ + local_lock_irq(lvar); \ + spin_lock(lock); \ + } while (0) + +#define local_spin_unlock_irq(lvar, lock) \ + do { \ + spin_unlock(lock); \ + local_unlock_irq(lvar); \ + } while (0) + +#define local_spin_lock_irqsave(lvar, lock, flags) \ + do { \ + local_lock_irqsave(lvar, flags); \ + spin_lock(lock); \ + } while (0) + +#define local_spin_unlock_irqrestore(lvar, lock, flags) \ + do { \ + spin_unlock(lock); \ + local_unlock_irqrestore(lvar, flags); \ + } while (0) + +#define get_locked_var(lvar, var) \ + (*({ \ + local_lock(lvar); \ + this_cpu_ptr(&var); \ + })) + +#define put_locked_var(lvar, var) local_unlock(lvar); + +#define get_locked_ptr(lvar, var) \ + ({ \ + local_lock(lvar); \ + this_cpu_ptr(var); \ + }) + +#define put_locked_ptr(lvar, var) local_unlock(lvar); + +#define local_lock_cpu(lvar) \ + ({ \ + local_lock(lvar); \ + smp_processor_id(); \ + }) + +#define local_unlock_cpu(lvar) local_unlock(lvar) + +#else /* PREEMPT_RT_BASE */ + +#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar +#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar + +static inline void local_irq_lock_init(int lvar) { } + +#define local_trylock(lvar) \ + ({ \ + preempt_disable(); \ + 1; \ + }) + +#define local_lock(lvar) preempt_disable() +#define local_unlock(lvar) preempt_enable() +#define local_lock_irq(lvar) local_irq_disable() +#define local_lock_irq_on(lvar, cpu) local_irq_disable() +#define local_unlock_irq(lvar) local_irq_enable() +#define local_unlock_irq_on(lvar, cpu) local_irq_enable() +#define local_lock_irqsave(lvar, flags) local_irq_save(flags) +#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) + +#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) +#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) +#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) +#define local_spin_lock_irqsave(lvar, lock, flags) \ + spin_lock_irqsave(lock, flags) +#define local_spin_unlock_irqrestore(lvar, lock, flags) \ + spin_unlock_irqrestore(lock, flags) + +#define get_locked_var(lvar, var) get_cpu_var(var) +#define put_locked_var(lvar, var) put_cpu_var(var) +#define get_locked_ptr(lvar, var) get_cpu_ptr(var) +#define put_locked_ptr(lvar, var) put_cpu_ptr(var) + +#define local_lock_cpu(lvar) get_cpu() +#define local_unlock_cpu(lvar) put_cpu() + +#endif + +#endif diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 2acdd046df2d5dda3edd9b30e9e4bd7700f42dca..1b06db7a6096516414ea170450b853226eecc59e 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -301,6 +301,9 @@ static inline int 
memblock_get_region_node(const struct memblock_region *r) } #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index cc6b6532eb56e75943e8cc999514dddfdc79dafe..dbb2c4e27277bbacea7f7f39b0064f23c11708f2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -665,6 +665,7 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec, pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + preempt_disable_rt(); /* Update memcg */ __mod_memcg_state(pn->memcg, idx, val); @@ -675,6 +676,7 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec, x = 0; } __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); + preempt_enable_rt(); } static inline void mod_lruvec_state(struct lruvec *lruvec, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3a9a996af229eaefafb78c1392514cb25e7ab56b..202b736ccbfafa08e3f274eaa1ea00ef9ca70f41 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -487,6 +488,9 @@ struct mm_struct { bool tlb_flush_batched; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head delayed_drop; +#endif #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 610cdf8082f2e60cc507cd1510bdc3a2f2691d2e..7ec902aefb2735dfa4b0ceea2068f9f1e238eaef 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -448,6 +448,23 @@ struct pci_epf_device_id { kernel_ulong_t driver_data; }; +/* i3c */ + +#define I3C_MATCH_DCR 0x1 +#define I3C_MATCH_MANUF 0x2 +#define I3C_MATCH_PART 0x4 +#define I3C_MATCH_EXTRA_INFO 0x8 + +struct i3c_device_id { + __u8 match_flags; + __u8 dcr; + __u16 manuf_id; + __u16 part_id; + __u16 extra_info; + + const void *data; +}; + /* spi */ #define SPI_NAME_SIZE 32 diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 8f7cdf83f3592fb1b25e585ab8840e78024502ce..6aa217c6e3caa77bc450b6b8f0940beaf4048f9c 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -22,6 +22,17 @@ struct ww_acquire_ctx; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifdef CONFIG_PREEMPT_RT_FULL +# include +#else + /* * Simple, straightforward mutexes with strict semantics: * @@ -118,13 +129,6 @@ do { \ __mutex_init((mutex), #mutex, &__key); \ } while (0) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { .name = #lockname } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .owner = ATOMIC_LONG_INIT(0) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ @@ -229,4 +233,6 @@ mutex_trylock_recursive(struct mutex *lock) return mutex_trylock(lock); } +#endif /* !PREEMPT_RT_FULL */ + #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..3fcb5edb1d2b46d6e1862f2b31a092272f3dfd0d --- /dev/null +++ 
b/include/linux/mutex_rt.h @@ -0,0 +1,130 @@ +#ifndef __LINUX_MUTEX_RT_H +#define __LINUX_MUTEX_RT_H + +#ifndef __LINUX_MUTEX_H +#error "Please include mutex.h" +#endif + +#include + +/* FIXME: Just for __lockfunc */ +#include + +struct mutex { + struct rt_mutex lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ + { \ + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ + } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); +extern void __lockfunc _mutex_lock(struct mutex *lock); +extern void __lockfunc _mutex_lock_io(struct mutex *lock); +extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); +extern int __lockfunc _mutex_lock_killable(struct mutex *lock); +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_trylock(struct mutex *lock); +extern void __lockfunc _mutex_unlock(struct mutex *lock); + +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) +#define mutex_lock(l) _mutex_lock(l) +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) +#define mutex_lock_killable(l) _mutex_lock_killable(l) +#define mutex_trylock(l) _mutex_trylock(l) +#define mutex_unlock(l) _mutex_unlock(l) +#define mutex_lock_io(l) _mutex_lock_io(l); + +#define __mutex_owner(l) ((l)->lock.owner) + +#ifdef CONFIG_DEBUG_MUTEXES +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) +#else +static inline void mutex_destroy(struct mutex *lock) {} +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible_nested(l, s) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable_nested(l, s) +# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s) + +# define mutex_lock_nest_lock(lock, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ +} while (0) + +#else +# define mutex_lock_nested(l, s) _mutex_lock(l) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible(l) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable(l) +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) +# define mutex_lock_io_nested(l, s) _mutex_lock_io(l) +#endif + +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(mutex)->lock); \ + __mutex_do_init((mutex), #mutex, &__key); \ +} while (0) + +# define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_init(&(mutex)->lock); \ + __mutex_do_init((mutex), name, key); \ +} while (0) + +/** + * These values are chosen such that FAIL and SUCCESS match the + * values of the regular mutex_trylock(). 
+ */ +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE, +}; +/** + * mutex_trylock_recursive - trylock variant that allows recursive locking + * @lock: mutex to be locked + * + * This function should not be used, _ever_. It is purely for hysterical GEM + * raisins, and once those are gone this will be removed. + * + * Returns: + * MUTEX_TRYLOCK_FAILED - trylock failed, + * MUTEX_TRYLOCK_SUCCESS - lock acquired, + * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. + */ +int __rt_mutex_owner_current(struct rt_mutex *lock); + +static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock) +{ + if (unlikely(__rt_mutex_owner_current(&lock->lock))) + return MUTEX_TRYLOCK_RECURSIVE; + + return mutex_trylock(lock); +} + +extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + +#endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8d48b352ee74f891f6ddc1ebb018f01e41d8631e..bd73117e263666b1a053e0edcceaba333d83e3b1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -423,7 +423,19 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); + +/* + * When PREEMPT_RT_FULL is defined, all device interrupt handlers + * run as threads and can themselves be preempted (without PREEMPT_RT, + * interrupt threads cannot be preempted). This means a call to + * __napi_schedule_irqoff() from an interrupt handler can be preempted + * and can corrupt the napi->poll_list. + */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define __napi_schedule_irqoff(n) __napi_schedule(n) +#else void __napi_schedule_irqoff(struct napi_struct *n); +#endif static inline bool napi_disable_pending(struct napi_struct *n) { @@ -588,7 +600,11 @@ struct netdev_queue { * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; +#ifdef CONFIG_PREEMPT_RT_FULL + struct task_struct *xmit_lock_owner; +#else int xmit_lock_owner; +#endif /* * Time (in jiffies) of last Tx */ @@ -2990,6 +3006,7 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + struct sk_buff_head tofree_queue; }; @@ -3008,14 +3025,38 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, #endif } +#define XMIT_RECURSION_LIMIT 8 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int dev_recursion_level(void) +{ + return current->xmit_recursion; +} + +static inline bool dev_xmit_recursion(void) +{ + return unlikely(current->xmit_recursion > + XMIT_RECURSION_LIMIT); +} + +static inline void dev_xmit_recursion_inc(void) +{ + current->xmit_recursion++; +} + +static inline void dev_xmit_recursion_dec(void) +{ + current->xmit_recursion--; +} + +#else + static inline int dev_recursion_level(void) { return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > @@ -3031,6 +3072,7 @@ static inline void dev_xmit_recursion_dec(void) { __this_cpu_dec(softnet_data.xmit.recursion); } +#endif void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); @@ -3840,13 +3882,52 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) return (1U << debug_value) - 1; }
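Under PREEMPT_RT_FULL the transmit path runs in preemptible (threaded) context and may migrate between CPUs, so a per-CPU recursion counter could be incremented on one CPU and decremented on another; the nesting depth therefore moves into current->xmit_recursion, as added above. A minimal sketch of how a transmit routine might use these helpers (example_xmit_one() is hypothetical; the dev_xmit_recursion*() helpers are the ones defined above):

	/* Hypothetical caller, bounding xmit nesting with the helpers above. */
	static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
	{
		int ret;

		if (dev_xmit_recursion())
			return NET_XMIT_DROP;	/* nested too deep, avoid stack overrun */

		dev_xmit_recursion_inc();
		ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
		dev_xmit_recursion_dec();
		return ret;
	}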
-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) +#ifdef CONFIG_PREEMPT_RT_FULL +static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) +{ + WRITE_ONCE(txq->xmit_lock_owner, current); +} + +static inline void netdev_queue_clear_owner(struct netdev_queue *txq) +{ + WRITE_ONCE(txq->xmit_lock_owner, NULL); +} + +static inline bool netdev_queue_has_owner(struct netdev_queue *txq) +{ + if (READ_ONCE(txq->xmit_lock_owner) != NULL) + return true; + return false; +} + +#else + +static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) { - spin_lock(&txq->_xmit_lock); /* Pairs with READ_ONCE() in __dev_queue_xmit() */ WRITE_ONCE(txq->xmit_lock_owner, cpu); } +static inline void netdev_queue_clear_owner(struct netdev_queue *txq) +{ + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); +} + +static inline bool netdev_queue_has_owner(struct netdev_queue *txq) +{ + if (READ_ONCE(txq->xmit_lock_owner) != -1) + return true; + return false; +} +#endif + +static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) +{ + spin_lock(&txq->_xmit_lock); + netdev_queue_set_owner(txq, cpu); +} + static inline bool __netif_tx_acquire(struct netdev_queue *txq) { __acquire(&txq->_xmit_lock); @@ -3861,8 +3942,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq) static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ - WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + netdev_queue_set_owner(txq, smp_processor_id()); } static inline bool __netif_tx_trylock(struct netdev_queue *txq) @@ -3870,29 +3950,26 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq) bool ok = spin_trylock(&txq->_xmit_lock); if (likely(ok)) { - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ - WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + netdev_queue_set_owner(txq, smp_processor_id()); } return ok; } static inline void __netif_tx_unlock(struct netdev_queue *txq) { - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ - WRITE_ONCE(txq->xmit_lock_owner, -1); + netdev_queue_clear_owner(txq); spin_unlock(&txq->_xmit_lock); } static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) { - /* Pairs with READ_ONCE() in __dev_queue_xmit() */ - WRITE_ONCE(txq->xmit_lock_owner, -1); + netdev_queue_clear_owner(txq); spin_unlock_bh(&txq->_xmit_lock); } static inline void txq_trans_update(struct netdev_queue *txq) { - if (txq->xmit_lock_owner != -1) + if (netdev_queue_has_owner(txq)) txq->trans_start = jiffies; } diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 0ade4d1e4dd96fc111bf736499ce3843a434c654..3e21ce64ce5416a8c36e7e9eecde2f6e6cabfbad 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -6,6 +6,7 @@ #include #include #include +#include #include /* Test a struct->invflags and a boolean for inequality */ @@ -345,6 +346,8 @@ void xt_free_table_info(struct xt_table_info *info); */ DECLARE_PER_CPU(seqcount_t, xt_recseq); +DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); + /* xt_tee_enabled - true if x_tables needs to handle reentrancy * * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. 
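The xt_write_lock declared above is taken in xt_write_recseq_begin() (next hunk) so that, on RT, two preemptible writers on the same CPU cannot interleave on the per-CPU xt_recseq sequence counter; on !RT kernels local_lock() falls back to preempt_disable() (see the locallock.h fallbacks earlier in this patch). The general pattern, sketched with the hypothetical names example_lock and example_count:

	DEFINE_LOCAL_IRQ_LOCK(example_lock);
	DEFINE_PER_CPU(int, example_count);

	static void example_update(void)
	{
		/* RT: per-CPU sleeping lock; !RT: preempt_disable() */
		local_lock(example_lock);
		__this_cpu_inc(example_count);
		local_unlock(example_lock);
	}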
@@ -365,6 +368,9 @@ static inline unsigned int xt_write_recseq_begin(void) { unsigned int addend; + /* RT protection */ + local_lock(xt_write_lock); + /* * Low order bit of sequence is set if we already * called xt_write_recseq_begin(). @@ -395,6 +401,7 @@ static inline void xt_write_recseq_end(unsigned int addend) /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ smp_wmb(); __this_cpu_add(xt_recseq.sequence, addend); + local_unlock(xt_write_lock); } /* diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 8ea7ceed828519f5ee9912c1d95f15442ff1d62a..d52dfae05d2ab68fcd5821668c03e345bcedec47 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -164,7 +164,11 @@ struct nfs_inode { /* Readers: in-flight sillydelete RPC calls */ /* Writers: rmdir */ +#ifdef CONFIG_PREEMPT_RT_BASE + struct semaphore rmdir_sem; +#else struct rw_semaphore rmdir_sem; +#endif struct mutex commit_mutex; #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index cab24a127feb34bab337102fcb69500ca18a1d54..73b0d19ef0d9435ceb17162a7f9da693a79a0b46 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1549,7 +1549,7 @@ struct nfs_unlinkdata { struct nfs_removeargs args; struct nfs_removeres res; struct dentry *dentry; - wait_queue_head_t wq; + struct swait_queue_head wq; struct rpc_cred *cred; struct nfs_fattr dir_attr; long timeout; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c0dd2f749d3f89d8f0bb257c20291c53b2e0f72f..2019eca4cbdc16ceaf18b98f8a83e41493d6c66b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3118,4 +3118,6 @@ #define PCI_VENDOR_ID_NCUBE 0x10ff +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 + #endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 79b99d653e030d113e4401fc26c7b47e81dcff8c..fb44e237316d8576d6ef1ed1f6fecb6cfd1bfbe9 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \ extern int __percpu_down_read(struct percpu_rw_semaphore *, int); extern void __percpu_up_read(struct percpu_rw_semaphore *); -static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) { might_sleep(); @@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * __this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) __percpu_down_read(sem, false); /* Unconditional memory barrier */ - barrier(); /* - * The barrier() prevents the compiler from + * The preempt_enable() prevents the compiler from * bleeding the critical section out. */ -} - -static inline void percpu_down_read(struct percpu_rw_semaphore *sem) -{ - percpu_down_read_preempt_disable(sem); preempt_enable(); } @@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) return ret; } -static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) +static inline void percpu_up_read(struct percpu_rw_semaphore *sem) { - /* - * The barrier() prevents the compiler from - * bleeding the critical section out. - */ - barrier(); + preempt_disable(); /* * Same as in percpu_down_read(). 
*/ @@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); } -static inline void percpu_up_read(struct percpu_rw_semaphore *sem) -{ - preempt_disable(); - percpu_up_read_preempt_enable(sem); -} - extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 70b7123f38c7c9044bb262a2cecc96ab415b1f4d..24421bf8c4b32b3cb8a94bb3c0462135e5a7922b 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -19,6 +19,35 @@ #define PERCPU_MODULE_RESERVE 0 #endif +#ifdef CONFIG_PREEMPT_RT_FULL + +#define get_local_var(var) (*({ \ + migrate_disable(); \ + this_cpu_ptr(&var); })) + +#define put_local_var(var) do { \ + (void)&(var); \ + migrate_enable(); \ +} while (0) + +# define get_local_ptr(var) ({ \ + migrate_disable(); \ + this_cpu_ptr(var); }) + +# define put_local_ptr(var) do { \ + (void)(var); \ + migrate_enable(); \ +} while (0) + +#else + +#define get_local_var(var) get_cpu_var(var) +#define put_local_var(var) put_cpu_var(var) +#define get_local_ptr(var) get_cpu_ptr(var) +#define put_local_ptr(var) put_cpu_ptr(var) + +#endif + /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) diff --git a/include/linux/phy.h b/include/linux/phy.h old mode 100644 new mode 100755 index 42766e7179d383dfcb6f618760cdb1171c5a36af..34e6d4bbc988206a16bec4d38287f2d12bda71cf --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -88,6 +88,7 @@ typedef enum { PHY_INTERFACE_MODE_XAUI, /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */ PHY_INTERFACE_MODE_10GKR, + PHY_INTERFACE_MODE_USXGMII, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -160,6 +161,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "xaui"; case PHY_INTERFACE_MODE_10GKR: return "10gbase-kr"; + case PHY_INTERFACE_MODE_USXGMII: + return "usxgmii"; default: return "unknown"; } @@ -405,7 +408,7 @@ struct phy_device { struct phy_driver *drv; u32 phy_id; - + u32 force_mode; struct phy_c45_device_ids c45_ids; unsigned is_c45:1; unsigned is_internal:1; @@ -667,6 +670,10 @@ struct phy_driver { #define PHY_ANY_ID "MATCH ANY PHY" #define PHY_ANY_UID 0xffffffff +#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0) +#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4) +#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10) + /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; diff --git a/include/linux/pid.h b/include/linux/pid.h index 14a9a39da9c77eab3fa21cb05e2b30206ed526d9..a9026a5da196eb77fa2a2ef04e3a924a6ff8e70a 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -3,6 +3,7 @@ #define _LINUX_PID_H #include +#include enum pid_type { diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index ee7e987ea1b4354ef93a149c997caf75b6269c1b..3e6c91bdf2ef8678f3319fe7b81c9f20359a4448 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -15,6 +15,7 @@ struct cpu_timer_list { u64 expires, incr; struct task_struct *task; int firing; + int firing_cpu; }; /* @@ -114,8 +115,8 @@ struct k_itimer { struct { struct alarm alarmtimer; } alarm; - struct rcu_head rcu; } it; + struct rcu_head rcu; }; void run_posix_cpu_timers(struct task_struct *task); diff --git a/include/linux/preempt.h 
b/include/linux/preempt.h index c01813c3fbe9336ed69a54a0296ed7bb69186a5f..9c74a019bf57858b2a9bd9b4b251bd6a5b56c129 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -51,7 +51,11 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) -#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) +#ifndef CONFIG_PREEMPT_RT_FULL +# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) +#else +# define SOFTIRQ_DISABLE_OFFSET (0) +#endif /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 @@ -81,9 +85,15 @@ #include #define hardirq_count() (preempt_count() & HARDIRQ_MASK) -#define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | NMI_MASK)) +#ifndef CONFIG_PREEMPT_RT_FULL +# define softirq_count() (preempt_count() & SOFTIRQ_MASK) +# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#else +# define softirq_count() ((unsigned long)current->softirq_nestcnt) +extern int in_serving_softirq(void); +#endif /* * Are we doing bottom half or hardware interrupt processing? @@ -101,7 +111,6 @@ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) #define in_nmi() (preempt_count() & NMI_MASK) #define in_task() (!(preempt_count() & \ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) @@ -118,7 +127,11 @@ /* * The preempt_count offset after spin_lock() */ +#if !defined(CONFIG_PREEMPT_RT_FULL) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif /* * The preempt_count offset needed for things like: @@ -167,6 +180,20 @@ extern void preempt_count_sub(int val); #define preempt_count_inc() preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) +#ifdef CONFIG_PREEMPT_LAZY +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) +#define inc_preempt_lazy_count() add_preempt_lazy_count(1) +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) +#else +#define add_preempt_lazy_count(val) do { } while (0) +#define sub_preempt_lazy_count(val) do { } while (0) +#define inc_preempt_lazy_count() do { } while (0) +#define dec_preempt_lazy_count() do { } while (0) +#define preempt_lazy_count() (0) +#endif + #ifdef CONFIG_PREEMPT_COUNT #define preempt_disable() \ @@ -175,16 +202,53 @@ do { \ barrier(); \ } while (0) +#define preempt_lazy_disable() \ +do { \ + inc_preempt_lazy_count(); \ + barrier(); \ +} while (0) + #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ preempt_count_dec(); \ } while (0) -#define preempt_enable_no_resched() sched_preempt_enable_no_resched() +#ifdef CONFIG_PREEMPT_RT_BASE +# define preempt_enable_no_resched() sched_preempt_enable_no_resched() +# define preempt_check_resched_rt() preempt_check_resched() +#else +# define preempt_enable_no_resched() preempt_enable() +# define preempt_check_resched_rt() barrier(); +#endif #define preemptible() (preempt_count() == 0 && !irqs_disabled()) +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + +extern void migrate_disable(void); +extern void migrate_enable(void); + +int __migrate_disabled(struct task_struct *p); + +#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + +extern void migrate_disable(void); 
+extern void migrate_enable(void); +static inline int __migrate_disabled(struct task_struct *p) +{ + return 0; +} + +#else +#define migrate_disable() preempt_disable() +#define migrate_enable() preempt_enable() +static inline int __migrate_disabled(struct task_struct *p) +{ + return 0; +} +#endif + #ifdef CONFIG_PREEMPT #define preempt_enable() \ do { \ @@ -206,6 +270,13 @@ do { \ __preempt_schedule(); \ } while (0) +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) + #else /* !CONFIG_PREEMPT */ #define preempt_enable() \ do { \ @@ -213,6 +284,12 @@ do { \ preempt_count_dec(); \ } while (0) +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ +} while (0) + #define preempt_enable_notrace() \ do { \ barrier(); \ @@ -251,8 +328,16 @@ do { \ #define preempt_disable_notrace() barrier() #define preempt_enable_no_resched_notrace() barrier() #define preempt_enable_notrace() barrier() +#define preempt_check_resched_rt() barrier() #define preemptible() 0 +#define migrate_disable() barrier() +#define migrate_enable() barrier() + +static inline int __migrate_disabled(struct task_struct *p) +{ + return 0; +} #endif /* CONFIG_PREEMPT_COUNT */ #ifdef MODULE @@ -271,10 +356,22 @@ do { \ } while (0) #define preempt_fold_need_resched() \ do { \ - if (tif_need_resched()) \ + if (tif_need_resched_now()) \ set_preempt_need_resched(); \ } while (0) +#ifdef CONFIG_PREEMPT_RT_FULL +# define preempt_disable_rt() preempt_disable() +# define preempt_enable_rt() preempt_enable() +# define preempt_disable_nort() barrier() +# define preempt_enable_nort() barrier() +#else +# define preempt_disable_rt() barrier() +# define preempt_enable_rt() barrier() +# define preempt_disable_nort() preempt_disable() +# define preempt_enable_nort() preempt_enable() +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; diff --git a/include/linux/printk.h b/include/linux/printk.h index 6dd867e3936518c3ea8632adc8f2f6c9614ba242..b9d0504284569ebece2194c8e0da4fdad6b285c9 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -140,9 +140,11 @@ struct va_format { #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); +extern void printk_kill(void); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } +static inline void printk_kill(void) { } #endif #ifdef CONFIG_PRINTK_NMI diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 34149e8b5f73fe6cd821ba58c5196974d0adee2e..affb0fc4c5b6e47eb01c6f33c653706923d60e28 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -330,6 +330,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); +void radix_tree_preload_end(void); + void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); @@ -349,11 +351,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, unsigned int max_items, unsigned int tag); int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); -static inline void radix_tree_preload_end(void) -{ - preempt_enable(); -} - int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); diff --git a/include/linux/random.h b/include/linux/random.h index 37209b3b22ae498170f059092d06707b83857f01..c4342f7658cd5d05e415fa9f676ab5b2be66c4c4 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -32,7 +32,7 @@ static inline void add_latent_entropy(void) {} extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; -extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; +extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy; extern void get_random_bytes(void *buf, int nbytes); extern int wait_for_random_bytes(void); diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index fcbeed4053efbba81993fea4151a8a6a6cd6f5d1..2aa2aec354c230f40ea8f0244b534e791f979c3f 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -31,7 +31,7 @@ #include #include -#include +#include struct rb_node { unsigned long __rb_parent_color; diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h new file mode 100644 index 0000000000000000000000000000000000000000..7066962a4379ef522e621320b8c39a1d7a7a65e8 --- /dev/null +++ b/include/linux/rcu_assign_pointer.h @@ -0,0 +1,54 @@ +#ifndef __LINUX_RCU_ASSIGN_POINTER_H__ +#define __LINUX_RCU_ASSIGN_POINTER_H__ +#include +#include + +/** + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable + * @v: The value to statically initialize with. + */ +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) + +/** + * rcu_assign_pointer() - assign to RCU-protected pointer + * @p: pointer to assign to + * @v: value to assign (publish) + * + * Assigns the specified value to the specified RCU-protected + * pointer, ensuring that any concurrent RCU readers will see + * any prior initialization. + * + * Inserts memory barriers on architectures that require them + * (which is most of them), and also prevents the compiler from + * reordering the code that initializes the structure after the pointer + * assignment. More importantly, this call documents which pointers + * will be dereferenced by RCU read-side code. + * + * In some special cases, you may use RCU_INIT_POINTER() instead + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due + * to the fact that it does not constrain either the CPU or the compiler. 
+ * That said, using RCU_INIT_POINTER() when you should have used + * rcu_assign_pointer() is a very bad thing that results in + * impossible-to-diagnose memory corruption. So please be careful. + * See the RCU_INIT_POINTER() comment header for details. + * + * Note that rcu_assign_pointer() evaluates each of its arguments only + * once, appearances notwithstanding. One of the "extra" evaluations + * is in typeof() and the other visible only to sparse (__CHECKER__), + * neither of which actually execute the argument. As with most cpp + * macros, this execute-arguments-only-once property is important, so + * please be careful when making changes to rcu_assign_pointer() and the + * other macros that it invokes. + */ +#define rcu_assign_pointer(p, v) \ +({ \ + uintptr_t _r_a_p__v = (uintptr_t)(v); \ + \ + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ + else \ + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ + _r_a_p__v; \ +}) + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 68cbe111420bc05b698eb192c5f6c5191325829e..08d64e5713fc997540d8a7ae8a7d97536247464d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -42,6 +42,7 @@ #include #include #include +#include #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) @@ -55,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); #define call_rcu call_rcu_sched #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define call_rcu_bh call_rcu +#else void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); +#endif void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); void rcu_barrier_tasks(void); @@ -73,6 +78,11 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) +#ifndef CONFIG_PREEMPT_RT_FULL +#define sched_rcu_preempt_depth() rcu_preempt_depth() +#else +static inline int sched_rcu_preempt_depth(void) { return 0; } +#endif #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -96,6 +106,8 @@ static inline int rcu_preempt_depth(void) return 0; } +#define sched_rcu_preempt_depth() rcu_preempt_depth() + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ @@ -253,7 +265,14 @@ extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int rcu_read_lock_bh_held(void) +{ + return rcu_read_lock_held(); +} +#else int rcu_read_lock_bh_held(void); +#endif int rcu_read_lock_sched_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -362,54 +381,6 @@ static inline void rcu_preempt_sleep_check(void) { } ((typeof(*p) __force __kernel *)(________p1)); \ }) -/** - * RCU_INITIALIZER() - statically initialize an RCU-protected global variable - * @v: The value to statically initialize with. - */ -#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) - -/** - * rcu_assign_pointer() - assign to RCU-protected pointer - * @p: pointer to assign to - * @v: value to assign (publish) - * - * Assigns the specified value to the specified RCU-protected - * pointer, ensuring that any concurrent RCU readers will see - * any prior initialization. 
- * - * Inserts memory barriers on architectures that require them - * (which is most of them), and also prevents the compiler from - * reordering the code that initializes the structure after the pointer - * assignment. More importantly, this call documents which pointers - * will be dereferenced by RCU read-side code. - * - * In some special cases, you may use RCU_INIT_POINTER() instead - * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due - * to the fact that it does not constrain either the CPU or the compiler. - * That said, using RCU_INIT_POINTER() when you should have used - * rcu_assign_pointer() is a very bad thing that results in - * impossible-to-diagnose memory corruption. So please be careful. - * See the RCU_INIT_POINTER() comment header for details. - * - * Note that rcu_assign_pointer() evaluates each of its arguments only - * once, appearances notwithstanding. One of the "extra" evaluations - * is in typeof() and the other visible only to sparse (__CHECKER__), - * neither of which actually execute the argument. As with most cpp - * macros, this execute-arguments-only-once property is important, so - * please be careful when making changes to rcu_assign_pointer() and the - * other macros that it invokes. - */ -#define rcu_assign_pointer(p, v) \ -({ \ - uintptr_t _r_a_p__v = (uintptr_t)(v); \ - \ - if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ - WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ - else \ - smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ - _r_a_p__v; \ -}) - /** * rcu_swap_protected() - swap an RCU and a regular pointer * @rcu_ptr: RCU pointer @@ -701,10 +672,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_lock(); +#else __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); +#endif } /* @@ -714,10 +689,14 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_unlock(); +#else RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); +#endif local_bh_enable(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 914655848ef6a940bd343799e9f350fc7b9a8639..462ce061bac75ba2d94347a827c7009ab96e8779 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } +#ifdef CONFIG_PREEMPT_RT_FULL +# define synchronize_rcu_bh synchronize_rcu +#else void synchronize_rcu_bh(void); +#endif void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void) } void rcu_barrier(void); +#ifdef CONFIG_PREEMPT_RT_FULL +# define rcu_barrier_bh rcu_barrier +#else void rcu_barrier_bh(void); +#endif void rcu_barrier_sched(void); bool rcu_eqs_special_set(int cpu); unsigned long get_state_synchronize_rcu(void); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index e3c5d856b6da52f367873271db0e436d5b371284..e1266615b9e9d4b118dd54f7e269d9139830eff2 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -558,6 +558,9 @@ void rproc_shutdown(struct rproc *rproc); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); int 
rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); +void rproc_handle_ipi(int ipinr); +int rproc_set_handle_irq(void (handle_irq)(void)); + static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { return container_of(vdev, struct rproc_vdev, vdev); diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 02166e815afb904c0396159c8c9d556c370f1e9d..0b31df1af698d846d174d2ed041c03789c10f573 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -72,7 +72,7 @@ struct reservation_object_list { */ struct reservation_object { struct ww_mutex lock; - seqcount_t seq; + seqlock_t seq; struct dma_fence __rcu *fence_excl; struct reservation_object_list __rcu *fence; @@ -92,7 +92,7 @@ reservation_object_init(struct reservation_object *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); - __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); + seqlock_init(&obj->seq); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); obj->staged = NULL; diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 6fd615a0eea94278fa1d6d9a1ae711f4b60e347a..138bd1e183e0976ff455ab6a1a73df145b80a445 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -14,11 +14,15 @@ #define __LINUX_RT_MUTEX_H #include +#include #include -#include extern int max_lock_depth; /* for sysctl */ +#ifdef CONFIG_DEBUG_MUTEXES +#include +#endif + /** * The rt_mutex structure * @@ -31,8 +35,8 @@ struct rt_mutex { raw_spinlock_t wait_lock; struct rb_root_cached waiters; struct task_struct *owner; -#ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; +#ifdef CONFIG_DEBUG_RT_MUTEXES const char *name, *file; int line; void *magic; @@ -82,16 +86,23 @@ do { \ #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ +#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .waiters = RB_ROOT_CACHED \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ + , .save_state = 1 } + /** * rt_mutex_is_locked - is the mutex locked * @lock: the mutex to be queried @@ -115,6 +126,7 @@ extern void rt_mutex_lock(struct rt_mutex *lock); #endif extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); +extern int rt_mutex_lock_killable(struct rt_mutex *lock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout); diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..a9c4c2ac4d1fa1cbf41c4ff27cb8bf827bd7cac4 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,119 @@ +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +extern void __lockfunc rt_write_lock(rwlock_t *rwlock); +extern void __lockfunc rt_read_lock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); +extern int __lockfunc rt_read_can_lock(rwlock_t *rwlock); +extern int __lockfunc rt_write_can_lock(rwlock_t *rwlock); +extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); + +#define read_can_lock(rwlock) rt_read_can_lock(rwlock) +#define write_can_lock(rwlock) rt_write_can_lock(rwlock) + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +static inline int __write_trylock_rt_irqsave(rwlock_t *lock, unsigned long *flags) +{ + /* XXX ARCH_IRQ_ENABLED */ + *flags = 0; + return rt_write_trylock(lock); +} + +#define write_trylock_irqsave(lock, flags) \ + __cond_lock(lock, __write_trylock_rt_irqsave(lock, &(flags))) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_read_lock(lock); \ + flags = 0; \ + } while (0) + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_write_lock(lock); \ + flags = 0; \ + } while (0) + +#define read_lock(lock) rt_read_lock(lock) + +#define read_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + rt_read_lock(lock); \ + } while (0) + +#define read_lock_irq(lock) read_lock(lock) + +#define write_lock(lock) rt_write_lock(lock) + +#define write_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + rt_write_lock(lock); \ + } while (0) + +#define write_lock_irq(lock) write_lock(lock) + +#define read_unlock(lock) rt_read_unlock(lock) + +#define read_unlock_bh(lock) \ + do { \ + rt_read_unlock(lock); \ + local_bh_enable(); \ + } while (0) + +#define read_unlock_irq(lock) read_unlock(lock) + +#define write_unlock(lock) rt_write_unlock(lock) + +#define write_unlock_bh(lock) \ + do { \ + rt_write_unlock(lock); \ + local_bh_enable(); \ + } while (0) + +#define write_unlock_irq(lock) write_unlock(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_read_unlock(lock); \ + } while (0) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_write_unlock(lock); \ + } while (0) + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +/* + * Internal functions made global for CPU pinning + */ +void __read_rt_lock(struct rt_rw_lock *lock); +int __read_rt_trylock(struct rt_rw_lock *lock); +void __write_rt_lock(struct rt_rw_lock *lock); +int __write_rt_trylock(struct rt_rw_lock *lock); +void __read_rt_unlock(struct rt_rw_lock *lock); +void __write_rt_unlock(struct rt_rw_lock *lock); + +#endif diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 857a72ceb794252eb8cb22c55ef85d17453c34a0..c21683f3e14afc30b4957046daf8afac38affea5 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,6 +1,10 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + /* * include/linux/rwlock_types.h - generic rwlock type definitions * and initializers diff 
--git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..546a1f8f12748c2117dbe321d4d2b942abdd4c38 --- /dev/null +++ b/include/linux/rwlock_types_rt.h @@ -0,0 +1,55 @@ +#ifndef __LINUX_RWLOCK_TYPES_RT_H +#define __LINUX_RWLOCK_TYPES_RT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. Include spinlock_types.h instead" +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +typedef struct rt_rw_lock rwlock_t; + +#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name = __RW_LOCK_UNLOCKED(name) + +/* + * A reader biased implementation primarily for CPU pinning. + * + * Can be selected as general replacement for the single reader RT rwlock + * variant + */ +struct rt_rw_lock { + struct rt_mutex rtmutex; + atomic_t readers; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +#define __RWLOCK_RT_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.rtmutex), \ + RW_DEP_MAP_INIT(name) \ +} + +void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name, + struct lock_class_key *key); + +#define rwlock_biased_rt_init(rwlock) \ + do { \ + static struct lock_class_key __key; \ + \ + __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \ + } while (0) + +#endif diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index ab93b6eae6968e4eef4b29b2f1f134c4b21fe5d1..b1e32373f44fc44499d4393e8e521519dd98d6fc 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -20,6 +20,10 @@ #include #endif +#ifdef CONFIG_PREEMPT_RT_FULL +#include +#else /* PREEMPT_RT_FULL */ + struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK @@ -114,6 +118,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#endif /* !PREEMPT_RT_FULL */ + +/* + * The functions below are the same for all rwsem implementations including + * the RT specific variant. 
+ */ + /* * lock for reading */ diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..3fb092b7bcc0cb20ad2e979ba3e27bdaffd889f6 --- /dev/null +++ b/include/linux/rwsem_rt.h @@ -0,0 +1,69 @@ +#ifndef _LINUX_RWSEM_RT_H +#define _LINUX_RWSEM_RT_H + +#ifndef _LINUX_RWSEM_H +#error "Include rwsem.h" +#endif + +#include +#include + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +struct rw_semaphore { + atomic_t readers; + struct rt_mutex rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \ + RW_DEP_MAP_INIT(name) \ +} + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); + +#define __init_rwsem(sem, name, key) \ +do { \ + rt_mutex_init(&(sem)->rtmutex); \ + __rwsem_init((sem), (name), (key)); \ +} while (0) + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return atomic_read(&sem->readers) != READER_BIAS; +} + +static inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return atomic_read(&sem->readers) > 0; +} + +extern void __down_read(struct rw_semaphore *sem); +extern int __down_read_interruptible(struct rw_semaphore *sem); +extern int __down_read_killable(struct rw_semaphore *sem); +extern int __down_read_trylock(struct rw_semaphore *sem); +extern void __down_write(struct rw_semaphore *sem); +extern int __must_check __down_write_killable(struct rw_semaphore *sem); +extern int __down_write_trylock(struct rw_semaphore *sem); +extern void __up_read(struct rw_semaphore *sem); +extern void __up_write(struct rw_semaphore *sem); +extern void __downgrade_write(struct rw_semaphore *sem); + +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index f92d5ae6d04e705da7b449eb46167435bf65b9b7..65069db8923c58592c7c18af2a712636759b647a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -28,6 +28,7 @@ #include #include #include +#include /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -101,12 +102,8 @@ struct task_group; __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ TASK_PARKED) -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) - #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) - #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0 && \ (task->state & TASK_NOLOAD) == 0) @@ -134,6 +131,9 @@ struct task_group; smp_store_mb(current->state, (state_value)); \ } while (0) +#define __set_current_state_no_track(state_value) \ + current->state = (state_value); + #define set_special_state(state_value) \ do { \ unsigned long flags; /* may shadow */ \ @@ -143,6 +143,7 @@ struct task_group; current->state = (state_value); \ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) + #else /* * set_current_state() includes a barrier so that the write of current->state @@ -187,6 +188,9 @@ struct task_group; #define set_current_state(state_value) \ smp_store_mb(current->state, (state_value)) 
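The smp_store_mb() in set_current_state() orders the store to current->state before the subsequent load of the wakeup condition, which is what makes the canonical wait loop safe against a concurrent wake_up(). Shown only as an illustration of why that barrier matters (condition stands for any wakeup predicate):

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);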
+#define __set_current_state_no_track(state_value) \ + __set_current_state(state_value) + /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must @@ -223,6 +227,8 @@ extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); extern void io_schedule(void); +int cpu_nr_pinned(int cpu); + /** * struct prev_cputime - snapshot of system and user cputime * @utime: time spent in user mode @@ -600,6 +606,8 @@ struct task_struct { #endif /* -1 unrunnable, 0 runnable, >0 stopped: */ volatile long state; + /* saved state for "spinlock sleepers" */ + volatile long saved_state; /* * This begins the randomizable portion of task_struct. Only @@ -660,7 +668,22 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; - cpumask_t cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t cpus_mask; +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + int migrate_disable; + bool migrate_disable_scheduled; +# ifdef CONFIG_SCHED_DEBUG + int pinned_on_cpu; +# endif +#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +# ifdef CONFIG_SCHED_DEBUG + int migrate_disable; +# endif +#endif +#ifdef CONFIG_PREEMPT_RT_FULL + int sleeping_lock; +#endif #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; @@ -824,6 +847,9 @@ struct task_struct { #ifdef CONFIG_POSIX_TIMERS struct task_cputime cputime_expires; struct list_head cpu_timers[3]; +#ifdef CONFIG_PREEMPT_RT_BASE + struct task_struct *posix_timer_list; +#endif #endif /* Process credentials: */ @@ -868,11 +894,17 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct *sighand; + struct sigqueue *sigqueue_cache; + sigset_t blocked; sigset_t real_blocked; /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; +#ifdef CONFIG_PREEMPT_RT_FULL + /* TODO: move me into ->restart_block ? 
*/ + struct siginfo forced_info; +#endif unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; @@ -897,6 +929,7 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; + struct wake_q_node wake_q_sleeper; #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task: */ @@ -1181,8 +1214,22 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif +#ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; + int softirq_nestcnt; + unsigned int softirqs_raised; +#endif +#ifdef CONFIG_PREEMPT_RT_FULL +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 + int kmap_idx; + pte_t kmap_pte[KM_TYPE_NR]; +# endif +#endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; +#endif +#ifdef CONFIG_PREEMPT_RT_FULL + int xmit_recursion; #endif int pagefault_disabled; #ifdef CONFIG_MMU @@ -1377,6 +1424,7 @@ extern struct pid *cad_pid; /* * Per process flags */ +#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ #define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* Getting shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ @@ -1398,7 +1446,7 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ @@ -1603,6 +1651,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); +extern int wake_up_lock_sleeper(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP @@ -1685,6 +1734,89 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } +#ifdef CONFIG_PREEMPT_LAZY +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) +{ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); +} + +static inline int need_resched_lazy(void) +{ + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +} + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +#else +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } +static inline int need_resched_lazy(void) { return 0; } + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +#endif + + +static inline bool __task_is_stopped_or_traced(struct task_struct *task) +{ + if (task->state & (__TASK_STOPPED | __TASK_TRACED)) + return true; +#ifdef CONFIG_PREEMPT_RT_FULL + if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) + return true; +#endif + return false; +} + +static inline bool task_is_stopped_or_traced(struct 
task_struct *task) +{ + bool traced_stopped; + +#ifdef CONFIG_PREEMPT_RT_FULL + unsigned long flags; + + raw_spin_lock_irqsave(&task->pi_lock, flags); + traced_stopped = __task_is_stopped_or_traced(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); +#else + traced_stopped = __task_is_stopped_or_traced(task); +#endif + return traced_stopped; +} + +static inline bool task_is_traced(struct task_struct *task) +{ + bool traced = false; + + if (task->state & __TASK_TRACED) + return true; +#ifdef CONFIG_PREEMPT_RT_FULL + /* in case the task is sleeping on tasklist_lock */ + raw_spin_lock_irq(&task->pi_lock); + if (task->state & __TASK_TRACED) + traced = true; + else if (task->saved_state & __TASK_TRACED) + traced = true; + raw_spin_unlock_irq(&task->pi_lock); +#endif + return traced; +} + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -1737,6 +1869,23 @@ static __always_inline bool need_resched(void) return unlikely(tif_need_resched()); } +#ifdef CONFIG_PREEMPT_RT_FULL +static inline void sleeping_lock_inc(void) +{ + current->sleeping_lock++; +} + +static inline void sleeping_lock_dec(void) +{ + current->sleeping_lock--; +} + +#else + +static inline void sleeping_lock_inc(void) { } +static inline void sleeping_lock_dec(void) { } +#endif + /* * Wrappers for p->thread_info->cpu access. No-op on UP. */ @@ -1908,4 +2057,6 @@ static inline void rseq_syscall(struct pt_regs *regs) #endif +extern struct task_struct *takedown_cpu_task; + #endif diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index ef54f4b3f1e4437949f8a5998fa4751a8b5dfe16..88e1a563f762ee57fed051e25816d79c616d2845 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } +#ifdef CONFIG_PREEMPT_RT_BASE +extern void __mmdrop_delayed(struct rcu_head *rhp); +static inline void mmdrop_delayed(struct mm_struct *mm) +{ + if (atomic_dec_and_test(&mm->mm_count)) + call_rcu(&mm->delayed_drop, __mmdrop_delayed); +} +#else +# define mmdrop_delayed(mm) mmdrop(mm) +#endif + void mmdrop(struct mm_struct *mm); /* diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 91401309b1aa224c4a0abfd131308c810471c0cd..5f8c1d53bc1136ed0b85824eb50f3374b34a5070 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -90,6 +90,15 @@ extern void sched_exec(void); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#ifdef CONFIG_PREEMPT_RT_BASE +extern void __put_task_struct_cb(struct rcu_head *rhp); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + call_rcu(&t->put_rcu, __put_task_struct_cb); +} +#else extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) @@ -97,7 +106,7 @@ static inline void put_task_struct(struct task_struct *t) if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } - +#endif struct task_struct *task_rcu_dereference(struct task_struct **ptask); #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 10b19a192b2d000121eff7dc91e8f224918d5fec..ce3ccff3d9d83d481f23547843c8615ff3037c89 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -47,8 +47,29 @@ static inline void wake_q_init(struct wake_q_head *head) head->lastp = &head->first; } -extern void wake_q_add(struct wake_q_head *head, - 
struct task_struct *task); -extern void wake_up_q(struct wake_q_head *head); +extern void __wake_q_add(struct wake_q_head *head, + struct task_struct *task, bool sleeper); +static inline void wake_q_add(struct wake_q_head *head, + struct task_struct *task) +{ + __wake_q_add(head, task, false); +} + +static inline void wake_q_add_sleeper(struct wake_q_head *head, + struct task_struct *task) +{ + __wake_q_add(head, task, true); +} + +extern void __wake_up_q(struct wake_q_head *head, bool sleeper); +static inline void wake_up_q(struct wake_q_head *head) +{ + __wake_up_q(head, false); +} + +static inline void wake_up_q_sleeper(struct wake_q_head *head) +{ + __wake_up_q(head, true); +} #endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index a42a29952889c008876fce06dc40a431dcebc905..31bc2cc99bf05c2964ed9eb144832948ef726a28 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) return __read_seqcount_retry(s, start); } - - -static inline void raw_write_seqcount_begin(seqcount_t *s) +static inline void __raw_write_seqcount_begin(seqcount_t *s) { s->sequence++; smp_wmb(); } -static inline void raw_write_seqcount_end(seqcount_t *s) +static inline void raw_write_seqcount_begin(seqcount_t *s) +{ + preempt_disable_rt(); + __raw_write_seqcount_begin(s); +} + +static inline void __raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; } +static inline void raw_write_seqcount_end(seqcount_t *s) +{ + __raw_write_seqcount_end(s); + preempt_enable_rt(); +} + /** * raw_write_seqcount_barrier - do a seq write barrier * @s: pointer to seqcount_t @@ -435,10 +445,33 @@ typedef struct { /* * Read side functions for starting and finalizing a read side section. */ +#ifndef CONFIG_PREEMPT_RT_FULL static inline unsigned read_seqbegin(const seqlock_t *sl) { return read_seqcount_begin(&sl->seqcount); } +#else +/* + * Starvation-safe read side for RT + */ +static inline unsigned read_seqbegin(seqlock_t *sl) +{ + unsigned ret; + +repeat: + ret = READ_ONCE(sl->seqcount.sequence); + if (unlikely(ret & 1)) { + /* + * Take the lock and let the writer proceed (i.e. possibly
+ */ + spin_unlock_wait(&sl->lock); + goto repeat; + } + smp_rmb(); + return ret; +} +#endif static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { @@ -453,36 +486,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); +} + +static inline int try_write_seqlock(seqlock_t *sl) +{ + if (spin_trylock(&sl->lock)) { + __raw_write_seqcount_begin(&sl->seqcount); + return 1; + } + return 0; } static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock(&sl->lock); } static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -491,7 +533,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); return flags; } @@ -501,7 +543,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } diff --git a/include/linux/signal.h b/include/linux/signal.h index 0be5ce2375cb913c91cccbdd6111f5cd54619d6f..6495fda18c2cdadb307f77d4eb2283d46f8a9e9c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -245,6 +245,7 @@ static inline void init_sigpending(struct sigpending *sig) } extern void flush_sigqueue(struct sigpending *queue); +extern void flush_task_sigqueue(struct task_struct *tsk); /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f97734f34746ac6e2948b6df69c5476b742379d6..3ede4f0eac1026b1f222e46280646f83d006a015 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -287,6 +287,7 @@ struct sk_buff_head { __u32 qlen; spinlock_t lock; + raw_spinlock_t raw_lock; }; struct sk_buff; @@ -1735,6 +1736,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } +static inline void skb_queue_head_init_raw(struct sk_buff_head *list) +{ + raw_spin_lock_init(&list->raw_lock); + __skb_queue_head_init(list); +} + static inline void skb_queue_head_init_class(struct sk_buff_head *list, struct lock_class_key *class) { diff --git a/include/linux/smp.h b/include/linux/smp.h index 6bb7f07bc1dd22a4137ab1bd4eafe745ad5635df..039da089482cd1bc29fd17c3bf613ffabdb033c5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void) #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() +#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) +#define put_cpu_light() migrate_enable() + /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index e089157dcf97cd6cc44eb2bbfb7eca8afd054e03..5f5ad0630a268ad0c2039adae9355b4f7c30008a 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) }) /* Include rwlock functions */ -#include +#ifdef CONFIG_PREEMPT_RT_FULL +# include +#else +# include +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include #endif +#ifdef CONFIG_PREEMPT_RT_FULL +# include +#else /* PREEMPT_RT_FULL */ + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -429,6 +437,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock) #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) +#endif /* !PREEMPT_RT_FULL */ + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 42dfab89e740aeb08de1896e491607789eacda4b..29d99ae5a8ab4facb54ef0adc707621e2f3cc828 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } -#include +#ifndef CONFIG_PREEMPT_RT_FULL +# include +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..3696a77fa77d6a8b451972e3e76d028b210fe646 --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,156 @@ +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#include + +extern void +__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key); + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key); \ +} while (0) + +extern void __lockfunc rt_spin_lock(spinlock_t *lock); +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void __lockfunc rt_spin_unlock(spinlock_t *lock); +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); +extern int __lockfunc rt_spin_trylock(spinlock_t *lock); +extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); + +/* + * lockdep-less calls, for derived types like rwlock: + * (for trylock they can use rt_mutex_trylock() directly. + * Migrate disable handling must be done at the call site. + */ +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_trylock(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); + +#define spin_lock(lock) rt_spin_lock(lock) + +#define spin_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + rt_spin_lock(lock); \ + } while (0) + +#define spin_lock_irq(lock) spin_lock(lock) + +#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock(lock) \ +({ \ + int __locked; \ + __locked = spin_do_trylock(lock); \ + __locked; \ +}) + +#ifdef CONFIG_LOCKDEP +# define spin_lock_nested(lock, subclass) \ + do { \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) + +#define spin_lock_bh_nested(lock, subclass) \ + do { \ + local_bh_disable(); \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) + +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) +#else +# define spin_lock_nested(lock, subclass) spin_lock(lock) +# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) + +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) +#endif + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) +{ + unsigned long flags = 0; +#ifdef CONFIG_TRACE_IRQFLAGS + flags = rt_spin_lock_trace_flags(lock); +#else + spin_lock(lock); /* lock_local */ +#endif + return flags; +} + +/* FIXME: we need rt_spin_lock_nest_lock */ +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) + +#define spin_unlock(lock) rt_spin_unlock(lock) + +#define spin_unlock_bh(lock) \ + do { \ + rt_spin_unlock(lock); \ + local_bh_enable(); \ + } while (0) + +#define spin_unlock_irq(lock) spin_unlock(lock) + +#define spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + spin_unlock(lock); \ + } while (0) + +#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) +#define spin_trylock_irq(lock) spin_trylock(lock) + +#define spin_trylock_irqsave(lock, flags) \ + rt_spin_trylock_irqsave(lock, &(flags)) + +#define spin_unlock_wait(lock) 
rt_spin_unlock_wait(lock) + +#ifdef CONFIG_GENERIC_LOCKBREAK +# define spin_is_contended(lock) ((lock)->break_lock) +#else +# define spin_is_contended(lock) (((void)(lock), 0)) +#endif + +static inline int spin_can_lock(spinlock_t *lock) +{ + return !rt_mutex_is_locked(&lock->lock); +} + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_is_locked(&lock->lock); +} + +static inline void assert_spin_locked(spinlock_t *lock) +{ + BUG_ON(!spin_is_locked(lock)); +} + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 24b4e6f2c1a22fe2a6717f4d6076ea87d595994b..10bac715ea965bcda3728029c00e43eb487479dd 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,77 +9,15 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include -#else -# include -#endif - -#include - -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -#else -# define SPIN_DEP_MAP_INIT(lockname) -#endif +#include -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, +#ifndef CONFIG_PREEMPT_RT_FULL +# include +# include #else -# define SPIN_DEBUG_INIT(lockname) +# include +# include +# include #endif -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) - -typedef struct spinlock { - union { - struct raw_spinlock rlock; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) - struct { - u8 __padding[LOCK_PADSIZE]; - struct lockdep_map dep_map; - }; -#endif - }; -} spinlock_t; - -#define __SPIN_LOCK_INITIALIZER(lockname) \ - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } - -#define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) - -#include - #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h new file mode 100644 index 0000000000000000000000000000000000000000..f1dac1fb1d6acdf614fe0beb257725447f50ed18 --- /dev/null +++ b/include/linux/spinlock_types_nort.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H +#define __LINUX_SPINLOCK_TYPES_NORT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +/* + * The non RT version maps spinlocks to raw_spinlocks + */ +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#endif diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 0000000000000000000000000000000000000000..822bf64a61d3b12ca210ac69f5152a9a17af3e94 --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,55 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#include + +#if defined(CONFIG_SMP) +# include +#else +# include +#endif + +#include + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h new file mode 100644 index 0000000000000000000000000000000000000000..3e3d8c5f7a9ad1602e6bd87ac5414b88c0601937 --- /dev/null +++ b/include/linux/spinlock_types_rt.h @@ -0,0 +1,48 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RT_H +#define __LINUX_SPINLOCK_TYPES_RT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +#include + +/* + * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: + */ +typedef struct spinlock { + struct rt_mutex lock; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __RT_SPIN_INITIALIZER(name) \ + { \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + .file = __FILE__, \ + .line = __LINE__ , \ + } +#else +# define __RT_SPIN_INITIALIZER(name) \ + { \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + } +#endif + +/* +.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) +*/ + +#define __SPIN_LOCK_UNLOCKED(name) \ + { .lock = __RT_SPIN_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) } + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#endif diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index c09b6407ae1b3cd633ac9bcc30030249f520b872..b0243ba07fb789f1ea97cd9a156eae6aac8ad770 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -1,10 +1,6 @@ #ifndef __LINUX_SPINLOCK_TYPES_UP_H #define __LINUX_SPINLOCK_TYPES_UP_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - /* * include/linux/spinlock_types_up.h - spinlock type definitions for UP * diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index ccdaa8fd5657fb69b46218dbfeb62cbdbe637bb2..150e886ed83d8a93d7be9dee6dcdfb979aeaf7c5 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -26,6 +26,8 @@ struct cpu_stop_work { cpu_stop_fn_t fn; void *arg; struct cpu_stop_done *done; + /* Did not run due to disabled stopper; for nowait debug checks */ + bool disabled; }; int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3f529ad9a9d2ec33bf8fcdfa72bb9ddce46a292a..328439ce71f53236fc29a5fd110ea910d94bc1ae 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -196,6 +196,12 @@ struct platform_s2idle_ops { void (*end)(void); }; +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) +extern bool pm_in_action; +#else +# define pm_in_action false +#endif + #ifdef CONFIG_SUSPEND extern suspend_state_t mem_sleep_current; extern suspend_state_t mem_sleep_default; diff --git a/include/linux/swait.h b/include/linux/swait.h index 73e06e9986d4b4a57807a16224eaed0ea6a0f396..21ae66cd41d3061c0f8c00aac67a17f13730eadd 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq) extern void swake_up_one(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); +extern void swake_up_all_locked(struct swait_queue_head *q); +extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); @@ -297,4 +299,18 @@ do { \ __ret; \ }) +#define __swait_event_lock_irq(wq, condition, lock, cmd) \ + ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ + raw_spin_unlock_irq(&lock); \ + cmd; \ + schedule(); \ + raw_spin_lock_irq(&lock)) + +#define 
swait_event_lock_irq(wq_head, condition, lock) \ + do { \ + if (condition) \ + break; \ + __swait_event_lock_irq(wq_head, condition, lock, ); \ + } while (0) + #endif /* _LINUX_SWAIT_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index ee8f9f554a9e1299d72d6e5bb1cdcad044445adf..2ad000e362bdfb146b4f4dfb1e41cfa1270877fa 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -12,6 +12,7 @@ #include #include #include +#include #include struct notifier_block; @@ -331,6 +332,7 @@ extern unsigned long nr_free_pagecache_pages(void); /* linux/mm/swap.c */ +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); extern void lru_cache_add(struct page *); extern void lru_cache_add_anon(struct page *page); extern void lru_cache_add_file(struct page *page); diff --git a/include/linux/swork.h b/include/linux/swork.h new file mode 100644 index 0000000000000000000000000000000000000000..f175fa9a60169ffa7120357b3d7729e979991629 --- /dev/null +++ b/include/linux/swork.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SWORK_H +#define _LINUX_SWORK_H + +#include + +struct swork_event { + struct list_head item; + unsigned long flags; + void (*func)(struct swork_event *); +}; + +static inline void INIT_SWORK(struct swork_event *event, + void (*func)(struct swork_event *)) +{ + event->flags = 0; + event->func = func; +} + +bool swork_queue(struct swork_event *sev); + +int swork_get(void); +void swork_put(void); + +#endif /* _LINUX_SWORK_H */ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 535f2ecf3c160797bae668dcba59db1a79e54a9c..4a0eecb09cb2d84f289524e08934c2832ad17312 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -15,6 +15,7 @@ #ifndef __TEE_DRV_H #define __TEE_DRV_H +#include #include #include #include @@ -166,6 +167,23 @@ int tee_device_register(struct tee_device *teedev); */ void tee_device_unregister(struct tee_device *teedev); +/** + * tee_session_calc_client_uuid() - Calculates the client UUID for a session + * @uuid: Resulting UUID + * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) + * @connection_data: Connection data for opening session + * + * Based on the connection method, calculates a UUIDv5-based client UUID. + * + * For group-based logins, verifies that the calling process has the specified + * credentials.
+ * + * @return < 0 on failure + */ +int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + const u8 connection_data[TEE_IOCTL_UUID_LEN]); + + /** * struct tee_shm - shared memory object * @teedev: device used to allocate the object diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 62dbecfe91328b035ef555ecaad8b90fdf323b35..57a3d99dc067639367500b829eda373ffd9eb897 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -110,7 +110,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#ifdef CONFIG_PREEMPT_LAZY +#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ + test_thread_flag(TIF_NEED_RESCHED_LAZY)) +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) +#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY) + +#else +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) +#define tif_need_resched_lazy() 0 +#endif #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, diff --git a/include/linux/timer.h b/include/linux/timer.h index 7b066fd38248bb48466fa53c0e6462d2af76dc33..54627d046b3a25c2e21317871b0e59e395fbcb1d 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -172,7 +172,7 @@ extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0643c083ed8623d5a57eae828b75c985283b059d..e26a85c1b7baf8c23bbf437f82012c0ca1080693 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -62,6 +62,8 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; + unsigned char migrate_disable; + unsigned char preempt_lazy_count; }; #define TRACE_EVENT_TYPE_MAX \ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index db9b0dd0a7a3b8560201a17c632f2c5227f8c5f9..e151bd0f6d4920e14ea36d3737a39892d70968d9 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -185,6 +185,7 @@ static __always_inline void pagefault_disabled_dec(void) */ static inline void pagefault_disable(void) { + migrate_disable(); pagefault_disabled_inc(); /* * make sure to have issued the store before a pagefault @@ -201,6 +202,7 @@ static inline void pagefault_enable(void) */ barrier(); pagefault_disabled_dec(); + migrate_enable(); } /* diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index f25cef84b41db718a0d977c471b9afbc6773378e..febee8649220773cd3c606023b3bdadbef53e574 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -54,7 +54,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); */ static inline void __count_vm_event(enum vm_event_item item) { + preempt_disable_rt(); raw_cpu_inc(vm_event_states.event[item]); + preempt_enable_rt(); } static inline void count_vm_event(enum vm_event_item item) @@ -64,7 +66,9 @@ static inline void count_vm_event(enum vm_event_item item) static inline void __count_vm_events(enum vm_event_item item, long delta) { + preempt_disable_rt();
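+	/*
+	 * On RT, sleeping spinlocks no longer disable preemption, so
+	 * callers that used to be implicitly non-preemptible here are
+	 * not any more. preempt_disable_rt() (a no-op on !RT) keeps the
+	 * non-atomic raw_cpu_add() below from being preempted mid-update.
+	 */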
raw_cpu_add(vm_event_states.event[item], delta); + preempt_enable_rt(); } static inline void count_vm_events(enum vm_event_item item, long delta) diff --git a/include/linux/wait.h b/include/linux/wait.h index 1d726d79063c573b9c13a8b83aea8113d490f2ab..523c68edc48aa25552bdfb25599899ff103ab4d9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -10,6 +10,7 @@ #include #include +#include typedef struct wait_queue_entry wait_queue_entry_t; @@ -514,8 +515,8 @@ do { \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ - hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \ - hrtimer_init_sleeper(&__t, current); \ + hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, HRTIMER_MODE_REL, \ + current); \ if ((timeout) != KTIME_MAX) \ hrtimer_start_range_ns(&__t.timer, timeout, \ current->timer_slack_ns, \ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 60d673e15632126a038b1efe12108abe122fc235..546aa73fba6a4bcf8a55801fca5db8d1d844184a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -455,10 +455,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, extern void destroy_workqueue(struct workqueue_struct *wq); -struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); -void free_workqueue_attrs(struct workqueue_attrs *attrs); -int apply_workqueue_attrs(struct workqueue_struct *wq, - const struct workqueue_attrs *attrs); int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 883bb9085f1589b84196fd289bd1c70df9ab957f..3b593cdeb9af1f1fa44455db92606c0ba17e52ce 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -6,6 +6,7 @@ #include #include #include +#include struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; @@ -36,11 +37,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d, int padattr); -int gnet_stats_copy_basic(const seqcount_t *running, +int gnet_stats_copy_basic(net_seqlock_t *running, struct gnet_dump *d, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); -void __gnet_stats_copy_basic(const seqcount_t *running, +void __gnet_stats_copy_basic(net_seqlock_t *running, struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); @@ -60,13 +61,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **ptr, spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, struct gnet_stats_rate_est64 *sample); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 5ce035984a4ddb091eba1fb9062ccff5e224e164..1166fc17b7579266cd771688cd1debb85d7cc8a7 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -451,7 +451,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct 
sk_buff *skb) } #endif -static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) +static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) { unsigned int hh_alen = 0; unsigned int seq; @@ -493,7 +493,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb static inline int neigh_output(struct neighbour *n, struct sk_buff *skb) { - const struct hh_cache *hh = &n->hh; + struct hh_cache *hh = &n->hh; if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) return neigh_hh_output(hh, skb); @@ -534,7 +534,7 @@ struct neighbour_cb { #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) -static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, +static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, const struct net_device *dev) { unsigned int seq; diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..a7034298a82a39f1f0542802dfccf65d188c0d4e --- /dev/null +++ b/include/net/net_seq_lock.h @@ -0,0 +1,15 @@ +#ifndef __NET_NET_SEQ_LOCK_H__ +#define __NET_NET_SEQ_LOCK_H__ + +#ifdef CONFIG_PREEMPT_RT_BASE +# define net_seqlock_t seqlock_t +# define net_seq_begin(__r) read_seqbegin(__r) +# define net_seq_retry(__r, __s) read_seqretry(__r, __s) + +#else +# define net_seqlock_t seqcount_t +# define net_seq_begin(__r) read_seqcount_begin(__r) +# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s) +#endif + +#endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 483303adf3df8ee8cfa021b972aa0bef82f5592a..038204c7b05e84414e1c10a24fad705f64ff06ab 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -100,7 +101,7 @@ struct Qdisc { struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; - seqcount_t running; + net_seqlock_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; @@ -135,7 +136,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) return spin_is_locked(&qdisc->seqlock); +#ifdef CONFIG_PREEMPT_RT_BASE + return spin_is_locked(&qdisc->running.lock) ? true : false; +#else return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; +#endif } static inline bool qdisc_run_begin(struct Qdisc *qdisc) @@ -146,17 +151,27 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) } else if (qdisc_is_running(qdisc)) { return false; } +#ifdef CONFIG_PREEMPT_RT_BASE + if (try_write_seqlock(&qdisc->running)) + return true; + return false; +#else /* Variant of write_seqcount_begin() telling lockdep a trylock * was attempted. 
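	 * (On PREEMPT_RT_BASE, the try_write_seqlock() branch above gives
	 * the same trylock semantics; it keeps the seqlock's internal
	 * spinlock held until qdisc_run_end() releases it.)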
*/ raw_write_seqcount_begin(&qdisc->running); seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); return true; +#endif } static inline void qdisc_run_end(struct Qdisc *qdisc) { +#ifdef CONFIG_PREEMPT_RT_BASE + write_sequnlock(&qdisc->running); +#else write_seqcount_end(&qdisc->running); +#endif if (qdisc->flags & TCQ_F_NOLOCK) spin_unlock(&qdisc->seqlock); } @@ -475,7 +490,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) return qdisc_lock(root); } -static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) +static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) { struct Qdisc *root = qdisc_root_sleeping(qdisc); diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h new file mode 100644 index 0000000000000000000000000000000000000000..657e234b14832350138a785bb916183ba628a203 --- /dev/null +++ b/include/soc/at91/atmel_tcb.h @@ -0,0 +1,183 @@ +//SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018 Microchip */ + +#ifndef __SOC_ATMEL_TCB_H +#define __SOC_ATMEL_TCB_H + +/* Channel registers */ +#define ATMEL_TC_COFFS(c) ((c) * 0x40) +#define ATMEL_TC_CCR(c) ATMEL_TC_COFFS(c) +#define ATMEL_TC_CMR(c) (ATMEL_TC_COFFS(c) + 0x4) +#define ATMEL_TC_SMMR(c) (ATMEL_TC_COFFS(c) + 0x8) +#define ATMEL_TC_RAB(c) (ATMEL_TC_COFFS(c) + 0xc) +#define ATMEL_TC_CV(c) (ATMEL_TC_COFFS(c) + 0x10) +#define ATMEL_TC_RA(c) (ATMEL_TC_COFFS(c) + 0x14) +#define ATMEL_TC_RB(c) (ATMEL_TC_COFFS(c) + 0x18) +#define ATMEL_TC_RC(c) (ATMEL_TC_COFFS(c) + 0x1c) +#define ATMEL_TC_SR(c) (ATMEL_TC_COFFS(c) + 0x20) +#define ATMEL_TC_IER(c) (ATMEL_TC_COFFS(c) + 0x24) +#define ATMEL_TC_IDR(c) (ATMEL_TC_COFFS(c) + 0x28) +#define ATMEL_TC_IMR(c) (ATMEL_TC_COFFS(c) + 0x2c) +#define ATMEL_TC_EMR(c) (ATMEL_TC_COFFS(c) + 0x30) + +/* Block registers */ +#define ATMEL_TC_BCR 0xc0 +#define ATMEL_TC_BMR 0xc4 +#define ATMEL_TC_QIER 0xc8 +#define ATMEL_TC_QIDR 0xcc +#define ATMEL_TC_QIMR 0xd0 +#define ATMEL_TC_QISR 0xd4 +#define ATMEL_TC_FMR 0xd8 +#define ATMEL_TC_WPMR 0xe4 + +/* CCR fields */ +#define ATMEL_TC_CCR_CLKEN BIT(0) +#define ATMEL_TC_CCR_CLKDIS BIT(1) +#define ATMEL_TC_CCR_SWTRG BIT(2) + +/* Common CMR fields */ +#define ATMEL_TC_CMR_TCLKS_MSK GENMASK(2, 0) +#define ATMEL_TC_CMR_TCLK(x) (x) +#define ATMEL_TC_CMR_XC(x) ((x) + 5) +#define ATMEL_TC_CMR_CLKI BIT(3) +#define ATMEL_TC_CMR_BURST_MSK GENMASK(5, 4) +#define ATMEL_TC_CMR_BURST_XC(x) (((x) + 1) << 4) +#define ATMEL_TC_CMR_WAVE BIT(15) + +/* Capture mode CMR fields */ +#define ATMEL_TC_CMR_LDBSTOP BIT(6) +#define ATMEL_TC_CMR_LDBDIS BIT(7) +#define ATMEL_TC_CMR_ETRGEDG_MSK GENMASK(9, 8) +#define ATMEL_TC_CMR_ETRGEDG_NONE (0 << 8) +#define ATMEL_TC_CMR_ETRGEDG_RISING (1 << 8) +#define ATMEL_TC_CMR_ETRGEDG_FALLING (2 << 8) +#define ATMEL_TC_CMR_ETRGEDG_BOTH (3 << 8) +#define ATMEL_TC_CMR_ABETRG BIT(10) +#define ATMEL_TC_CMR_CPCTRG BIT(14) +#define ATMEL_TC_CMR_LDRA_MSK GENMASK(17, 16) +#define ATMEL_TC_CMR_LDRA_NONE (0 << 16) +#define ATMEL_TC_CMR_LDRA_RISING (1 << 16) +#define ATMEL_TC_CMR_LDRA_FALLING (2 << 16) +#define ATMEL_TC_CMR_LDRA_BOTH (3 << 16) +#define ATMEL_TC_CMR_LDRB_MSK GENMASK(19, 18) +#define ATMEL_TC_CMR_LDRB_NONE (0 << 18) +#define ATMEL_TC_CMR_LDRB_RISING (1 << 18) +#define ATMEL_TC_CMR_LDRB_FALLING (2 << 18) +#define ATMEL_TC_CMR_LDRB_BOTH (3 << 18) +#define ATMEL_TC_CMR_SBSMPLR_MSK GENMASK(22, 20) +#define ATMEL_TC_CMR_SBSMPLR(x) ((x) << 20) + +/* Waveform mode CMR fields */ +#define ATMEL_TC_CMR_CPCSTOP BIT(6) +#define ATMEL_TC_CMR_CPCDIS BIT(7) 
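As a quick illustration of how these CMR field helpers compose (a sketch only: tcb_setup_wave() and its regs argument are hypothetical, and the WAVESEL/ACTION macros it uses are defined just below):

/* Program channel 0 for waveform mode: TIMER_CLOCK3, count up to RC,
 * set TIOA on RA match, clear it on RC match. Illustrative helper,
 * not part of this header. */
static void tcb_setup_wave(void __iomem *regs)
{
	u32 cmr = ATMEL_TC_CMR_TCLK(2) |	/* TIMER_CLOCK3 */
		  ATMEL_TC_CMR_WAVE |		/* waveform mode */
		  ATMEL_TC_CMR_WAVESEL_UPRC |	/* up-count, reset on RC */
		  ATMEL_TC_CMR_ACPA(SET) |	/* TIOA high on RA match */
		  ATMEL_TC_CMR_ACPC(CLEAR);	/* TIOA low on RC match */

	writel(cmr, regs + ATMEL_TC_CMR(0));
	writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, regs + ATMEL_TC_CCR(0));
}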
+#define ATMEL_TC_CMR_EEVTEDG_MSK GENMASK(9, 8) +#define ATMEL_TC_CMR_EEVTEDG_NONE (0 << 8) +#define ATMEL_TC_CMR_EEVTEDG_RISING (1 << 8) +#define ATMEL_TC_CMR_EEVTEDG_FALLING (2 << 8) +#define ATMEL_TC_CMR_EEVTEDG_BOTH (3 << 8) +#define ATMEL_TC_CMR_EEVT_MSK GENMASK(11, 10) +#define ATMEL_TC_CMR_EEVT_XC(x) (((x) + 1) << 10) +#define ATMEL_TC_CMR_ENETRG BIT(12) +#define ATMEL_TC_CMR_WAVESEL_MSK GENMASK(14, 13) +#define ATMEL_TC_CMR_WAVESEL_UP (0 << 13) +#define ATMEL_TC_CMR_WAVESEL_UPDOWN (1 << 13) +#define ATMEL_TC_CMR_WAVESEL_UPRC (2 << 13) +#define ATMEL_TC_CMR_WAVESEL_UPDOWNRC (3 << 13) +#define ATMEL_TC_CMR_ACPA_MSK GENMASK(17, 16) +#define ATMEL_TC_CMR_ACPA(a) (ATMEL_TC_CMR_ACTION_##a << 16) +#define ATMEL_TC_CMR_ACPC_MSK GENMASK(19, 18) +#define ATMEL_TC_CMR_ACPC(a) (ATMEL_TC_CMR_ACTION_##a << 18) +#define ATMEL_TC_CMR_AEEVT_MSK GENMASK(21, 20) +#define ATMEL_TC_CMR_AEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 20) +#define ATMEL_TC_CMR_ASWTRG_MSK GENMASK(23, 22) +#define ATMEL_TC_CMR_ASWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 22) +#define ATMEL_TC_CMR_BCPB_MSK GENMASK(25, 24) +#define ATMEL_TC_CMR_BCPB(a) (ATMEL_TC_CMR_ACTION_##a << 24) +#define ATMEL_TC_CMR_BCPC_MSK GENMASK(27, 26) +#define ATMEL_TC_CMR_BCPC(a) (ATMEL_TC_CMR_ACTION_##a << 26) +#define ATMEL_TC_CMR_BEEVT_MSK GENMASK(29, 28) +#define ATMEL_TC_CMR_BEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 28) +#define ATMEL_TC_CMR_BSWTRG_MSK GENMASK(31, 30) +#define ATMEL_TC_CMR_BSWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 30) +#define ATMEL_TC_CMR_ACTION_NONE 0 +#define ATMEL_TC_CMR_ACTION_SET 1 +#define ATMEL_TC_CMR_ACTION_CLEAR 2 +#define ATMEL_TC_CMR_ACTION_TOGGLE 3 + +/* SMMR fields */ +#define ATMEL_TC_SMMR_GCEN BIT(0) +#define ATMEL_TC_SMMR_DOWN BIT(1) + +/* SR/IER/IDR/IMR fields */ +#define ATMEL_TC_COVFS BIT(0) +#define ATMEL_TC_LOVRS BIT(1) +#define ATMEL_TC_CPAS BIT(2) +#define ATMEL_TC_CPBS BIT(3) +#define ATMEL_TC_CPCS BIT(4) +#define ATMEL_TC_LDRAS BIT(5) +#define ATMEL_TC_LDRBS BIT(6) +#define ATMEL_TC_ETRGS BIT(7) +#define ATMEL_TC_CLKSTA BIT(16) +#define ATMEL_TC_MTIOA BIT(17) +#define ATMEL_TC_MTIOB BIT(18) + +/* EMR fields */ +#define ATMEL_TC_EMR_TRIGSRCA_MSK GENMASK(1, 0) +#define ATMEL_TC_EMR_TRIGSRCA_TIOA 0 +#define ATMEL_TC_EMR_TRIGSRCA_PWMX 1 +#define ATMEL_TC_EMR_TRIGSRCB_MSK GENMASK(5, 4) +#define ATMEL_TC_EMR_TRIGSRCB_TIOB (0 << 4) +#define ATMEL_TC_EMR_TRIGSRCB_PWM (1 << 4) +#define ATMEL_TC_EMR_NOCLKDIV BIT(8) + +/* BCR fields */ +#define ATMEL_TC_BCR_SYNC BIT(0) + +/* BMR fields */ +#define ATMEL_TC_BMR_TCXC_MSK(c) GENMASK(((c) * 2) + 1, (c) * 2) +#define ATMEL_TC_BMR_TCXC(x, c) ((x) << (2 * (c))) +#define ATMEL_TC_BMR_QDEN BIT(8) +#define ATMEL_TC_BMR_POSEN BIT(9) +#define ATMEL_TC_BMR_SPEEDEN BIT(10) +#define ATMEL_TC_BMR_QDTRANS BIT(11) +#define ATMEL_TC_BMR_EDGPHA BIT(12) +#define ATMEL_TC_BMR_INVA BIT(13) +#define ATMEL_TC_BMR_INVB BIT(14) +#define ATMEL_TC_BMR_INVIDX BIT(15) +#define ATMEL_TC_BMR_SWAP BIT(16) +#define ATMEL_TC_BMR_IDXPHB BIT(17) +#define ATMEL_TC_BMR_AUTOC BIT(18) +#define ATMEL_TC_MAXFILT_MSK GENMASK(25, 20) +#define ATMEL_TC_MAXFILT(x) (((x) - 1) << 20) +#define ATMEL_TC_MAXCMP_MSK GENMASK(29, 26) +#define ATMEL_TC_MAXCMP(x) ((x) << 26) + +/* QEDC fields */ +#define ATMEL_TC_QEDC_IDX BIT(0) +#define ATMEL_TC_QEDC_DIRCHG BIT(1) +#define ATMEL_TC_QEDC_QERR BIT(2) +#define ATMEL_TC_QEDC_MPE BIT(3) +#define ATMEL_TC_QEDC_DIR BIT(8) + +/* FMR fields */ +#define ATMEL_TC_FMR_ENCF(x) BIT(x) + +/* WPMR fields */ +#define ATMEL_TC_WPMR_WPKEY (0x54494d << 8) +#define ATMEL_TC_WPMR_WPEN BIT(0) + +static const u8 
atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, }; + +static const struct of_device_id atmel_tcb_dt_ids[] = { + { + .compatible = "atmel,at91rm9200-tcb", + .data = (void *)16, + }, { + .compatible = "atmel,at91sam9x5-tcb", + .data = (void *)32, + }, { + /* sentinel */ + } +}; + +#endif /* __SOC_ATMEL_TCB_H */ diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index cd1773d0e08f07247a42788a035a0355da113493..935f53b9b77178f3c04976af9545ae950c2fb2d1 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -354,6 +354,7 @@ struct hdac_bus { bool align_bdle_4k:1; /* BDLE align 4K boundary */ bool reverse_assign:1; /* assign devices in reverse order */ bool corbrp_self_clear:1; /* CORBRP clears itself after reset */ + bool cmd_resend:1; /* command resend */ int bdl_pos_adj; /* BDL position adjustment */ diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 9483c55f871b992b78e8059f70017848c3d2ad09..1e3f2d0fe5473c5de5e3a1b48c1004208bdbda08 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -55,6 +55,9 @@ struct hdmi_codec_params { int channels; }; +typedef void (*hdmi_codec_plugged_cb)(struct device *dev, + bool plugged); + struct hdmi_codec_pdata; struct hdmi_codec_ops { /* @@ -96,6 +99,14 @@ struct hdmi_codec_ops { */ int (*get_dai_id)(struct snd_soc_component *comment, struct device_node *endpoint); + + /* + * Hook callback function to handle connector plug event. + * Optional + */ + int (*hook_plugged_cb)(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev); }; /* HDMI codec initalization data */ @@ -107,6 +118,12 @@ struct hdmi_codec_pdata { void *data; }; +struct snd_soc_component; +struct snd_soc_jack; + +int hdmi_codec_set_jack_detect(struct snd_soc_component *component, + struct snd_soc_jack *jack); + #define HDMI_CODEC_DRV_NAME "hdmi-audio-codec" #endif /* __HDMI_CODEC_H__ */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index dce5f9dae1210455734e555044749f94d399594e..998ca4e67c0e25c994e0ffdbb18d52cb467df743 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -281,4 +281,7 @@ /* MediaTek BTIF */ #define PORT_MTK_BTIF 117 +/* Phytium PCI UART */ +#define PORT_PHYTIUM 118 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/init/Kconfig b/init/Kconfig index 15c02e15d3fd416ab01b589f6701e06c70b98424..0c912e74ee40bd86f55ba1c91ced89b8cd46c278 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -787,6 +787,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED + depends on !PREEMPT_RT_FULL default n help This feature lets you explicitly allocate real CPU bandwidth @@ -1650,6 +1651,7 @@ choice config SLAB bool "SLAB" + depends on !PREEMPT_RT_FULL select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work @@ -1670,6 +1672,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" + depends on !PREEMPT_RT_FULL help SLOB replaces the stock allocator with a drastically simpler allocator. 
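One usage note on the hdmi-codec additions above: hook_plugged_cb() lets the video-side HDMI driver hand hot-plug events to the audio codec, which in turn reports jack state registered via hdmi_codec_set_jack_detect(). A minimal sketch of the video-side wiring (struct my_hdmi, my_hook_plugged_cb() and the HPD handler are illustrative, not taken from a real driver):

struct my_hdmi {
	struct device *codec_dev;
	hdmi_codec_plugged_cb plugged_cb;
};

static int my_hook_plugged_cb(struct device *dev, void *data,
			      hdmi_codec_plugged_cb fn,
			      struct device *codec_dev)
{
	struct my_hdmi *hdmi = data;	/* registered as hdmi_codec_pdata.data */

	hdmi->plugged_cb = fn;
	hdmi->codec_dev = codec_dev;
	return 0;
}

/* called from the driver's hot-plug detect interrupt handler */
static void my_handle_hpd(struct my_hdmi *hdmi, bool connected)
{
	if (hdmi->plugged_cb && hdmi->codec_dev)
		hdmi->plugged_cb(hdmi->codec_dev, connected);
}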
SLOB is generally more space efficient but @@ -1711,7 +1714,7 @@ config SLAB_FREELIST_HARDENED config SLUB_CPU_PARTIAL default y - depends on SLUB && SMP + depends on SLUB && SMP && !PREEMPT_RT_FULL bool "SLUB per cpu partial cache" help Per cpu partial caches accellerate objects allocation and freeing diff --git a/init/Makefile b/init/Makefile index a3e5ce2bcf08bf2bee9c6139ac61360055dd4949..7779232563ae35ccd0ab8c3f1e8f034f64f641ee 100644 --- a/init/Makefile +++ b/init/Makefile @@ -34,4 +34,4 @@ silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" diff --git a/init/init_task.c b/init/init_task.c index 994ffe018120859569b99c58b18f5df7d5550fdf..45b84137c4b32ce046c853a6ccbffb36cc3a1760 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -50,6 +50,12 @@ static struct sighand_struct init_sighand = { .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), }; +#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE) +# define INIT_TIMER_LIST .posix_timer_list = NULL, +#else +# define INIT_TIMER_LIST +#endif + /* * Set up the first task table, touch at your own risk!. Base=0, * limit=0x1fffff (=2MB) @@ -71,8 +77,13 @@ struct task_struct init_task .static_prio = MAX_PRIO - 20, .normal_prio = MAX_PRIO - 20, .policy = SCHED_NORMAL, - .cpus_allowed = CPU_MASK_ALL, + .cpus_ptr = &init_task.cpus_mask, + .cpus_mask = CPU_MASK_ALL, .nr_cpus_allowed= NR_CPUS, +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) && \ + defined(CONFIG_SCHED_DEBUG) + .pinned_on_cpu = -1, +#endif .mm = NULL, .active_mm = &init_mm, .restart_block = { @@ -118,6 +129,7 @@ struct task_struct init_task INIT_CPU_TIMERS(init_task) .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), .timer_slack_ns = 50000, /* 50 usec default slack */ + INIT_TIMER_LIST .thread_pid = &init_struct_pid, .thread_group = LIST_HEAD_INIT(init_task.thread_group), .thread_node = LIST_HEAD_INIT(init_signals.thread_head), diff --git a/init/main.c b/init/main.c index 272ec131211c0667e0b25b4e6df95094dfff281f..0d5763c5da28fc19ad10ece4c8b0777a3f79f612 100644 --- a/init/main.c +++ b/init/main.c @@ -560,6 +560,7 @@ asmlinkage __visible void __init start_kernel(void) setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); + softirq_early_init(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ boot_cpu_hotplug_init(); diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index 84d882f3e2993b9ce3fdef1cc6aff5c52a8f4622..af27c40008122384ed9fcc3d2d5ad335be64590c 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW config MUTEX_SPIN_ON_OWNER def_bool y - depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW + depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL config RWSEM_SPIN_ON_OWNER def_bool y - depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW + depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL config LOCK_SPIN_ON_OWNER def_bool y diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index cd1655122ec0bb8b73def7adbb8d8f8f3c7d0cb8..fd924c0bcea70aa8f5505251122e4c164d6725d3 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,3 +1,19 @@ +config PREEMPT + bool + select 
PREEMPT_COUNT + +config PREEMPT_RT_BASE + bool + select PREEMPT + +config PREEMPT_RT + bool + +config HAVE_PREEMPT_LAZY + bool + +config PREEMPT_LAZY + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL choice prompt "Preemption Model" @@ -34,10 +50,10 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT +config PREEMPT__LL bool "Preemptible Kernel (Low-Latency Desktop)" depends on !ARCH_NO_PREEMPT - select PREEMPT_COUNT + select PREEMPT select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making @@ -54,7 +70,24 @@ config PREEMPT embedded system with latency requirements in the milliseconds range. +config PREEMPT_RTB + bool "Preemptible Kernel (Basic RT)" + select PREEMPT_RT_BASE + help + This option is basically the same as (Low-Latency Desktop) but + enables changes which are preliminary for the full preemptible + RT kernel. + +config PREEMPT_RT_FULL + bool "Fully Preemptible Kernel (RT)" + depends on IRQ_FORCED_THREADING + select PREEMPT_RT_BASE + select PREEMPT_RCU + select PREEMPT_RT + help + All and everything + endchoice config PREEMPT_COUNT - bool \ No newline at end of file + bool diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 4e8284d8cacc0664646f3f02d96cfb21da370908..86b10cb1d68a7d6cb723cab62d152a8e61acb32e 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4738,10 +4738,10 @@ static void css_free_rwork_fn(struct work_struct *work) } } -static void css_release_work_fn(struct work_struct *work) +static void css_release_work_fn(struct swork_event *sev) { struct cgroup_subsys_state *css = - container_of(work, struct cgroup_subsys_state, destroy_work); + container_of(sev, struct cgroup_subsys_state, destroy_swork); struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; @@ -4803,8 +4803,8 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); - INIT_WORK(&css->destroy_work, css_release_work_fn); - queue_work(cgroup_destroy_wq, &css->destroy_work); + INIT_SWORK(&css->destroy_swork, css_release_work_fn); + swork_queue(&css->destroy_swork); } static void init_and_link_css(struct cgroup_subsys_state *css, @@ -5524,6 +5524,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); + BUG_ON(swork_get()); return 0; } core_initcall(cgroup_wq_init); diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index dcd5755b1fe2aa2e6b3c9ce3b1e1e8fd29df3f6c..42ed4e497336e5a44f621bcf8385a25e27dd9086 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -288,7 +288,7 @@ static struct cpuset top_cpuset = { */ static DEFINE_MUTEX(cpuset_mutex); -static DEFINE_SPINLOCK(callback_lock); +static DEFINE_RAW_SPINLOCK(callback_lock); static struct workqueue_struct *cpuset_migrate_mm_wq; @@ -922,9 +922,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) continue; rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cp->effective_cpus, new_cpus); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); @@ -989,9 +989,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) return retval; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); 
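+	/*
+	 * callback_lock stays raw on RT: it is taken from contexts that
+	 * must not sleep, and it only guards short, bounded mask and
+	 * flag copies, so a truly spinning lock is both safe and needed.
+	 */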
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* use trialcs->cpus_allowed as a temp variable */ update_cpumasks_hier(cs, trialcs->cpus_allowed); @@ -1175,9 +1175,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) continue; rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cp->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); WARN_ON(!is_in_v2_mode() && !nodes_equal(cp->mems_allowed, cp->effective_mems)); @@ -1245,9 +1245,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) goto done; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->mems_allowed = trialcs->mems_allowed; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* use trialcs->mems_allowed as a temp variable */ update_nodemasks_hier(cs, &trialcs->mems_allowed); @@ -1338,9 +1338,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs))); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->flags = trialcs->flags; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) rebuild_sched_domains_locked(); @@ -1761,7 +1761,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) cpuset_filetype_t type = seq_cft(sf)->private; int ret = 0; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); switch (type) { case FILE_CPULIST: @@ -1780,7 +1780,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) ret = -EINVAL; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); return ret; } @@ -1995,12 +1995,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpuset_inc(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { cpumask_copy(cs->effective_cpus, parent->effective_cpus); cs->effective_mems = parent->effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; @@ -2027,12 +2027,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) } rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->mems_allowed = parent->mems_allowed; cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); return 0; @@ -2071,7 +2071,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { mutex_lock(&cpuset_mutex); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); @@ -2082,7 +2082,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) top_cpuset.mems_allowed = top_cpuset.effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); mutex_unlock(&cpuset_mutex); } @@ -2096,7 +2096,7 @@ static void cpuset_fork(struct task_struct *task) if 
(task_css_is_root(task, cpuset_cgrp_id)) return; - set_cpus_allowed_ptr(task, ¤t->cpus_allowed); + set_cpus_allowed_ptr(task, current->cpus_ptr); task->mems_allowed = current->mems_allowed; } @@ -2180,12 +2180,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, { bool is_empty; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->cpus_allowed, new_cpus); cpumask_copy(cs->effective_cpus, new_cpus); cs->mems_allowed = *new_mems; cs->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* * Don't call update_tasks_cpumask() if the cpuset becomes empty, @@ -2222,10 +2222,10 @@ hotplug_update_tasks(struct cpuset *cs, if (nodes_empty(*new_mems)) *new_mems = parent_cs(cs)->effective_mems; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->effective_cpus, new_cpus); cs->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (cpus_updated) update_tasks_cpumask(cs); @@ -2318,21 +2318,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* synchronize cpus_allowed to cpu_active_mask */ if (cpus_updated) { - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (!on_dfl) cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); cpumask_copy(top_cpuset.effective_cpus, &new_cpus); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* we don't mess with cpumasks of tasks in top_cpuset */ } /* synchronize mems_allowed to N_MEMORY */ if (mems_updated) { - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (!on_dfl) top_cpuset.mems_allowed = new_mems; top_cpuset.effective_mems = new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); update_tasks_nodemask(&top_cpuset); } @@ -2434,11 +2434,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { unsigned long flags; - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_cpus(task_cs(tsk), pmask); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); } /** @@ -2499,11 +2499,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) nodemask_t mask; unsigned long flags; - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_mems(task_cs(tsk), &mask); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); return mask; } @@ -2595,14 +2595,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) return true; /* Not hardwall and node outside mems_allowed: scan up cpusets */ - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); cs = nearest_hardwall_ancestor(task_cs(current)); allowed = node_isset(node, cs->mems_allowed); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); return allowed; } diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index d0ed410b4127506849a9c4f0d5a542a99600c3ae..3c949c46c6b3f68edb2420b6c4564c7396d3e1eb 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -149,8 +149,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu); struct cgroup *pos = NULL; + 
unsigned long flags; - raw_spin_lock(cpu_lock); + raw_spin_lock_irqsave(cpu_lock, flags); while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) { struct cgroup_subsys_state *css; @@ -162,7 +163,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) css->ss->css_rstat_flush(css, cpu); rcu_read_unlock(); } - raw_spin_unlock(cpu_lock); + raw_spin_unlock_irqrestore(cpu_lock, flags); /* if @may_sleep, play nice and yield if necessary */ if (may_sleep && (need_resched() || diff --git a/kernel/cpu.c b/kernel/cpu.c index d6fd362afc81f4d730c20fdd1a9666271047b66b..44245b35c09c81ddec800fc1b8937bb498ff99f8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -876,6 +876,15 @@ static int take_cpu_down(void *_param) int err, cpu = smp_processor_id(); int ret; +#ifdef CONFIG_PREEMPT_RT_BASE + /* + * If any tasks disabled migration before we got here, + * go back and sleep again. + */ + if (cpu_nr_pinned(cpu)) + return -EAGAIN; +#endif + /* Ensure this CPU doesn't handle any more interrupts. */ err = __cpu_disable(); if (err < 0) @@ -903,6 +912,10 @@ static int take_cpu_down(void *_param) return 0; } +#ifdef CONFIG_PREEMPT_RT_BASE +struct task_struct *takedown_cpu_task; +#endif + static int takedown_cpu(unsigned int cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); @@ -917,11 +930,39 @@ static int takedown_cpu(unsigned int cpu) */ irq_lock_sparse(); +#ifdef CONFIG_PREEMPT_RT_BASE + WARN_ON_ONCE(takedown_cpu_task); + takedown_cpu_task = current; + +again: + /* + * If a task pins this CPU after we pass this check, take_cpu_down + * will return -EAGAIN. + */ + for (;;) { + int nr_pinned; + + set_current_state(TASK_UNINTERRUPTIBLE); + nr_pinned = cpu_nr_pinned(cpu); + if (nr_pinned == 0) + break; + schedule(); + } + set_current_state(TASK_RUNNING); +#endif + /* * So now all preempt/rcu users must observe !cpu_active(). */ err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); +#ifdef CONFIG_PREEMPT_RT_BASE + if (err == -EAGAIN) + goto again; +#endif if (err) { +#ifdef CONFIG_PREEMPT_RT_BASE + takedown_cpu_task = NULL; +#endif /* CPU refused to die */ irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */ @@ -940,6 +981,9 @@ static int takedown_cpu(unsigned int cpu) wait_for_ap_thread(st, false); BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); +#ifdef CONFIG_PREEMPT_RT_BASE + takedown_cpu_task = NULL; +#endif /* Interrupts are moved away from the dying cpu, reenable alloc/free */ irq_unlock_sparse(); diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index b45576ca3b0da629df963932736f527c441300db..ad1a3bc7908f4d9f238c96e2f6490c11234d7924 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -861,9 +861,11 @@ int kdb_printf(const char *fmt, ...) 
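The hotplug changes above are one half of a handshake; the other half is any task that pins itself to a CPU with migrate_disable(). A sketch of what such a pinned section looks like from the task side (do_something_percpu() is a stand-in):

static void pinned_section(void)
{
	/* get_cpu_light() is migrate_disable() + smp_processor_id() */
	int cpu = get_cpu_light();

	/*
	 * The task may be preempted, and on RT may even sleep, but it
	 * stays on @cpu; takedown_cpu() keeps retrying until the last
	 * such pin on the dying CPU is dropped.
	 */
	do_something_percpu(cpu);

	put_cpu_light();	/* migrate_enable(); hotplug may proceed */
}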
va_list ap; int r; + kdb_trap_printk++; va_start(ap, fmt); r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); va_end(ap); + kdb_trap_printk--; return r; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 88dd1398ae88905a99248f6532fcce764d06f392..7f2441f671d352350038ce7ea4f8be11bcff32de 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1108,7 +1108,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); raw_spin_lock_init(&cpuctx->hrtimer_lock); - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); timer->function = perf_mux_hrtimer_handler; } @@ -9311,7 +9311,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) if (!is_sampling_event(event)) return; - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); hwc->hrtimer.function = perf_swevent_hrtimer; /* diff --git a/kernel/exit.c b/kernel/exit.c index 908e7a33e1fcb38f40cf8de3eed283e548000e96..2a414fc71b87b020b45e2d03c3847c639164f11b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -160,7 +160,7 @@ static void __exit_signal(struct task_struct *tsk) * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. */ - flush_sigqueue(&tsk->pending); + flush_task_sigqueue(tsk); tsk->sighand = NULL; spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c index b658716005077feebd93fcc763f9c5876118482e..fdc61ac6a748a970b2f79a6e214107a32957739e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -642,6 +643,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); +#ifdef CONFIG_PREEMPT_RT_BASE +/* + * RCU callback for delayed mm drop. Not strictly rcu, but we don't + * want another facility to make this work. + */ +void __mmdrop_delayed(struct rcu_head *rhp) +{ + struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); + + __mmdrop(mm); +} +#endif + static void mmdrop_async_fn(struct work_struct *work) { struct mm_struct *mm; @@ -676,13 +690,24 @@ static inline void put_signal_struct(struct signal_struct *sig) if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } - +#ifdef CONFIG_PREEMPT_RT_BASE +static +#endif void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. + */ + kprobe_flush_task(tsk); + + /* Task is done with its stack. 
*/ + put_task_stack(tsk); + cgroup_free(tsk); task_numa_free(tsk, true); security_task_free(tsk); @@ -693,7 +718,18 @@ void __put_task_struct(struct task_struct *tsk) if (!profile_handoff_task(tsk)) free_task(tsk); } +#ifndef CONFIG_PREEMPT_RT_BASE EXPORT_SYMBOL_GPL(__put_task_struct); +#else +void __put_task_struct_cb(struct rcu_head *rhp) +{ + struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); + + __put_task_struct(tsk); + +} +EXPORT_SYMBOL_GPL(__put_task_struct_cb); +#endif void __init __weak arch_task_cache_init(void) { } @@ -850,6 +886,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) #ifdef CONFIG_STACKPROTECTOR tsk->stack_canary = get_random_canary(); #endif + if (orig->cpus_ptr == &orig->cpus_mask) + tsk->cpus_ptr = &tsk->cpus_mask; /* * One for us, one for whoever does the "release_task()" (usually @@ -862,6 +900,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; + tsk->wake_q_sleeper.next = NULL; account_kernel_stack(tsk, 1); @@ -1585,6 +1624,9 @@ static void rt_mutex_init_task(struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { +#ifdef CONFIG_PREEMPT_RT_BASE + tsk->posix_timer_list = NULL; +#endif tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; @@ -1806,6 +1848,7 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); + p->sigqueue_cache = NULL; p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME diff --git a/kernel/futex.c b/kernel/futex.c index 3c67da9b84086a4293e82d453c07b749dc17493c..81090f7a3ed92f50bf215975c2d831ec2548d08d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -962,7 +962,9 @@ static void exit_pi_state_list(struct task_struct *curr) if (head->next != next) { /* retain curr->pi_lock for the loop invariant */ raw_spin_unlock(&pi_state->pi_mutex.wait_lock); + raw_spin_unlock_irq(&curr->pi_lock); spin_unlock(&hb->lock); + raw_spin_lock_irq(&curr->pi_lock); put_pi_state(pi_state); continue; } @@ -1573,6 +1575,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ struct task_struct *new_owner; bool postunlock = false; DEFINE_WAKE_Q(wake_q); + DEFINE_WAKE_Q(wake_sleeper_q); int ret = 0; new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); @@ -1622,14 +1625,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ * not fail. */ pi_state_update_owner(pi_state, new_owner); - postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, + &wake_sleeper_q); } out_unlock: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); if (postunlock) - rt_mutex_postunlock(&wake_q); + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); return ret; } @@ -2253,6 +2257,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; + } else if (ret == -EAGAIN) { + /* + * Waiter was woken by timeout or + * signal and has set pi_blocked_on to + * PI_WAKEUP_INPROGRESS before we + * tried to enqueue it on the rtmutex. 
+ */ + this->pi_state = NULL; + put_pi_state(pi_state); + continue; } else if (ret) { /* * rt_mutex_start_proxy_lock() detected a @@ -2816,10 +2830,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, if (abs_time) { to = &timeout; - hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? - CLOCK_REALTIME : CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - hrtimer_init_sleeper(to, current); + hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ? + CLOCK_REALTIME : CLOCK_MONOTONIC, + HRTIMER_MODE_ABS, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } @@ -2917,9 +2930,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, if (time) { to = &timeout; - hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, - HRTIMER_MODE_ABS); - hrtimer_init_sleeper(to, current); + hrtimer_init_sleeper_on_stack(to, CLOCK_REALTIME, + HRTIMER_MODE_ABS, current); hrtimer_set_expires(&to->timer, *time); } @@ -2982,7 +2994,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, goto no_block; } - rt_mutex_init_waiter(&rt_waiter); + rt_mutex_init_waiter(&rt_waiter, false); /* * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not @@ -2998,6 +3010,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, * before __rt_mutex_start_proxy_lock() is done. */ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); + /* + * the migrate_disable() here disables migration in the in_atomic() fast + * path which is enabled again in the following spin_unlock(). We have + * one migrate_disable() pending in the slow-path which is reversed + * after the raw_spin_unlock_irq() where we leave the atomic context. + */ + migrate_disable(); + spin_unlock(q.lock_ptr); /* * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter @@ -3006,6 +3026,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, */ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); + migrate_enable(); if (ret) { if (ret == 1) @@ -3140,10 +3161,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) * rt_waiter. Also see the WARN in wake_futex_pi(). */ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + /* + * Magic trickery for now to make the RT migrate disable + * logic happy. The following spin_unlock() happens with + * interrupts disabled so the internal migrate_enable() + * won't undo the migrate_disable() which was issued when + * locking hb->lock. + */ + migrate_disable(); spin_unlock(&hb->lock); /* drops pi_state->pi_mutex.wait_lock */ ret = wake_futex_pi(uaddr, uval, pi_state); + migrate_enable(); put_pi_state(pi_state); @@ -3314,7 +3344,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; - struct futex_hash_bucket *hb; + struct futex_hash_bucket *hb, *hb2; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; @@ -3330,10 +3360,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (abs_time) { to = &timeout; - hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? - CLOCK_REALTIME : CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - hrtimer_init_sleeper(to, current); + hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ? 
+ CLOCK_REALTIME : CLOCK_MONOTONIC, + HRTIMER_MODE_ABS, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } @@ -3342,7 +3371,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ - rt_mutex_init_waiter(&rt_waiter); + rt_mutex_init_waiter(&rt_waiter, false); ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) @@ -3373,20 +3402,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); - spin_lock(&hb->lock); - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); - spin_unlock(&hb->lock); - if (ret) - goto out_put_keys; + /* + * On RT we must avoid races with requeue and trying to block + * on two mutexes (hb->lock and uaddr2's rtmutex) by + * serializing access to pi_blocked_on with pi_lock. + */ + raw_spin_lock_irq(¤t->pi_lock); + if (current->pi_blocked_on) { + /* + * We have been requeued or are in the process of + * being requeued. + */ + raw_spin_unlock_irq(¤t->pi_lock); + } else { + /* + * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS + * prevents a concurrent requeue from moving us to the + * uaddr2 rtmutex. After that we can safely acquire + * (and possibly block on) hb->lock. + */ + current->pi_blocked_on = PI_WAKEUP_INPROGRESS; + raw_spin_unlock_irq(¤t->pi_lock); + + spin_lock(&hb->lock); + + /* + * Clean up pi_blocked_on. We might leak it otherwise + * when we succeeded with the hb->lock in the fast + * path. + */ + raw_spin_lock_irq(¤t->pi_lock); + current->pi_blocked_on = NULL; + raw_spin_unlock_irq(¤t->pi_lock); + + ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); + spin_unlock(&hb->lock); + if (ret) + goto out_put_keys; + } /* - * In order for us to be here, we know our q.key == key2, and since - * we took the hb->lock above, we also know that futex_requeue() has - * completed and we no longer have to concern ourselves with a wakeup - * race with the atomic proxy lock acquisition by the requeue code. The - * futex_requeue dropped our key1 reference and incremented our key2 - * reference count. + * In order to be here, we have either been requeued, are in + * the process of being requeued, or requeue successfully + * acquired uaddr2 on our behalf. If pi_blocked_on was + * non-null above, we may be racing with a requeue. Do not + * rely on q->lock_ptr to be hb2->lock until after blocking on + * hb->lock or hb2->lock. The futex_requeue dropped our key1 + * reference and incremented our key2 reference count. */ + hb2 = hash_futex(&key2); /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { @@ -3395,14 +3459,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { - spin_lock(q.lock_ptr); + spin_lock(&hb2->lock); + BUG_ON(&hb2->lock != q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. */ put_pi_state(q.pi_state); - spin_unlock(q.lock_ptr); + spin_unlock(&hb2->lock); /* * Adjust the return value. It's either -EFAULT or * success (1) but the caller expects 0 for success. 
@@ -3421,7 +3486,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); - spin_lock(q.lock_ptr); + spin_lock(&hb2->lock); + BUG_ON(&hb2->lock != q.lock_ptr); if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) ret = 0; diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index b3f55dd581b0a56f86f6056c6d8df7e6b3fe7dcb..9473b1d47199f62ca95a674dcf4660618ee712f9 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -209,8 +209,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, err = -EINVAL; } else { desc->istate |= IRQS_PENDING; - check_irq_resend(desc); - err = 0; + err = check_irq_resend(desc); } raw_spin_unlock_irqrestore(&desc->lock, flags); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 38554bc3537558b8fa9deccff5dbc1ff7ceb3b12..06a80bbf78afe6ea200afb69e704cc08b5e9cc9e 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -185,10 +185,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) { irqreturn_t retval; unsigned int flags = 0; + struct pt_regs *regs = get_irq_regs(); + u64 ip = regs ? instruction_pointer(regs) : 0; retval = __handle_irq_event_percpu(desc, &flags); - add_interrupt_randomness(desc->irq_data.irq, flags); +#ifdef CONFIG_PREEMPT_RT_FULL + desc->random_ip = ip; +#else + add_interrupt_randomness(desc->irq_data.irq, flags, ip); +#endif if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 10eccbc84686156dd3ec659015d37768c62f809c..2fed5ab70ba262483e4394ac2b58e3d3dc76858a 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -108,7 +108,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); irqreturn_t handle_irq_event(struct irq_desc *desc); /* Resending of interrupts :*/ -void check_irq_resend(struct irq_desc *desc); +int check_irq_resend(struct irq_desc *desc); bool irq_wait_for_poll(struct irq_desc *desc); void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); @@ -422,6 +422,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return desc->pending_mask; } +static inline bool handle_enforce_irqctx(struct irq_data *data) +{ + return irqd_is_handle_enforce_irqctx(data); +} bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); #else /* CONFIG_GENERIC_PENDING_IRQ */ static inline bool irq_can_move_pcntxt(struct irq_data *data) @@ -448,6 +452,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) { return false; } +static inline bool handle_enforce_irqctx(struct irq_data *data) +{ + return false; +} #endif /* !CONFIG_GENERIC_PENDING_IRQ */ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 3633540b0f16076a03b965b73411b2dc0363c781..e9049903ac114ae38d82bcf6178a3286cea23871 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -635,9 +635,15 @@ void irq_init_desc(unsigned int irq) int generic_handle_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data; if (!desc) return -EINVAL; + + data = irq_desc_get_irq_data(desc); + if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data))) + return -EPERM; + generic_handle_irq_desc(desc); return 0; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 
18f3cdbf41fd5d85840fd3e9a428b321ed46af52..8f710f1755d88b4636fce4b397576fc949be2c9b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -24,6 +24,7 @@ #include "internals.h" #ifdef CONFIG_IRQ_FORCED_THREADING +# ifndef CONFIG_PREEMPT_RT_BASE __read_mostly bool force_irqthreads; EXPORT_SYMBOL_GPL(force_irqthreads); @@ -33,6 +34,7 @@ static int __init setup_forced_irqthreads(char *arg) return 0; } early_param("threadirqs", setup_forced_irqthreads); +# endif #endif static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) @@ -996,7 +998,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) irq_finalize_oneshot(desc, action); if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) local_irq_enable(); - local_bh_enable(); + /* + * Interrupts which have real time requirements can be set up + * to avoid softirq processing in the thread handler. This is + * safe as these interrupts do not raise soft interrupts. + */ + if (irq_settings_no_softirq_call(desc)) + _local_bh_enable(); + else + local_bh_enable(); return ret; } @@ -1121,6 +1131,12 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); +#ifdef CONFIG_PREEMPT_RT_FULL + migrate_disable(); + add_interrupt_randomness(action->irq, 0, + desc->random_ip ^ (unsigned long) action); + migrate_enable(); +#endif wake_threads_waitq(desc); } @@ -1531,6 +1547,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } + if (new->flags & IRQF_NO_SOFTIRQ_CALL) + irq_settings_set_no_softirq_call(desc); + if (irq_settings_can_autoenable(desc)) { irq_startup(desc, IRQ_RESEND, IRQ_START_COND); } else { @@ -2330,7 +2349,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. * - * This function should be called with preemption disabled if the + * This function should be called with migration disabled if the * interrupt controller has per-cpu registers. */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 98c04ca5fa43d6a4515fff52a2d956c250777083..7de48bc06c754b960d91c4934d2bd17ac56acca5 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -47,56 +47,88 @@ static void resend_irqs(unsigned long arg) /* Tasklet to handle resend: */ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); +static int irq_sw_resend(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + + /* + * Validate whether this interrupt can be safely injected from + * non interrupt context + */ + if (handle_enforce_irqctx(&desc->irq_data)) + return -EINVAL; + + /* + * If the interrupt is running in the thread context of the parent + * irq we need to be careful, because we cannot trigger it + * directly. + */ + if (irq_settings_is_nested_thread(desc)) { + /* + * If the parent_irq is valid, we retrigger the parent, + * otherwise we do nothing. 
+ */ + if (!desc->parent_irq) + return -EINVAL; + irq = desc->parent_irq; + } + + /* Set it pending and activate the softirq: */ + set_bit(irq, irqs_resend); + tasklet_schedule(&resend_tasklet); + return 0; +} + +#else +static int irq_sw_resend(struct irq_desc *desc) +{ + return -EINVAL; +} +#endif + +static int try_retrigger(struct irq_desc *desc) +{ + if (desc->irq_data.chip->irq_retrigger) + return desc->irq_data.chip->irq_retrigger(&desc->irq_data); + +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + return irq_chip_retrigger_hierarchy(&desc->irq_data); +#else + return 0; #endif +} /* * IRQ resend * * Is called with interrupts disabled and desc->lock held. */ -void check_irq_resend(struct irq_desc *desc) +int check_irq_resend(struct irq_desc *desc) { + int err = 0; + /* - * We do not resend level type interrupts. Level type - * interrupts are resent by hardware when they are still - * active. Clear the pending bit so suspend/resume does not - * get confused. + * We do not resend level type interrupts. Level type interrupts + * are resent by hardware when they are still active. Clear the + * pending bit so suspend/resume does not get confused. */ if (irq_settings_is_level(desc)) { desc->istate &= ~IRQS_PENDING; - return; + return -EINVAL; } if (desc->istate & IRQS_REPLAY) - return; - if (desc->istate & IRQS_PENDING) { - desc->istate &= ~IRQS_PENDING; - desc->istate |= IRQS_REPLAY; + return -EBUSY; - if (!desc->irq_data.chip->irq_retrigger || - !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { -#ifdef CONFIG_HARDIRQS_SW_RESEND - unsigned int irq = irq_desc_get_irq(desc); - - /* - * If the interrupt is running in the thread - * context of the parent irq we need to be - * careful, because we cannot trigger it - * directly. - */ - if (irq_settings_is_nested_thread(desc)) { - /* - * If the parent_irq is valid, we - * retrigger the parent, otherwise we - * do nothing. 
- */ - if (!desc->parent_irq) - return; - irq = desc->parent_irq; - } - /* Set it pending and activate the softirq: */ - set_bit(irq, irqs_resend); - tasklet_schedule(&resend_tasklet); -#endif - } - } + if (!(desc->istate & IRQS_PENDING)) + return 0; + + desc->istate &= ~IRQS_PENDING; + + if (!try_retrigger(desc)) + err = irq_sw_resend(desc); + + /* If the retrigger was successfull, mark it with the REPLAY bit */ + if (!err) + desc->istate |= IRQS_REPLAY; + return err; } diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index e43795cd2ccfba2f848dcaa0f3b37aff562af83b..47e2f9e23586fc7f239be2e5bd8d1d0350fdfd8d 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -17,6 +17,7 @@ enum { _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, _IRQ_IS_POLLED = IRQ_IS_POLLED, _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, + _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -31,6 +32,7 @@ enum { #define IRQ_PER_CPU_DEVID GOT_YOU_MORON #define IRQ_IS_POLLED GOT_YOU_MORON #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON +#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); } +static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; +} + +static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) +{ + desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; +} + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_PER_CPU; diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index d867d6ddafdd98580e980e2e5aade55da74c64cd..cd12ee86c01eb24870edb60f64951bc4f229d710 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); static int __init irqfixup_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); @@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644); static int __init irqpoll_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 73288914ed5e78cc44b596ab5428ac98aceb73b3..b6d9d35941ac46a3ea5e4deeb473071b9999e4b9 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -57,29 +58,40 @@ void __weak arch_irq_work_raise(void) } /* Enqueue on current CPU, work must already be claimed and preempt disabled */ -static void __irq_work_queue_local(struct irq_work *work) +static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list) { - /* If the work is "lazy", handle it from next tick if any */ - if (work->flags & IRQ_WORK_LAZY) { - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && - tick_nohz_tick_stopped()) - arch_irq_work_raise(); - } else { - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) - arch_irq_work_raise(); - } + bool empty; + + empty = llist_add(&work->llnode, list); + 
+ if (empty && + (!(work->flags & IRQ_WORK_LAZY) || + tick_nohz_tick_stopped())) + arch_irq_work_raise(); +} + +static inline bool use_lazy_list(struct irq_work *work) +{ + return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) + || (work->flags & IRQ_WORK_LAZY); } /* Enqueue the irq work @work on the current CPU */ bool irq_work_queue(struct irq_work *work) { + struct llist_head *list; + /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; /* Queue the entry and raise the IPI if needed. */ preempt_disable(); - __irq_work_queue_local(work); + if (use_lazy_list(work)) + list = this_cpu_ptr(&lazy_list); + else + list = this_cpu_ptr(&raised_list); + __irq_work_queue_local(work, list); preempt_enable(); return true; @@ -98,6 +110,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) return irq_work_queue(work); #else /* CONFIG_SMP: */ + struct llist_head *list; + /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(cpu)); @@ -106,13 +120,18 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) return false; preempt_disable(); + if (use_lazy_list(work)) + list = &per_cpu(lazy_list, cpu); + else + list = &per_cpu(raised_list, cpu); + if (cpu != smp_processor_id()) { /* Arch remote IPI send/receive backend aren't NMI safe */ WARN_ON_ONCE(in_nmi()); - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) + if (llist_add(&work->llnode, list)) arch_send_call_function_single_ipi(cpu); } else { - __irq_work_queue_local(work); + __irq_work_queue_local(work, list); } preempt_enable(); @@ -128,9 +147,8 @@ bool irq_work_needs_cpu(void) raised = this_cpu_ptr(&raised_list); lazy = this_cpu_ptr(&lazy_list); - if (llist_empty(raised) || arch_irq_work_has_interrupt()) - if (llist_empty(lazy)) - return false; + if (llist_empty(raised) && llist_empty(lazy)) + return false; /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); @@ -144,8 +162,12 @@ static void irq_work_run_list(struct llist_head *list) struct llist_node *llnode; unsigned long flags; +#ifndef CONFIG_PREEMPT_RT_FULL + /* + * nort: On RT IRQ-work may run in SOFTIRQ context. + */ BUG_ON(!irqs_disabled()); - +#endif if (llist_empty(list)) return; @@ -177,7 +199,16 @@ static void irq_work_run_list(struct llist_head *list) void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); - irq_work_run_list(this_cpu_ptr(&lazy_list)); + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { + /* + * NOTE: we raise softirq via IPI for safety, + * and execute in irq_work_tick() to move the + * overhead from hard to soft irq context. 
+ */ + if (!llist_empty(this_cpu_ptr(&lazy_list))) + raise_softirq(TIMER_SOFTIRQ); + } else + irq_work_run_list(this_cpu_ptr(&lazy_list)); } EXPORT_SYMBOL_GPL(irq_work_run); @@ -187,8 +218,17 @@ void irq_work_tick(void) if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); + + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) + irq_work_run_list(this_cpu_ptr(&lazy_list)); +} + +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) +void irq_work_tick_soft(void) +{ irq_work_run_list(this_cpu_ptr(&lazy_list)); } +#endif /* * Synchronize against the irq_work @entry, ensures the entry is not diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 46ba853656f63b7ba1a1e11171b8cdc935666c8d..9a23632b6294fb35700bca09bd0cf981c5f4c4d3 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_CRASH_CORE */ +#if defined(CONFIG_PREEMPT_RT_FULL) +static ssize_t realtime_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 1); +} +KERNEL_ATTR_RO(realtime); +#endif + /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -230,6 +239,9 @@ static struct attribute * kernel_attrs[] = { #ifndef CONFIG_TINY_RCU &rcu_expedited_attr.attr, &rcu_normal_attr.attr, +#endif +#ifdef CONFIG_PREEMPT_RT_FULL + &realtime_attr.attr, #endif NULL }; diff --git a/kernel/kthread.c b/kernel/kthread.c index 9750f4f7f9010f59d0dd54da03f8b26f4d64831b..c8cf4731ced8a3d5ff56e1e9d32663f2f70cfd0a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -637,7 +637,7 @@ void __kthread_init_worker(struct kthread_worker *worker, struct lock_class_key *key) { memset(worker, 0, sizeof(struct kthread_worker)); - spin_lock_init(&worker->lock); + raw_spin_lock_init(&worker->lock); lockdep_set_class_and_name(&worker->lock, key, name); INIT_LIST_HEAD(&worker->work_list); INIT_LIST_HEAD(&worker->delayed_work_list); @@ -679,21 +679,21 @@ int kthread_worker_fn(void *worker_ptr) if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); - spin_lock_irq(&worker->lock); + raw_spin_lock_irq(&worker->lock); worker->task = NULL; - spin_unlock_irq(&worker->lock); + raw_spin_unlock_irq(&worker->lock); return 0; } work = NULL; - spin_lock_irq(&worker->lock); + raw_spin_lock_irq(&worker->lock); if (!list_empty(&worker->work_list)) { work = list_first_entry(&worker->work_list, struct kthread_work, node); list_del_init(&work->node); } worker->current_work = work; - spin_unlock_irq(&worker->lock); + raw_spin_unlock_irq(&worker->lock); if (work) { __set_current_state(TASK_RUNNING); @@ -850,12 +850,12 @@ bool kthread_queue_work(struct kthread_worker *worker, bool ret = false; unsigned long flags; - spin_lock_irqsave(&worker->lock, flags); + raw_spin_lock_irqsave(&worker->lock, flags); if (!queuing_blocked(worker, work)) { kthread_insert_work(worker, work, &worker->work_list); ret = true; } - spin_unlock_irqrestore(&worker->lock, flags); + raw_spin_unlock_irqrestore(&worker->lock, flags); return ret; } EXPORT_SYMBOL_GPL(kthread_queue_work); @@ -881,7 +881,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) if (WARN_ON_ONCE(!worker)) return; - spin_lock(&worker->lock); + raw_spin_lock(&worker->lock); /* Work must not be used with >1 worker, see kthread_queue_work(). 
*/ WARN_ON_ONCE(work->worker != worker); @@ -891,7 +891,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) if (!work->canceling) kthread_insert_work(worker, work, &worker->work_list); - spin_unlock(&worker->lock); + raw_spin_unlock(&worker->lock); } EXPORT_SYMBOL(kthread_delayed_work_timer_fn); @@ -947,14 +947,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker, unsigned long flags; bool ret = false; - spin_lock_irqsave(&worker->lock, flags); + raw_spin_lock_irqsave(&worker->lock, flags); if (!queuing_blocked(worker, work)) { __kthread_queue_delayed_work(worker, dwork, delay); ret = true; } - spin_unlock_irqrestore(&worker->lock, flags); + raw_spin_unlock_irqrestore(&worker->lock, flags); return ret; } EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); @@ -990,7 +990,7 @@ void kthread_flush_work(struct kthread_work *work) if (!worker) return; - spin_lock_irq(&worker->lock); + raw_spin_lock_irq(&worker->lock); /* Work must not be used with >1 worker, see kthread_queue_work(). */ WARN_ON_ONCE(work->worker != worker); @@ -1002,7 +1002,7 @@ void kthread_flush_work(struct kthread_work *work) else noop = true; - spin_unlock_irq(&worker->lock); + raw_spin_unlock_irq(&worker->lock); if (!noop) wait_for_completion(&fwork.done); @@ -1030,9 +1030,9 @@ static void kthread_cancel_delayed_work_timer(struct kthread_work *work, * any queuing is blocked by setting the canceling counter. */ work->canceling++; - spin_unlock_irqrestore(&worker->lock, *flags); + raw_spin_unlock_irqrestore(&worker->lock, *flags); del_timer_sync(&dwork->timer); - spin_lock_irqsave(&worker->lock, *flags); + raw_spin_lock_irqsave(&worker->lock, *flags); work->canceling--; } @@ -1094,7 +1094,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, unsigned long flags; int ret = false; - spin_lock_irqsave(&worker->lock, flags); + raw_spin_lock_irqsave(&worker->lock, flags); /* Do not bother with canceling when never queued. */ if (!work->worker) @@ -1123,7 +1123,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, fast_queue: __kthread_queue_delayed_work(worker, dwork, delay); out: - spin_unlock_irqrestore(&worker->lock, flags); + raw_spin_unlock_irqrestore(&worker->lock, flags); return ret; } EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); @@ -1137,7 +1137,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) if (!worker) goto out; - spin_lock_irqsave(&worker->lock, flags); + raw_spin_lock_irqsave(&worker->lock, flags); /* Work must not be used with >1 worker, see kthread_queue_work(). */ WARN_ON_ONCE(work->worker != worker); @@ -1154,13 +1154,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) * In the meantime, block any queuing by setting the canceling counter. */ work->canceling++; - spin_unlock_irqrestore(&worker->lock, flags); + raw_spin_unlock_irqrestore(&worker->lock, flags); kthread_flush_work(work); - spin_lock_irqsave(&worker->lock, flags); + raw_spin_lock_irqsave(&worker->lock, flags); work->canceling--; out_fast: - spin_unlock_irqrestore(&worker->lock, flags); + raw_spin_unlock_irqrestore(&worker->lock, flags); out: return ret; } diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 392c7f23af7651a52dda141740d890b77114cab6..c0bf04b6b965c124ee8e9028d33a54fb4abcc924 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -3,7 +3,7 @@ # and is generally not a function of system call inputs. 
KCOV_INSTRUMENT := n -obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o +obj-y += semaphore.o percpu-rwsem.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) @@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) endif +ifneq ($(CONFIG_PREEMPT_RT_FULL),y) +obj-y += mutex.o obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o +endif +obj-y += rwsem.o obj-$(CONFIG_LOCKDEP) += lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o @@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o +ifneq ($(CONFIG_PREEMPT_RT_FULL),y) obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o +endif +obj-$(CONFIG_PREEMPT_RT_FULL) += mutex-rt.o rwsem-rt.o rwlock-rt.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 4dc79f57af8277fb57646c13aed61d1b3321c3d3..4dd83428b56d19a2c1c2eba0a32c0a77ae4083b8 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3831,6 +3831,7 @@ static void check_flags(unsigned long flags) } } +#ifndef CONFIG_PREEMPT_RT_FULL /* * We dont accurately track softirq state in e.g. * hardirq contexts (such as on 4KSTACKS), so only @@ -3845,6 +3846,7 @@ static void check_flags(unsigned long flags) DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } +#endif if (!debug_locks) print_irqtrace_events(current); diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 95395ef5922ac9262211921507a00a452c332aaf..5748c1936edb2a7020d1b77aa85c0f3141b592ef 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c new file mode 100644 index 0000000000000000000000000000000000000000..4f81595c0f5211db966a040ec8c1fd47c0f40db0 --- /dev/null +++ b/kernel/locking/mutex-rt.c @@ -0,0 +1,223 @@ +/* + * kernel/rt.c + * + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * + * historic credit for proving that Linux spinlocks can be implemented via + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow + * and others) who prototyped it on 2.4 and did lots of comparative + * research and analysis; TimeSys, for proving that you can implement a + * fully preemptible kernel via the use of IRQ threading and mutexes; + * Bill Huey for persuasively arguing on lkml that the mutex model is the + * right one; and to MontaVista, who ported pmutexes to 2.6. + * + * This code is a from-scratch implementation and is not based on pmutexes, + * but the idea of converting spinlocks to mutexes is used here too. + * + * lock debugging, locking tree, deadlock detection: + * + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey + * Released under the General Public License (GPL). + * + * Includes portions of the generic R/W semaphore implementation from: + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
+ * - Derived partially from idea by Andrea Arcangeli + * - Derived also from comments by Linus + * + * Pending ownership of locks and ownership stealing: + * + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt + * + * (also by Steven Rostedt) + * - Converted single pi_lock to individual task locks. + * + * By Esben Nielsen: + * Doing priority inheritance with help of the scheduler. + * + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * - major rework based on Esben Nielsens initial patch + * - replaced thread_info references by task_struct refs + * - removed task->pending_owner dependency + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks + * in the scheduler return path as discussed with Steven Rostedt + * + * Copyright (C) 2006, Kihon Technologies Inc. + * Steven Rostedt + * - debugged and patched Thomas Gleixner's rework. + * - added back the cmpxchg to the rework. + * - turned atomic require back on for SMP. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtmutex_common.h" + +/* + * struct mutex functions + */ +void __mutex_do_init(struct mutex *mutex, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); + lockdep_init_map(&mutex->dep_map, name, key, 0); +#endif + mutex->lock.save_state = 0; +} +EXPORT_SYMBOL(__mutex_do_init); + +void __lockfunc _mutex_lock(struct mutex *lock) +{ + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); +} +EXPORT_SYMBOL(_mutex_lock); + +void __lockfunc _mutex_lock_io(struct mutex *lock) +{ + int token; + + token = io_schedule_prepare(); + _mutex_lock(lock); + io_schedule_finish(token); +} +EXPORT_SYMBOL_GPL(_mutex_lock_io); + +int __lockfunc _mutex_lock_interruptible(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible); + +int __lockfunc _mutex_lock_killable(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) +{ + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); +} +EXPORT_SYMBOL(_mutex_lock_nested); + +void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass) +{ + int token; + + token = io_schedule_prepare(); + + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); + + io_schedule_finish(token); +} +EXPORT_SYMBOL_GPL(_mutex_lock_io_nested); + +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) +{ + mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); + __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); +} +EXPORT_SYMBOL(_mutex_lock_nest_lock); + +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, 
_RET_IP_); + ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible_nested); + +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable_nested); +#endif + +int __lockfunc _mutex_trylock(struct mutex *lock) +{ + int ret = __rt_mutex_trylock(&lock->lock); + + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(_mutex_trylock); + +void __lockfunc _mutex_unlock(struct mutex *lock) +{ + mutex_release(&lock->dep_map, 1, _RET_IP_); + __rt_mutex_unlock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_unlock); + +/** + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 + * @cnt: the atomic which we are to dec + * @lock: the mutex to return holding if we dec to 0 + * + * return true and hold lock if we dec to 0, return false otherwise + */ +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) +{ + /* dec if we can't possibly hit 0 */ + if (atomic_add_unless(cnt, -1, 1)) + return 0; + /* we might hit 0, so take the lock */ + mutex_lock(lock); + if (!atomic_dec_and_test(cnt)) { + /* when we actually did the dec, we didn't hit 0 */ + mutex_unlock(lock); + return 0; + } + /* we hit 0, and we hold the lock */ + return 1; +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index a5ec4f68527e533fb25e447ed18e2efd179a21b5..fe5153fc76654be1fe3aaf4a7592ec832c93d689 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -7,6 +7,11 @@ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen + * Adaptive Spinlocks: + * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, + * and Peter Morreale, + * Adaptive Spinlocks simplification: + * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt * * See Documentation/locking/rt-mutex-design.txt for details. */ @@ -18,6 +23,8 @@ #include #include #include +#include +#include #include "rtmutex_common.h" @@ -135,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); } +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) +{ + return waiter && waiter != PI_WAKEUP_INPROGRESS && + waiter != PI_REQUEUE_INPROGRESS; +} + /* * We can speed up the acquire/release, if there's no debugging state to be * set up. 
@@ -228,7 +241,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, * Only use with rt_mutex_waiter_{less,equal}() */ #define task_to_waiter(p) \ - &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } + &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) } static inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, @@ -268,6 +281,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, return 1; } +#define STEAL_NORMAL 0 +#define STEAL_LATERAL 1 + +static inline int +rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode) +{ + struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); + + if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter)) + return 1; + + /* + * Note that RT tasks are excluded from lateral-steals + * to prevent the introduction of an unbounded latency. + */ + if (mode == STEAL_NORMAL || rt_task(waiter->task)) + return 0; + + return rt_mutex_waiter_equal(waiter, top_waiter); +} + static void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { @@ -372,6 +406,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, return debug_rt_mutex_detect_deadlock(waiter, chwalk); } +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) +{ + if (waiter->savestate) + wake_up_lock_sleeper(waiter->task); + else + wake_up_process(waiter->task); +} + /* * Max number of times we'll walk the boosting chain: */ @@ -379,7 +421,8 @@ int max_lock_depth = 1024; static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { - return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; + return rt_mutex_real_waiter(p->pi_blocked_on) ? + p->pi_blocked_on->lock : NULL; } /* @@ -515,7 +558,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ - if (!waiter) + if (!rt_mutex_real_waiter(waiter)) goto out_unlock_pi; /* @@ -696,13 +739,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * follow here. This is the end of the chain we are walking. */ if (!rt_mutex_owner(lock)) { + struct rt_mutex_waiter *lock_top_waiter; + /* * If the requeue [7] above changed the top waiter, * then we need to wake the new top waiter up to try * to get the lock. */ - if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) - wake_up_process(rt_mutex_top_waiter(lock)->task); + lock_top_waiter = rt_mutex_top_waiter(lock); + if (prerequeue_top_waiter != lock_top_waiter) + rt_mutex_wake_waiter(lock_top_waiter); raw_spin_unlock_irq(&lock->wait_lock); return 0; } @@ -804,9 +850,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @task: The task which wants to acquire the lock * @waiter: The waiter that is queued to the lock's wait tree if the * callsite called task_blocked_on_lock(), otherwise NULL + * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL) */ -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) +static int __try_to_take_rt_mutex(struct rt_mutex *lock, + struct task_struct *task, + struct rt_mutex_waiter *waiter, int mode) { lockdep_assert_held(&lock->wait_lock); @@ -842,12 +890,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (waiter) { /* - * If waiter is not the highest priority waiter of - * @lock, give up. 
+ * If waiter is not the highest priority waiter of @lock, + * or its peer when lateral steal is allowed, give up. */ - if (waiter != rt_mutex_top_waiter(lock)) + if (!rt_mutex_steal(lock, waiter, mode)) return 0; - /* * We can acquire the lock. Remove the waiter from the * lock waiters tree. @@ -865,14 +912,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (rt_mutex_has_waiters(lock)) { /* - * If @task->prio is greater than or equal to - * the top waiter priority (kernel view), - * @task lost. + * If @task->prio is greater than the top waiter + * priority (kernel view), or equal to it when a + * lateral steal is forbidden, @task lost. */ - if (!rt_mutex_waiter_less(task_to_waiter(task), - rt_mutex_top_waiter(lock))) + if (!rt_mutex_steal(lock, task_to_waiter(task), mode)) return 0; - /* * The current top waiter stays enqueued. We * don't have to change anything in the lock @@ -919,6 +964,344 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, return 1; } +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * preemptible spin_lock functions: + */ +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + might_sleep_no_state_check(); + + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return; + else + slowfn(lock); +} + +static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; + else + slowfn(lock); +} +#ifdef CONFIG_SMP +/* + * Note that owner is a speculative pointer and dereferencing relies + * on rcu_read_lock() and the check against the lock owner. + */ +static int adaptive_wait(struct rt_mutex *lock, + struct task_struct *owner) +{ + int res = 0; + + rcu_read_lock(); + for (;;) { + if (owner != rt_mutex_owner(lock)) + break; + /* + * Ensure that owner->on_cpu is dereferenced _after_ + * checking the above to be valid. + */ + barrier(); + if (!owner->on_cpu) { + res = 1; + break; + } + cpu_relax(); + } + rcu_read_unlock(); + return res; +} +#else +static int adaptive_wait(struct rt_mutex *lock, + struct task_struct *orig_owner) +{ + return 1; +} +#endif + +static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task, + enum rtmutex_chainwalk chwalk); +/* + * Slow path lock function spin_lock style: this variant is very + * careful not to miss any non-lock wakeups. + * + * We store the current state under p->pi_lock in p->saved_state and + * the try_to_wake_up() code handles this accordingly. + */ +void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + unsigned long flags) +{ + struct task_struct *lock_owner, *self = current; + struct rt_mutex_waiter *top_waiter; + int ret; + + if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) + return; + + BUG_ON(rt_mutex_owner(lock) == self); + + /* + * We save whatever state the task is in and we'll restore it + * after acquiring the lock taking real wakeups into account + * as well. We are serialized via pi_lock against wakeups. See + * try_to_wake_up(). + */ + raw_spin_lock(&self->pi_lock); + self->saved_state = self->state; + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock(&self->pi_lock); + + ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK); + BUG_ON(ret); + + for (;;) { + /* Try to acquire the lock again. 
*/ + if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL)) + break; + + top_waiter = rt_mutex_top_waiter(lock); + lock_owner = rt_mutex_owner(lock); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_print_deadlock(waiter); + + if (top_waiter != waiter || adaptive_wait(lock, lock_owner)) + schedule(); + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + raw_spin_lock(&self->pi_lock); + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock(&self->pi_lock); + } + + /* + * Restore the task state to current->saved_state. We set it + * to the original state above and the try_to_wake_up() code + * has possibly updated it when a real (non-rtmutex) wakeup + * happened while we were blocked. Clear saved_state so + * try_to_wakeup() does not get confused. + */ + raw_spin_lock(&self->pi_lock); + __set_current_state_no_track(self->saved_state); + self->saved_state = TASK_RUNNING; + raw_spin_unlock(&self->pi_lock); + + /* + * try_to_take_rt_mutex() sets the waiter bit + * unconditionally. We might have to fix that up: + */ + fixup_rt_mutex_waiters(lock); + + BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock)); + BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry)); +} + +static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) +{ + struct rt_mutex_waiter waiter; + unsigned long flags; + + rt_mutex_init_waiter(&waiter, true); + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + rt_spin_lock_slowlock_locked(lock, &waiter, flags); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + debug_rt_mutex_free_waiter(&waiter); +} + +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, + struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper); +/* + * Slow path to release a rt_mutex spin_lock style + */ +void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) +{ + unsigned long flags; + DEFINE_WAKE_Q(wake_q); + DEFINE_WAKE_Q(wake_sleeper_q); + bool postunlock; + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + if (postunlock) + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); +} + +void __lockfunc rt_spin_lock(spinlock_t *lock) +{ + sleeping_lock_inc(); + rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); +} +EXPORT_SYMBOL(rt_spin_lock); + +void __lockfunc __rt_spin_lock(struct rt_mutex *lock) +{ + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) +{ + sleeping_lock_inc(); + rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); +} +EXPORT_SYMBOL(rt_spin_lock_nested); +#endif + +void __lockfunc rt_spin_unlock(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); + migrate_enable(); + rcu_read_unlock(); + sleeping_lock_dec(); +} +EXPORT_SYMBOL(rt_spin_unlock); + +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) +{ + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(__rt_spin_unlock); + +/* + * Wait for the lock to get unlocked: instead of polling for an unlock + * (like raw spinlocks do), we lock and unlock, to 
force the kernel to + * schedule if there's contention: + */ +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) +{ + spin_lock(lock); + spin_unlock(lock); +} +EXPORT_SYMBOL(rt_spin_unlock_wait); + +int __lockfunc rt_spin_trylock(spinlock_t *lock) +{ + int ret; + + sleeping_lock_inc(); + migrate_disable(); + ret = __rt_mutex_trylock(&lock->lock); + if (ret) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + rcu_read_lock(); + } else { + migrate_enable(); + sleeping_lock_dec(); + } + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock); + +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) +{ + int ret; + + local_bh_disable(); + ret = __rt_mutex_trylock(&lock->lock); + if (ret) { + sleeping_lock_inc(); + rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } else + local_bh_enable(); + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_bh); + +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) +{ + int ret; + + *flags = 0; + ret = __rt_mutex_trylock(&lock->lock); + if (ret) { + sleeping_lock_inc(); + rcu_read_lock(); + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_irqsave); + +void +__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif +} +EXPORT_SYMBOL(__rt_spin_lock_init); + +#endif /* PREEMPT_RT_FULL */ + +#ifdef CONFIG_PREEMPT_RT_FULL + static inline int __sched +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); + struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); + + if (!hold_ctx) + return 0; + + if (unlikely(ctx == hold_ctx)) + return -EALREADY; + + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(ctx->contending_lock); + ctx->contending_lock = ww; +#endif + return -EDEADLK; + } + + return 0; +} +#else + static inline int __sched +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) +{ + BUG(); + return 0; +} + +#endif + +static inline int +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) +{ + return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); +} + /* * Task blocks on lock. * @@ -951,6 +1334,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, return -EDEADLK; raw_spin_lock(&task->pi_lock); + /* + * In the case of futex requeue PI, this will be a proxy + * lock. The task will wake unaware that it is enqueueed on + * this lock. Avoid blocking on two locks and corrupting + * pi_blocked_on via the PI_WAKEUP_INPROGRESS + * flag. futex_wait_requeue_pi() sets this when it wakes up + * before requeue (due to a signal or timeout). Do not enqueue + * the task if PI_WAKEUP_INPROGRESS is set. 
+ */ + if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { + raw_spin_unlock(&task->pi_lock); + return -EAGAIN; + } + + BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); + waiter->task = task; waiter->lock = lock; waiter->prio = task->prio; @@ -974,7 +1373,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, rt_mutex_enqueue_pi(owner, waiter); rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) + if (rt_mutex_real_waiter(owner->pi_blocked_on)) chain_walk = 1; } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { chain_walk = 1; @@ -1016,6 +1415,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * Called with lock->wait_lock held and interrupts disabled. */ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, + struct wake_q_head *wake_sleeper_q, struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; @@ -1055,7 +1455,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, * Pairs with preempt_enable() in rt_mutex_postunlock(); */ preempt_disable(); - wake_q_add(wake_q, waiter->task); + if (waiter->savestate) + wake_q_add_sleeper(wake_sleeper_q, waiter->task); + else + wake_q_add(wake_q, waiter->task); raw_spin_unlock(¤t->pi_lock); } @@ -1070,7 +1473,7 @@ static void remove_waiter(struct rt_mutex *lock, { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); - struct rt_mutex *next_lock; + struct rt_mutex *next_lock = NULL; lockdep_assert_held(&lock->wait_lock); @@ -1096,7 +1499,8 @@ static void remove_waiter(struct rt_mutex *lock, rt_mutex_adjust_prio(owner); /* Store the lock on which owner is blocked or NULL */ - next_lock = task_blocked_on_lock(owner); + if (rt_mutex_real_waiter(owner->pi_blocked_on)) + next_lock = task_blocked_on_lock(owner); raw_spin_unlock(&owner->pi_lock); @@ -1132,26 +1536,28 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; - if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + if (!rt_mutex_real_waiter(waiter) || + rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } next_lock = waiter->lock; - raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, next_lock, NULL, task); } -void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) { debug_rt_mutex_init_waiter(waiter); RB_CLEAR_NODE(&waiter->pi_tree_entry); RB_CLEAR_NODE(&waiter->tree_entry); waiter->task = NULL; + waiter->savestate = savestate; } /** @@ -1167,7 +1573,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter) + struct rt_mutex_waiter *waiter, + struct ww_acquire_ctx *ww_ctx) { int ret = 0; @@ -1176,16 +1583,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, if (try_to_take_rt_mutex(lock, current, waiter)) break; - /* - * TASK_INTERRUPTIBLE checks for signals and - * timeout. Ignored otherwise. - */ - if (likely(state == TASK_INTERRUPTIBLE)) { - /* Signal pending? 
*/ - if (signal_pending(current)) - ret = -EINTR; - if (timeout && !timeout->task) - ret = -ETIMEDOUT; + if (timeout && !timeout->task) { + ret = -ETIMEDOUT; + break; + } + if (signal_pending_state(state, current)) { + ret = -EINTR; + break; + } + + if (ww_ctx && ww_ctx->acquired > 0) { + ret = __mutex_lock_check_stamp(lock, ww_ctx); if (ret) break; } @@ -1224,33 +1632,104 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, } } -/* - * Slow path lock function: - */ -static int __sched -rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk) +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, + struct ww_acquire_ctx *ww_ctx) { - struct rt_mutex_waiter waiter; - unsigned long flags; - int ret = 0; +#ifdef CONFIG_DEBUG_MUTEXES + /* + * If this WARN_ON triggers, you used ww_mutex_lock to acquire, + * but released with a normal mutex_unlock in this call. + * + * This should never happen, always use ww_mutex_unlock. + */ + DEBUG_LOCKS_WARN_ON(ww->ctx); + + /* + * Not quite done after calling ww_acquire_done() ? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); - rt_mutex_init_waiter(&waiter); + if (ww_ctx->contending_lock) { + /* + * After -EDEADLK you tried to + * acquire a different ww_mutex? Bad! + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); + + /* + * You called ww_mutex_lock after receiving -EDEADLK, + * but 'forgot' to unlock everything else first? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); + ww_ctx->contending_lock = NULL; + } /* - * Technically we could use raw_spin_[un]lock_irq() here, but this can - * be called in early boot if the cmpxchg() fast path is disabled - * (debug, no architecture support). In this case we will acquire the - * rtmutex with lock->wait_lock held. But we cannot unconditionally - * enable interrupts in that early boot case. So we need to use the - * irqsave/restore variants. + * Naughty, using a different class will lead to undefined behavior! */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); +#endif + ww_ctx->acquired++; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +static void ww_mutex_account_lock(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); + struct rt_mutex_waiter *waiter, *n; + + /* + * This branch gets optimized out for the common case, + * and is only important for ww_mutex_lock. + */ + ww_mutex_lock_acquired(ww, ww_ctx); + ww->ctx = ww_ctx; + + /* + * Give any possible sleeping processes the chance to wake up, + * so they can recheck if they have to back off. 
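+	 *
+	 * A woken waiter re-runs __mutex_lock_check_stamp(): if its own
+	 * context is younger than the ctx installed above, it gets
+	 * -EDEADLK and backs off instead of going back to sleep.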
+ */ + rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root, + tree_entry) { + /* XXX debug rt mutex waiter wakeup */ + + BUG_ON(waiter->lock != lock); + rt_mutex_wake_waiter(waiter); + } +} + +#else + +static void ww_mutex_account_lock(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + BUG(); +} +#endif + +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx, + struct rt_mutex_waiter *waiter) +{ + int ret; + +#ifdef CONFIG_PREEMPT_RT_FULL + if (ww_ctx) { + struct ww_mutex *ww; + + ww = container_of(lock, struct ww_mutex, base.lock); + if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) + return -EALREADY; + } +#endif /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + if (ww_ctx) + ww_mutex_account_lock(lock, ww_ctx); return 0; } @@ -1260,16 +1739,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, if (unlikely(timeout)) hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); - ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); + ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); - if (likely(!ret)) + if (likely(!ret)) { /* sleep on the mutex */ - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); + ret = __rt_mutex_slowlock(lock, state, timeout, waiter, + ww_ctx); + } else if (ww_ctx) { + /* ww_mutex received EDEADLK, let it become EALREADY */ + ret = __mutex_lock_check_stamp(lock, ww_ctx); + BUG_ON(!ret); + } if (unlikely(ret)) { __set_current_state(TASK_RUNNING); - remove_waiter(lock, &waiter); - rt_mutex_handle_deadlock(ret, chwalk, &waiter); + remove_waiter(lock, waiter); + /* ww_mutex wants to report EDEADLK/EALREADY, let it */ + if (!ww_ctx) + rt_mutex_handle_deadlock(ret, chwalk, waiter); + } else if (ww_ctx) { + ww_mutex_account_lock(lock, ww_ctx); } /* @@ -1277,6 +1766,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, * unconditionally. We might have to fix that up. */ fixup_rt_mutex_waiters(lock); + return ret; +} + +/* + * Slow path lock function: + */ +static int __sched +rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx) +{ + struct rt_mutex_waiter waiter; + unsigned long flags; + int ret = 0; + + rt_mutex_init_waiter(&waiter, false); + + /* + * Technically we could use raw_spin_[un]lock_irq() here, but this can + * be called in early boot if the cmpxchg() fast path is disabled + * (debug, no architecture support). In this case we will acquire the + * rtmutex with lock->wait_lock held. But we cannot unconditionally + * enable interrupts in that early boot case. So we need to use the + * irqsave/restore variants. + */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx, + &waiter); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); @@ -1337,7 +1856,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) * Return whether the current task needs to call rt_mutex_postunlock(). */ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) + struct wake_q_head *wake_q, + struct wake_q_head *wake_sleeper_q) { unsigned long flags; @@ -1391,7 +1911,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * * Queue the next waiter for wakeup once we release the wait_lock. 
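 *
 * On RT, waiters of spinlock substitutions (waiter->savestate == true)
 * go onto the separate wake_sleeper_q and are woken via
 * wake_up_q_sleeper(), keeping the saved_state based wakeup apart from
 * regular wakeups.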
*/ - mark_wakeup_next_waiter(wake_q, lock); + mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); return true; /* call rt_mutex_postunlock() */ @@ -1405,29 +1925,45 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, */ static inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, + struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx)) { if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 0; - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + /* + * If rt_mutex blocks, the function sched_submit_work will not call + * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true). + * We must call blk_schedule_flush_plug here, if we don't call it, + * a deadlock in I/O may happen. + */ + if (unlikely(blk_needs_flush_plug(current))) + blk_schedule_flush_plug(current); + + return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx); } static inline int rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx)) { if (chwalk == RT_MUTEX_MIN_CHAINWALK && likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 0; - return slowfn(lock, state, timeout, chwalk); + if (unlikely(blk_needs_flush_plug(current))) + blk_schedule_flush_plug(current); + + return slowfn(lock, state, timeout, chwalk, ww_ctx); } static inline int @@ -1443,9 +1979,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, /* * Performs the wakeup of the the top-waiter and re-enables preemption. 
*/ -void rt_mutex_postunlock(struct wake_q_head *wake_q) +void rt_mutex_postunlock(struct wake_q_head *wake_q, + struct wake_q_head *wake_sleeper_q) { wake_up_q(wake_q); + wake_up_q_sleeper(wake_sleeper_q); /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ preempt_enable(); @@ -1454,23 +1992,46 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) static inline void rt_mutex_fastunlock(struct rt_mutex *lock, bool (*slowfn)(struct rt_mutex *lock, - struct wake_q_head *wqh)) + struct wake_q_head *wqh, + struct wake_q_head *wq_sleeper)) { DEFINE_WAKE_Q(wake_q); + DEFINE_WAKE_Q(wake_sleeper_q); if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) return; - if (slowfn(lock, &wake_q)) - rt_mutex_postunlock(&wake_q); + if (slowfn(lock, &wake_q, &wake_sleeper_q)) + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); } -static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) +int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state) { might_sleep(); + return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock); +} + +/** + * rt_mutex_lock_state - lock a rt_mutex with a given state + * + * @lock: The rt_mutex to be locked + * @state: The state to set when blocking on the rt_mutex + */ +static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock, + unsigned int subclass, int state) +{ + int ret; mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); + ret = __rt_mutex_lock_state(lock, state); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} + +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) +{ + rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE); } #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -1511,16 +2072,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { - int ret; - - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, 1, _RET_IP_); - - return ret; + return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE); } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); @@ -1537,6 +2089,22 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) return __rt_mutex_slowtrylock(lock); } +/** + * rt_mutex_lock_killable - lock a rt_mutex killable + * + * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: + * 0 on success + * -EINTR when interrupted by a signal + */ +int __sched rt_mutex_lock_killable(struct rt_mutex *lock) +{ + return rt_mutex_lock_state(lock, 0, TASK_KILLABLE); +} +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); + /** * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided @@ -1560,6 +2128,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, RT_MUTEX_MIN_CHAINWALK, + NULL, rt_mutex_slowlock); if (ret) mutex_release(&lock->dep_map, 1, _RET_IP_); @@ -1568,6 +2137,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) } EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); +int __sched __rt_mutex_trylock(struct rt_mutex *lock) +{ +#ifdef CONFIG_PREEMPT_RT_FULL + if (WARN_ON_ONCE(in_irq() || in_nmi())) +#else + if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) +#endif + return 0; + + return 
rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); +} + /** * rt_mutex_trylock - try to lock a rt_mutex * @@ -1583,10 +2164,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) { int ret; - if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) - return 0; - - ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); + ret = __rt_mutex_trylock(lock); if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); @@ -1594,6 +2172,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) } EXPORT_SYMBOL_GPL(rt_mutex_trylock); +void __sched __rt_mutex_unlock(struct rt_mutex *lock) +{ + rt_mutex_fastunlock(lock, rt_mutex_slowunlock); +} + /** * rt_mutex_unlock - unlock a rt_mutex * @@ -1602,16 +2185,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); void __sched rt_mutex_unlock(struct rt_mutex *lock) { mutex_release(&lock->dep_map, 1, _RET_IP_); - rt_mutex_fastunlock(lock, rt_mutex_slowunlock); + __rt_mutex_unlock(lock); } EXPORT_SYMBOL_GPL(rt_mutex_unlock); -/** - * Futex variant, that since futex variants do not use the fast-path, can be - * simple and will not need to retry. - */ -bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, + struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper) { lockdep_assert_held(&lock->wait_lock); @@ -1628,23 +2208,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, * avoid inversion prior to the wakeup. preempt_disable() * therein pairs with rt_mutex_postunlock(). */ - mark_wakeup_next_waiter(wake_q, lock); + mark_wakeup_next_waiter(wake_q, wq_sleeper, lock); return true; /* call postunlock() */ } +/** + * Futex variant, that since futex variants do not use the fast-path, can be + * simple and will not need to retry. + */ +bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, + struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper) +{ + return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper); +} + void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) { DEFINE_WAKE_Q(wake_q); + DEFINE_WAKE_Q(wake_sleeper_q); unsigned long flags; bool postunlock; raw_spin_lock_irqsave(&lock->wait_lock, flags); - postunlock = __rt_mutex_futex_unlock(lock, &wake_q); + postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); if (postunlock) - rt_mutex_postunlock(&wake_q); + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); } /** @@ -1683,7 +2275,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, if (name && key) debug_rt_mutex_init(lock, name, key); } -EXPORT_SYMBOL_GPL(__rt_mutex_init); +EXPORT_SYMBOL(__rt_mutex_init); /** * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a @@ -1703,6 +2295,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL, NULL); +#ifdef CONFIG_DEBUG_SPINLOCK + /* + * get another key class for the wait_lock. LOCK_PI and UNLOCK_PI is + * holding the ->wait_lock of the proxy_lock while unlocking a sleeping + * lock. + */ + raw_spin_lock_init(&lock->wait_lock); +#endif debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } @@ -1725,6 +2325,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) rt_mutex_set_owner(lock, NULL); } +static void fixup_rt_mutex_blocked(struct rt_mutex *lock) +{ + struct task_struct *tsk = current; + /* + * RT has a problem here when the wait got interrupted by a timeout + * or a signal. 
task->pi_blocked_on is still set. The task must
+	 * acquire the hash bucket lock when returning from this function.
+	 *
+	 * If the hash bucket lock is contended then the
+	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
+	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
+	 * clearing task->pi_blocked_on which removes the task from the
+	 * boosting chain of the rtmutex. That's correct because the task
+	 * is no longer blocked on it.
+	 */
+	raw_spin_lock(&tsk->pi_lock);
+	tsk->pi_blocked_on = NULL;
+	raw_spin_unlock(&tsk->pi_lock);
+}
+
 /**
  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
  * @lock:		the rt_mutex to take
@@ -1755,6 +2375,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	if (try_to_take_rt_mutex(lock, task, NULL))
 		return 1;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * In PREEMPT_RT there's an added race.
+	 * If the task that we are about to requeue times out,
+	 * it can set PI_WAKEUP_INPROGRESS. This tells the requeue
+	 * to skip this task. But right after the task sets
+	 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+	 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+	 * This will replace the PI_WAKEUP_INPROGRESS with the actual
+	 * lock that it blocks on. We *must not* place this task
+	 * on this proxy lock in that case.
+	 *
+	 * To prevent this race, we first take the task's pi_lock
+	 * and check if it has updated its pi_blocked_on. If it has,
+	 * we assume that it woke up and we return -EAGAIN.
+	 * Otherwise, we set the task's pi_blocked_on to
+	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+	 * it will know that we are in the process of requeuing it.
+	 */
+	raw_spin_lock(&task->pi_lock);
+	if (task->pi_blocked_on) {
+		raw_spin_unlock(&task->pi_lock);
+		return -EAGAIN;
+	}
+	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+	raw_spin_unlock(&task->pi_lock);
+#endif
+
 	/* We enforce deadlock detection for futexes */
 	ret = task_blocks_on_rt_mutex(lock, waiter, task,
 				      RT_MUTEX_FULL_CHAINWALK);
@@ -1769,6 +2417,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		ret = 0;
 	}
 
+	if (ret)
+		fixup_rt_mutex_blocked(lock);
+
 	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
@@ -1854,12 +2505,15 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
*/ fixup_rt_mutex_waiters(lock); + if (ret) + fixup_rt_mutex_blocked(lock); + raw_spin_unlock_irq(&lock->wait_lock); return ret; @@ -1921,3 +2575,99 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, return cleanup; } + +static inline int +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH + unsigned tmp; + + if (ctx->deadlock_inject_countdown-- == 0) { + tmp = ctx->deadlock_inject_interval; + if (tmp > UINT_MAX/4) + tmp = UINT_MAX; + else + tmp = tmp*2 + tmp + tmp/2; + + ctx->deadlock_inject_interval = tmp; + ctx->deadlock_inject_countdown = tmp; + ctx->contending_lock = lock; + + ww_mutex_unlock(lock); + + return -EDEADLK; + } +#endif + + return 0; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +int __sched +ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ + int ret; + + might_sleep(); + + mutex_acquire_nest(&lock->base.dep_map, 0, 0, + ctx ? &ctx->dep_map : NULL, _RET_IP_); + ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, + ctx); + if (ret) + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + else if (!ret && ctx && ctx->acquired > 1) + return ww_mutex_deadlock_injection(lock, ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); + +int __sched +ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ + int ret; + + might_sleep(); + + mutex_acquire_nest(&lock->base.dep_map, 0, 0, + ctx ? &ctx->dep_map : NULL, _RET_IP_); + ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, + ctx); + if (ret) + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + else if (!ret && ctx && ctx->acquired > 1) + return ww_mutex_deadlock_injection(lock, ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(ww_mutex_lock); + +void __sched ww_mutex_unlock(struct ww_mutex *lock) +{ + int nest = !!lock->ctx; + + /* + * The unlocking fastpath is the 0->1 transition from 'locked' + * into 'unlocked' state: + */ + if (nest) { +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); +#endif + if (lock->ctx->acquired > 0) + lock->ctx->acquired--; + lock->ctx = NULL; + } + + mutex_release(&lock->base.dep_map, nest, _RET_IP_); + __rt_mutex_unlock(&lock->base.lock); +} +EXPORT_SYMBOL(ww_mutex_unlock); + +int __rt_mutex_owner_current(struct rt_mutex *lock) +{ + return rt_mutex_owner(lock) == current; +} +EXPORT_SYMBOL(__rt_mutex_owner_current); +#endif diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index ca6fb489007b6baced167ea52ccc998179b18eec..8e0c592273e601f577d9e1a5b7314490ffded013 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -15,6 +15,7 @@ #include #include +#include /* * This is the control structure for tasks blocked on a rt_mutex, @@ -29,6 +30,7 @@ struct rt_mutex_waiter { struct rb_node pi_tree_entry; struct task_struct *task; struct rt_mutex *lock; + bool savestate; #ifdef CONFIG_DEBUG_RT_MUTEXES unsigned long ip; struct pid *deadlock_task_pid; @@ -130,11 +132,14 @@ enum rtmutex_chainwalk { /* * PI-futex support (proxy locking functions, etc.): */ +#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) +#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); -extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); +extern 
void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
 extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 				     struct rt_mutex_waiter *waiter,
 				     struct task_struct *task);
@@ -152,9 +157,27 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
-				 struct wake_q_head *wqh);
-
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+				 struct wake_q_head *wqh,
+				 struct wake_q_head *wq_sleeper);
+
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+				struct wake_q_head *wake_sleeper_q);
+
+/* RW semaphore special interface */
+struct ww_acquire_ctx;
+
+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+extern int __rt_mutex_trylock(struct rt_mutex *lock);
+extern void __rt_mutex_unlock(struct rt_mutex *lock);
+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+				     struct hrtimer_sleeper *timeout,
+				     enum rtmutex_chainwalk chwalk,
+				     struct ww_acquire_ctx *ww_ctx,
+				     struct rt_mutex_waiter *waiter);
+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+					  struct rt_mutex_waiter *waiter,
+					  unsigned long flags);
+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
new file mode 100644
index 0000000000000000000000000000000000000000..0ae8c62ea8320ee3a09cda108f48992f47d434d2
--- /dev/null
+++ b/kernel/locking/rwlock-rt.c
@@ -0,0 +1,384 @@
+/*
+ */
+#include
+#include
+
+#include "rtmutex_common.h"
+#include
+
+/*
+ * RT-specific reader/writer locks
+ *
+ * write_lock()
+ *  1) Lock lock->rtmutex
+ *  2) Remove the reader BIAS to force readers into the slow path
+ *  3) Wait until all readers have left the critical region
+ *  4) Mark it write locked
+ *
+ * write_unlock()
+ *  1) Remove the write locked marker
+ *  2) Set the reader BIAS so readers can use the fast path again
+ *  3) Unlock lock->rtmutex to release blocked readers
+ *
+ * read_lock()
+ *  1) Try fast path acquisition (reader BIAS is set)
+ *  2) Take lock->rtmutex.wait_lock which protects the writelocked flag
+ *  3) If !writelocked, acquire it for read
+ *  4) If writelocked, block on lock->rtmutex
+ *  5) unlock lock->rtmutex, goto 1)
+ *
+ * read_unlock()
+ *  1) Try fast path release (reader count != 1)
+ *  2) Wake the writer waiting in write_lock()#3
+ *
+ * read_lock()#3 has the consequence that rw locks on RT are not writer
+ * fair, but writers, which should be avoided in RT tasks (think tasklist
+ * lock), are subject to the rtmutex priority/DL inheritance mechanism.
+ *
+ * It's possible to make the rw locks writer fair by keeping a list of
+ * active readers. A blocked writer would force all newly incoming readers
+ * to block on the rtmutex, but the rtmutex would have to be proxy locked
+ * for one reader after the other. We can't use multi-reader inheritance
+ * because there is no way to support that with
+ * SCHED_DEADLINE. Implementing the one by one reader boosting/handover
+ * mechanism is a major surgery for a very dubious value.
+ *
+ * The risk of writer starvation is there, but the pathological use cases
+ * which trigger it are not necessarily the typical RT workloads.
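+ *
+ * By example, the lock->readers counter encodes the following states
+ * (READER_BIAS and WRITER_BIAS come from the RT lock headers, not from
+ * this file):
+ *
+ *   READER_BIAS + n  n readers hold the lock, no writer; the counter
+ *                    is negative, so __read_rt_trylock() succeeds
+ *   n                a writer removed the bias and waits for the
+ *                    remaining n readers to drain
+ *   0                all readers left; the writer sets WRITER_BIAS
+ *   WRITER_BIAS      write locked; __read_rt_trylock() sees r >= 0,
+ *                    fails and pushes readers into the slow path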
+ */ + +void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif + atomic_set(&lock->readers, READER_BIAS); + rt_mutex_init(&lock->rtmutex); + lock->rtmutex.save_state = 1; +} + +int __read_rt_trylock(struct rt_rw_lock *lock) +{ + int r, old; + + /* + * Increment reader count, if lock->readers < 0, i.e. READER_BIAS is + * set. + */ + for (r = atomic_read(&lock->readers); r < 0;) { + old = atomic_cmpxchg(&lock->readers, r, r + 1); + if (likely(old == r)) + return 1; + r = old; + } + return 0; +} + +void __sched __read_rt_lock(struct rt_rw_lock *lock) +{ + struct rt_mutex *m = &lock->rtmutex; + struct rt_mutex_waiter waiter; + unsigned long flags; + + if (__read_rt_trylock(lock)) + return; + + raw_spin_lock_irqsave(&m->wait_lock, flags); + /* + * Allow readers as long as the writer has not completely + * acquired the semaphore for write. + */ + if (atomic_read(&lock->readers) != WRITER_BIAS) { + atomic_inc(&lock->readers); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return; + } + + /* + * Call into the slow lock path with the rtmutex->wait_lock + * held, so this can't result in the following race: + * + * Reader1 Reader2 Writer + * read_lock() + * write_lock() + * rtmutex_lock(m) + * swait() + * read_lock() + * unlock(m->wait_lock) + * read_unlock() + * swake() + * lock(m->wait_lock) + * lock->writelocked=true + * unlock(m->wait_lock) + * + * write_unlock() + * lock->writelocked=false + * rtmutex_unlock(m) + * read_lock() + * write_lock() + * rtmutex_lock(m) + * swait() + * rtmutex_lock(m) + * + * That would put Reader1 behind the writer waiting on + * Reader2 to call read_unlock() which might be unbound. + */ + rt_mutex_init_waiter(&waiter, true); + rt_spin_lock_slowlock_locked(m, &waiter, flags); + /* + * The slowlock() above is guaranteed to return with the rtmutex is + * now held, so there can't be a writer active. Increment the reader + * count and immediately drop the rtmutex again. + */ + atomic_inc(&lock->readers); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + rt_spin_lock_slowunlock(m); + + debug_rt_mutex_free_waiter(&waiter); +} + +void __read_rt_unlock(struct rt_rw_lock *lock) +{ + struct rt_mutex *m = &lock->rtmutex; + struct task_struct *tsk; + + /* + * sem->readers can only hit 0 when a writer is waiting for the + * active readers to leave the critical region. + */ + if (!atomic_dec_and_test(&lock->readers)) + return; + + raw_spin_lock_irq(&m->wait_lock); + /* + * Wake the writer, i.e. the rtmutex owner. It might release the + * rtmutex concurrently in the fast path, but to clean up the rw + * lock it needs to acquire m->wait_lock. The worst case which can + * happen is a spurious wakeup. 
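+	 *
+	 * wake_up_process() on a task that is already running is a no-op,
+	 * so the spurious wakeup needs no further handling here.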
+ */ + tsk = rt_mutex_owner(m); + if (tsk) + wake_up_process(tsk); + + raw_spin_unlock_irq(&m->wait_lock); +} + +static void __write_unlock_common(struct rt_rw_lock *lock, int bias, + unsigned long flags) +{ + struct rt_mutex *m = &lock->rtmutex; + + atomic_add(READER_BIAS - bias, &lock->readers); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + rt_spin_lock_slowunlock(m); +} + +void __sched __write_rt_lock(struct rt_rw_lock *lock) +{ + struct rt_mutex *m = &lock->rtmutex; + struct task_struct *self = current; + unsigned long flags; + + /* Take the rtmutex as a first step */ + __rt_spin_lock(m); + + /* Force readers into slow path */ + atomic_sub(READER_BIAS, &lock->readers); + + raw_spin_lock_irqsave(&m->wait_lock, flags); + + raw_spin_lock(&self->pi_lock); + self->saved_state = self->state; + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock(&self->pi_lock); + + for (;;) { + /* Have all readers left the critical region? */ + if (!atomic_read(&lock->readers)) { + atomic_set(&lock->readers, WRITER_BIAS); + raw_spin_lock(&self->pi_lock); + __set_current_state_no_track(self->saved_state); + self->saved_state = TASK_RUNNING; + raw_spin_unlock(&self->pi_lock); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return; + } + + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + + if (atomic_read(&lock->readers) != 0) + schedule(); + + raw_spin_lock_irqsave(&m->wait_lock, flags); + + raw_spin_lock(&self->pi_lock); + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock(&self->pi_lock); + } +} + +int __write_rt_trylock(struct rt_rw_lock *lock) +{ + struct rt_mutex *m = &lock->rtmutex; + unsigned long flags; + + if (!__rt_mutex_trylock(m)) + return 0; + + atomic_sub(READER_BIAS, &lock->readers); + + raw_spin_lock_irqsave(&m->wait_lock, flags); + if (!atomic_read(&lock->readers)) { + atomic_set(&lock->readers, WRITER_BIAS); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return 1; + } + __write_unlock_common(lock, 0, flags); + return 0; +} + +void __write_rt_unlock(struct rt_rw_lock *lock) +{ + struct rt_mutex *m = &lock->rtmutex; + unsigned long flags; + + raw_spin_lock_irqsave(&m->wait_lock, flags); + __write_unlock_common(lock, WRITER_BIAS, flags); +} + +/* Map the reader biased implementation */ +static inline int do_read_rt_trylock(rwlock_t *rwlock) +{ + return __read_rt_trylock(rwlock); +} + +static inline int do_write_rt_trylock(rwlock_t *rwlock) +{ + return __write_rt_trylock(rwlock); +} + +static inline void do_read_rt_lock(rwlock_t *rwlock) +{ + __read_rt_lock(rwlock); +} + +static inline void do_write_rt_lock(rwlock_t *rwlock) +{ + __write_rt_lock(rwlock); +} + +static inline void do_read_rt_unlock(rwlock_t *rwlock) +{ + __read_rt_unlock(rwlock); +} + +static inline void do_write_rt_unlock(rwlock_t *rwlock) +{ + __write_rt_unlock(rwlock); +} + +static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name, + struct lock_class_key *key) +{ + __rwlock_biased_rt_init(rwlock, name, key); +} + +int __lockfunc rt_read_can_lock(rwlock_t *rwlock) +{ + return atomic_read(&rwlock->readers) < 0; +} + +int __lockfunc rt_write_can_lock(rwlock_t *rwlock) +{ + return atomic_read(&rwlock->readers) == READER_BIAS; +} + +/* + * The common functions which get wrapped into the rwlock API. 
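+ *
+ * Each wrapper takes the sleeping lock accounting (sleeping_lock_inc()),
+ * an RCU read side critical section and migrate_disable() before the
+ * actual lock operation and drops them in reverse order on unlock. The
+ * rcu_read_lock() preserves the implicit RCU read side critical section
+ * that a non-RT read_lock()/write_lock() provides by disabling
+ * preemption.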
+ */ +int __lockfunc rt_read_trylock(rwlock_t *rwlock) +{ + int ret; + + sleeping_lock_inc(); + migrate_disable(); + ret = do_read_rt_trylock(rwlock); + if (ret) { + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); + rcu_read_lock(); + } else { + migrate_enable(); + sleeping_lock_dec(); + } + return ret; +} +EXPORT_SYMBOL(rt_read_trylock); + +int __lockfunc rt_write_trylock(rwlock_t *rwlock) +{ + int ret; + + sleeping_lock_inc(); + migrate_disable(); + ret = do_write_rt_trylock(rwlock); + if (ret) { + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + rcu_read_lock(); + } else { + migrate_enable(); + sleeping_lock_dec(); + } + return ret; +} +EXPORT_SYMBOL(rt_write_trylock); + +void __lockfunc rt_read_lock(rwlock_t *rwlock) +{ + sleeping_lock_inc(); + rcu_read_lock(); + migrate_disable(); + rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); + do_read_rt_lock(rwlock); +} +EXPORT_SYMBOL(rt_read_lock); + +void __lockfunc rt_write_lock(rwlock_t *rwlock) +{ + sleeping_lock_inc(); + rcu_read_lock(); + migrate_disable(); + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + do_write_rt_lock(rwlock); +} +EXPORT_SYMBOL(rt_write_lock); + +void __lockfunc rt_read_unlock(rwlock_t *rwlock) +{ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + do_read_rt_unlock(rwlock); + migrate_enable(); + rcu_read_unlock(); + sleeping_lock_dec(); +} +EXPORT_SYMBOL(rt_read_unlock); + +void __lockfunc rt_write_unlock(rwlock_t *rwlock) +{ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + do_write_rt_unlock(rwlock); + migrate_enable(); + rcu_read_unlock(); + sleeping_lock_dec(); +} +EXPORT_SYMBOL(rt_write_unlock); + +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) +{ + do_rwlock_rt_init(rwlock, name, key); +} +EXPORT_SYMBOL(__rt_rwlock_init); diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c new file mode 100644 index 0000000000000000000000000000000000000000..966946454ced741c87ca81184d75ad27e8bb6d2c --- /dev/null +++ b/kernel/locking/rwsem-rt.c @@ -0,0 +1,312 @@ +/* + */ +#include +#include +#include +#include +#include + +#include "rtmutex_common.h" + +/* + * RT-specific reader/writer semaphores + * + * down_write() + * 1) Lock sem->rtmutex + * 2) Remove the reader BIAS to force readers into the slow path + * 3) Wait until all readers have left the critical region + * 4) Mark it write locked + * + * up_write() + * 1) Remove the write locked marker + * 2) Set the reader BIAS so readers can use the fast path again + * 3) Unlock sem->rtmutex to release blocked readers + * + * down_read() + * 1) Try fast path acquisition (reader BIAS is set) + * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag + * 3) If !writelocked, acquire it for read + * 4) If writelocked, block on sem->rtmutex + * 5) unlock sem->rtmutex, goto 1) + * + * up_read() + * 1) Try fast path release (reader count != 1) + * 2) Wake the writer waiting in down_write()#3 + * + * down_read()#3 has the consequence, that rw semaphores on RT are not writer + * fair, but writers, which should be avoided in RT tasks (think mmap_sem), + * are subject to the rtmutex priority/DL inheritance mechanism. + * + * It's possible to make the rw semaphores writer fair by keeping a list of + * active readers. A blocked writer would force all newly incoming readers to + * block on the rtmutex, but the rtmutex would have to be proxy locked for one + * reader after the other. We can't use multi-reader inheritance because there + * is no way to support that with SCHED_DEADLINE. 
Implementing the one by one + * reader boosting/handover mechanism is a major surgery for a very dubious + * value. + * + * The risk of writer starvation is there, but the pathological use cases + * which trigger it are not necessarily the typical RT workloads. + */ + +void __rwsem_init(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)sem, sizeof(*sem)); + lockdep_init_map(&sem->dep_map, name, key, 0); +#endif + atomic_set(&sem->readers, READER_BIAS); +} +EXPORT_SYMBOL(__rwsem_init); + +int __down_read_trylock(struct rw_semaphore *sem) +{ + int r, old; + + /* + * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is + * set. + */ + for (r = atomic_read(&sem->readers); r < 0;) { + old = atomic_cmpxchg(&sem->readers, r, r + 1); + if (likely(old == r)) + return 1; + r = old; + } + return 0; +} + +static int __sched __down_read_common(struct rw_semaphore *sem, int state) +{ + struct rt_mutex *m = &sem->rtmutex; + struct rt_mutex_waiter waiter; + int ret; + + if (__down_read_trylock(sem)) + return 0; + /* + * If rt_mutex blocks, the function sched_submit_work will not call + * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true). + * We must call blk_schedule_flush_plug here, if we don't call it, + * a deadlock in I/O may happen. + */ + if (unlikely(blk_needs_flush_plug(current))) + blk_schedule_flush_plug(current); + + might_sleep(); + raw_spin_lock_irq(&m->wait_lock); + /* + * Allow readers as long as the writer has not completely + * acquired the semaphore for write. + */ + if (atomic_read(&sem->readers) != WRITER_BIAS) { + atomic_inc(&sem->readers); + raw_spin_unlock_irq(&m->wait_lock); + return 0; + } + + /* + * Call into the slow lock path with the rtmutex->wait_lock + * held, so this can't result in the following race: + * + * Reader1 Reader2 Writer + * down_read() + * down_write() + * rtmutex_lock(m) + * swait() + * down_read() + * unlock(m->wait_lock) + * up_read() + * swake() + * lock(m->wait_lock) + * sem->writelocked=true + * unlock(m->wait_lock) + * + * up_write() + * sem->writelocked=false + * rtmutex_unlock(m) + * down_read() + * down_write() + * rtmutex_lock(m) + * swait() + * rtmutex_lock(m) + * + * That would put Reader1 behind the writer waiting on + * Reader2 to call up_read() which might be unbound. + */ + rt_mutex_init_waiter(&waiter, false); + ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK, + NULL, &waiter); + /* + * The slowlock() above is guaranteed to return with the rtmutex (for + * ret = 0) is now held, so there can't be a writer active. Increment + * the reader count and immediately drop the rtmutex again. + * For ret != 0 we don't hold the rtmutex and need unlock the wait_lock. + * We don't own the lock then. 
+ */ + if (!ret) + atomic_inc(&sem->readers); + raw_spin_unlock_irq(&m->wait_lock); + if (!ret) + __rt_mutex_unlock(m); + + debug_rt_mutex_free_waiter(&waiter); + return ret; +} + +void __down_read(struct rw_semaphore *sem) +{ + int ret; + + ret = __down_read_common(sem, TASK_UNINTERRUPTIBLE); + WARN_ON_ONCE(ret); +} + +int __down_read_interruptible(struct rw_semaphore *sem) +{ + int ret; + + ret = __down_read_common(sem, TASK_INTERRUPTIBLE); + if (likely(!ret)) + return ret; + WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret); + return -EINTR; +} + +int __down_read_killable(struct rw_semaphore *sem) +{ + int ret; + + ret = __down_read_common(sem, TASK_KILLABLE); + if (likely(!ret)) + return ret; + WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret); + return -EINTR; +} + +void __up_read(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + struct task_struct *tsk; + + /* + * sem->readers can only hit 0 when a writer is waiting for the + * active readers to leave the critical region. + */ + if (!atomic_dec_and_test(&sem->readers)) + return; + + raw_spin_lock_irq(&m->wait_lock); + /* + * Wake the writer, i.e. the rtmutex owner. It might release the + * rtmutex concurrently in the fast path (due to a signal), but to + * clean up the rwsem it needs to acquire m->wait_lock. The worst + * case which can happen is a spurious wakeup. + */ + tsk = rt_mutex_owner(m); + if (tsk) + wake_up_process(tsk); + + raw_spin_unlock_irq(&m->wait_lock); +} + +static void __up_write_unlock(struct rw_semaphore *sem, int bias, + unsigned long flags) +{ + struct rt_mutex *m = &sem->rtmutex; + + atomic_add(READER_BIAS - bias, &sem->readers); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + __rt_mutex_unlock(m); +} + +static int __sched __down_write_common(struct rw_semaphore *sem, int state) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + /* Take the rtmutex as a first step */ + if (__rt_mutex_lock_state(m, state)) + return -EINTR; + + /* Force readers into slow path */ + atomic_sub(READER_BIAS, &sem->readers); + might_sleep(); + + set_current_state(state); + for (;;) { + raw_spin_lock_irqsave(&m->wait_lock, flags); + /* Have all readers left the critical region? 
*/ + if (!atomic_read(&sem->readers)) { + atomic_set(&sem->readers, WRITER_BIAS); + __set_current_state(TASK_RUNNING); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return 0; + } + + if (signal_pending_state(state, current)) { + __set_current_state(TASK_RUNNING); + __up_write_unlock(sem, 0, flags); + return -EINTR; + } + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + + if (atomic_read(&sem->readers) != 0) { + schedule(); + set_current_state(state); + } + } +} + +void __sched __down_write(struct rw_semaphore *sem) +{ + __down_write_common(sem, TASK_UNINTERRUPTIBLE); +} + +int __sched __down_write_killable(struct rw_semaphore *sem) +{ + return __down_write_common(sem, TASK_KILLABLE); +} + +int __down_write_trylock(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + if (!__rt_mutex_trylock(m)) + return 0; + + atomic_sub(READER_BIAS, &sem->readers); + + raw_spin_lock_irqsave(&m->wait_lock, flags); + if (!atomic_read(&sem->readers)) { + atomic_set(&sem->readers, WRITER_BIAS); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return 1; + } + __up_write_unlock(sem, 0, flags); + return 0; +} + +void __up_write(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + raw_spin_lock_irqsave(&m->wait_lock, flags); + __up_write_unlock(sem, WRITER_BIAS, flags); +} + +void __downgrade_write(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + raw_spin_lock_irqsave(&m->wait_lock, flags); + /* Release it and account current as reader */ + __up_write_unlock(sem, WRITER_BIAS - 1, flags); +} diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 936f3d14dd6bfeda3ef7921266fef5dc5b36e2a8..e89b70f474afe5173631e7e64ea27ec1a6de3485 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -117,8 +117,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ * __[spin|read|write]_lock_bh() */ BUILD_LOCK_OPS(spin, raw_spinlock); + +#ifndef CONFIG_PREEMPT_RT_FULL BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); +#endif #endif @@ -202,6 +205,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif +#ifndef CONFIG_PREEMPT_RT_FULL + #ifndef CONFIG_INLINE_READ_TRYLOCK int __lockfunc _raw_read_trylock(rwlock_t *lock) { @@ -346,6 +351,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) EXPORT_SYMBOL(_raw_write_unlock_bh); #endif +#endif /* !PREEMPT_RT_FULL */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c index 03595c29c5665fdc6a89c1c5e067410c950f1c40..d63df281b4957144ae70f9894807492f15988215 100644 --- a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, EXPORT_SYMBOL(__raw_spin_lock_init); +#ifndef CONFIG_PREEMPT_RT_FULL void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) { @@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, } EXPORT_SYMBOL(__rwlock_init); +#endif static void spin_dump(raw_spinlock_t *lock, const char *msg) { @@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) arch_spin_unlock(&lock->raw_lock); } +#ifndef CONFIG_PREEMPT_RT_FULL static void rwlock_bug(rwlock_t *lock, const char *msg) { if (!debug_locks_off()) @@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t 
*lock) debug_write_unlock(lock); arch_write_unlock(&lock->raw_lock); } + +#endif diff --git a/kernel/panic.c b/kernel/panic.c index 8138a676fb7d1b4f59f82c18cebc8cecea3f3d59..fa114be5c30f6150f59d3720b9802f45e242c9d2 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -480,9 +480,11 @@ static u64 oops_id; static int init_oops_id(void) { +#ifndef CONFIG_PREEMPT_RT_FULL if (!oops_id) get_random_bytes(&oops_id, sizeof(oops_id)); else +#endif oops_id++; return 0; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 6abdfdf571ee7034fc65e5a2598d7cf090cfb57b..7020a6582a4ca0a0ff3496e322436254c43cadfd 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -690,6 +690,10 @@ static int load_image_and_restore(void) return error; } +#ifndef CONFIG_SUSPEND +bool pm_in_action; +#endif + /** * hibernate - Carry out system hibernation, including saving the image. */ @@ -703,6 +707,8 @@ int hibernate(void) return -EPERM; } + pm_in_action = true; + lock_system_sleep(); /* The snapshot device should not be opened while we're running */ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { @@ -781,6 +787,7 @@ int hibernate(void) atomic_inc(&snapshot_device_available); Unlock: unlock_system_sleep(); + pm_in_action = false; pr_info("hibernation exit\n"); return error; diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 0bd595a0b6103c56439871b765ef0d4fb5ac672b..a4456772d98ec20102b9e06049ecc7ba2b38d44e 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -600,6 +600,8 @@ static int enter_state(suspend_state_t state) return error; } +bool pm_in_action; + /** * pm_suspend - Externally visible function for suspending the system. * @state: System sleep state to enter. @@ -614,6 +616,7 @@ int pm_suspend(suspend_state_t state) if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) return -EINVAL; + pm_in_action = true; pr_info("suspend entry (%s)\n", mem_sleep_labels[state]); error = enter_state(state); if (error) { @@ -623,6 +626,7 @@ int pm_suspend(suspend_state_t state) suspend_stats.success++; } pr_info("suspend exit\n"); + pm_in_action = false; return error; } EXPORT_SYMBOL(pm_suspend); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 2ba16c426ba5d865c8e1435293c6afaaefe79b42..1e0988a0bb9777cc83d633eaa682d1ab13c9c698 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -407,6 +407,65 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); printk_safe_exit_irqrestore(flags); \ } while (0) +#ifdef CONFIG_EARLY_PRINTK +struct console *early_console; + +static void early_vprintk(const char *fmt, va_list ap) +{ + if (early_console) { + char buf[512]; + int n = vscnprintf(buf, sizeof(buf), fmt, ap); + + early_console->write(early_console, buf, n); + } +} + +asmlinkage void early_printk(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + early_vprintk(fmt, ap); + va_end(ap); +} + +/* + * This is independent of any log levels - a global + * kill switch that turns off all of printk. + * + * Used by the NMI watchdog if early-printk is enabled. 
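+ *
+ * It can be enabled at boot time via the force_early_printk command
+ * line parameter (see below) or at run time via printk_kill().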
+ */ +static bool __read_mostly printk_killswitch; + +static int __init force_early_printk_setup(char *str) +{ + printk_killswitch = true; + return 0; +} +early_param("force_early_printk", force_early_printk_setup); + +void printk_kill(void) +{ + printk_killswitch = true; +} + +#ifdef CONFIG_PRINTK +static int forced_early_printk(const char *fmt, va_list ap) +{ + if (!printk_killswitch) + return 0; + early_vprintk(fmt, ap); + return 1; +} +#endif + +#else +static inline int forced_early_printk(const char *fmt, va_list ap) +{ + return 0; +} +#endif + #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ @@ -1398,12 +1457,23 @@ static int syslog_print_all(char __user *buf, int size, bool clear) u64 next_seq; u64 seq; u32 idx; + int attempts = 0; + int num_msg; text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); if (!text) return -ENOMEM; logbuf_lock_irq(); + +try_again: + attempts++; + if (attempts > 10) { + len = -EBUSY; + goto out; + } + num_msg = 0; + /* * Find first record that fits, including all following records, * into the user-provided buffer for this dump. @@ -1416,6 +1486,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) len += msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; + num_msg++; + if (num_msg > 5) { + num_msg = 0; + logbuf_unlock_irq(); + logbuf_lock_irq(); + if (clear_seq < log_first_seq) + goto try_again; + } } /* move first record forward until length fits into the buffer */ @@ -1427,6 +1505,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) len -= msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; + num_msg++; + if (num_msg > 5) { + num_msg = 0; + logbuf_unlock_irq(); + logbuf_lock_irq(); + if (clear_seq < log_first_seq) + goto try_again; + } } /* last message fitting into this dump */ @@ -1464,6 +1550,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) clear_seq = log_next_seq; clear_idx = log_next_idx; } +out: logbuf_unlock_irq(); kfree(text); @@ -1595,6 +1682,7 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) return do_syslog(type, buf, len, SYSLOG_FROM_READER); } +#ifndef CONFIG_PREEMPT_RT_FULL /* * Special console_lock variants that help to reduce the risk of soft-lockups. * They allow to pass console_lock to another printk() call using a busy wait. @@ -1735,6 +1823,15 @@ static int console_trylock_spinning(void) return 1; } +#else + +static int console_trylock_spinning(void) +{ + return console_trylock(); +} + +#endif + /* * Call the console drivers, asking them to write out * log_buf[start] to log_buf[end - 1]. 
@@ -1750,6 +1847,12 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, if (!console_drivers) return; + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { + if (in_irq() || in_nmi()) + return; + } + + migrate_disable(); for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; @@ -1765,6 +1868,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, else con->write(con, text, len); } + migrate_enable(); } int printk_delay_msec __read_mostly; @@ -1935,6 +2039,13 @@ asmlinkage int vprintk_emit(int facility, int level, unsigned long flags; u64 curr_log_seq; + /* + * Fall back to early_printk if a debugging subsystem has + * killed printk output + */ + if (unlikely(forced_early_printk(fmt, args))) + return 1; + if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; in_sched = true; @@ -1952,20 +2063,30 @@ asmlinkage int vprintk_emit(int facility, int level, /* If called from the scheduler, we can not call up(). */ if (!in_sched && pending_output) { + int may_trylock = 1; + +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * we can't take a sleeping lock with IRQs or preeption disabled + * so we can't print in these contexts + */ + if (!(preempt_count() == 0 && !irqs_disabled())) + may_trylock = 0; +#endif /* * Disable preemption to avoid being preempted while holding * console_sem which would prevent anyone from printing to * console */ - preempt_disable(); + migrate_disable(); /* * Try to acquire and then immediately release the console * semaphore. The release will print out buffers and wake up * /dev/kmsg and syslog() users. */ - if (console_trylock_spinning()) + if (may_trylock && console_trylock_spinning()) console_unlock(); - preempt_enable(); + migrate_enable(); } if (pending_output) @@ -2079,26 +2200,6 @@ static bool suppress_message_printing(int level) { return false; } #endif /* CONFIG_PRINTK */ -#ifdef CONFIG_EARLY_PRINTK -struct console *early_console; - -asmlinkage __visible void early_printk(const char *fmt, ...) -{ - va_list ap; - char buf[512]; - int n; - - if (!early_console) - return; - - va_start(ap, fmt); - n = vscnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - - early_console->write(early_console, buf, n); -} -#endif - static int __add_preferred_console(char *name, int idx, char *options, char *brl_options) { @@ -2457,6 +2558,10 @@ void console_unlock(void) console_seq++; raw_spin_unlock(&logbuf_lock); +#ifdef CONFIG_PREEMPT_RT_FULL + printk_safe_exit_irqrestore(flags); + call_console_drivers(ext_text, ext_len, text, len); +#else /* * While actively printing out messages, if another printk() * were to occur on another CPU, it may wait for this one to @@ -2475,6 +2580,7 @@ void console_unlock(void) } printk_safe_exit_irqrestore(flags); +#endif if (do_cond_resched) cond_resched(); @@ -2522,6 +2628,11 @@ void console_unblank(void) { struct console *c; + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { + if (in_irq() || in_nmi()) + return; + } + /* * console_unblank can no longer be called in interrupt context unless * oops_in_progress is set to 1.. 
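
The retry logic added to syslog_print_all() above bounds logbuf_lock hold times: the lock is dropped and re-taken every five records, and the walk restarts if the producer overwrote the records we were counting in the meantime. A minimal user-space sketch of the same pattern follows; all names (struct ring, drain(), the batch size) are hypothetical and only illustrate the shape of the loop, not the kernel code:

#include <pthread.h>
#include <stdbool.h>

struct ring {
	pthread_mutex_t lock;
	unsigned long first_seq;	/* oldest record still present */
	unsigned long next_seq;		/* one past the newest record */
};

/*
 * Consume records starting at 'start'. Returns false if the producer
 * overwrote our position while the lock was dropped and the caller has
 * to restart, mirroring the goto try_again above.
 */
static bool drain(struct ring *r, unsigned long start)
{
	unsigned long seq = start;
	int batch = 0;

	pthread_mutex_lock(&r->lock);
	while (seq < r->next_seq) {
		/* process record 'seq' here */
		seq++;
		if (++batch > 5) {	/* bound the lock hold time */
			batch = 0;
			pthread_mutex_unlock(&r->lock);
			pthread_mutex_lock(&r->lock);
			if (start < r->first_seq) {
				/* lost our place: caller restarts */
				pthread_mutex_unlock(&r->lock);
				return false;
			}
		}
	}
	pthread_mutex_unlock(&r->lock);
	return true;
}

The kernel variant additionally caps the number of restarts (ten attempts) and gives up with -EBUSY, which a caller of a sketch like this would express by bounding the retries.
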
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 8a98e3e78c7043894cf16f5d7a8b05978e730951..b7d2a22534da638e9c8500f3c2c951e4fc3f797a 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -190,7 +190,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) spin_lock_irq(&task->sighand->siglock); if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && !__fatal_signal_pending(task)) { - task->state = __TASK_TRACED; + unsigned long flags; + + raw_spin_lock_irqsave(&task->pi_lock, flags); + if (task->state & __TASK_TRACED) + task->state = __TASK_TRACED; + else + task->saved_state = __TASK_TRACED; + raw_spin_unlock_irqrestore(&task->pi_lock, flags); ret = true; } spin_unlock_irq(&task->sighand->siglock); @@ -200,8 +207,8 @@ static bool ptrace_freeze_traced(struct task_struct *task) static void ptrace_unfreeze_traced(struct task_struct *task) { - if (task->state != __TASK_TRACED) - return; + unsigned long flags; + bool frozen = true; WARN_ON(!task->ptrace || task->parent != current); @@ -210,12 +217,19 @@ static void ptrace_unfreeze_traced(struct task_struct *task) * Recheck state under the lock to close this race. */ spin_lock_irq(&task->sighand->siglock); - if (task->state == __TASK_TRACED) { - if (__fatal_signal_pending(task)) - wake_up_state(task, __TASK_TRACED); - else - task->state = TASK_TRACED; - } + + raw_spin_lock_irqsave(&task->pi_lock, flags); + if (task->state == __TASK_TRACED) + task->state = TASK_TRACED; + else if (task->saved_state == __TASK_TRACED) + task->saved_state = TASK_TRACED; + else + frozen = false; + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + + if (frozen && __fatal_signal_pending(task)) + wake_up_state(task, __TASK_TRACED); + spin_unlock_irq(&task->sighand->siglock); } diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index 9210379c0353676b79bc92e0c872fb41f1ba5693..a243a78ff38c0d0e236fd2a67aa8d6f3dfc6b449 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ_COMMON && SMP && RCU_EXPERT + depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL default n help This option permits CPUs to enter dynticks-idle state even if @@ -190,8 +190,8 @@ config RCU_FAST_NO_HZ config RCU_BOOST bool "Enable RCU priority boosting" - depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT - default n + depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT_FULL + default y if PREEMPT_RT_FULL help This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. 
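
The ptrace_freeze_traced()/ptrace_unfreeze_traced() changes above only make sense together with the saved_state mechanism used by the RT sleeping spinlocks earlier in this series: a traced task that blocks on a spinlock_t parks __TASK_TRACED in ->saved_state while ->state shows TASK_UNINTERRUPTIBLE. The following stand-alone sketch (hypothetical, simplified types and constants; the real kernel uses bitmask states under pi_lock) shows why the freeze path must inspect both fields:

#include <assert.h>

enum { TASK_RUNNING, TASK_UNINTERRUPTIBLE, __TASK_TRACED };

struct task {
	int state;		/* what the scheduler acts on */
	int saved_state;	/* real state, parked while on an rt spinlock */
};

/* rt spinlock slow path: park the current state ... */
static void rt_spin_block(struct task *t)
{
	t->saved_state = t->state;
	t->state = TASK_UNINTERRUPTIBLE;
}

/* ... and restore it once the lock has been acquired. */
static void rt_spin_unblock(struct task *t)
{
	t->state = t->saved_state;
	t->saved_state = TASK_RUNNING;
}

int main(void)
{
	struct task t = { .state = __TASK_TRACED,
			  .saved_state = TASK_RUNNING };

	rt_spin_block(&t);
	/* A freeze that only looked at t.state would miss the tracee: */
	assert(t.state == TASK_UNINTERRUPTIBLE);
	assert(t.saved_state == __TASK_TRACED);

	rt_spin_unblock(&t);
	assert(t.state == __TASK_TRACED);
	return 0;
}

This is exactly the case the hunk above handles by updating whichever of ->state or ->saved_state currently holds __TASK_TRACED, under pi_lock so the spinlock slow path cannot swap the fields concurrently.
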
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 4d04683c31b2b20d011a4988e9d0e114878798c5..808cce9a5d43e2f1234317fc9ecee42594f749fa 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads(void) { } static inline int rcu_get_gp_kthreads_prio(void) { return 0; } #else /* #ifdef CONFIG_TINY_RCU */ unsigned long rcu_get_gp_seq(void); -unsigned long rcu_bh_get_gp_seq(void); unsigned long rcu_sched_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); @@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp); void show_rcu_gp_kthreads(void); int rcu_get_gp_kthreads_prio(void); void rcu_force_quiescent_state(void); -void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; extern struct workqueue_struct *rcu_par_gp_wq; + +#ifdef CONFIG_PREEMPT_RT_FULL +#define rcu_bh_get_gp_seq rcu_get_gp_seq +#define rcu_bh_force_quiescent_state rcu_force_quiescent_state +#else +unsigned long rcu_bh_get_gp_seq(void); +void rcu_bh_force_quiescent_state(void); +#endif + #endif /* #else #ifdef CONFIG_TINY_RCU */ #ifdef CONFIG_RCU_NOCB_CPU diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 0b7af7e2bcbb17ec8a52b2fef39faf8ef87427ed..e95d121efc8034d4539f596560d1635a23617d1b 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = { .name = "rcu" }; +#ifndef CONFIG_PREEMPT_RT_FULL /* * Definitions for rcu_bh torture testing. */ @@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops = { .name = "rcu_bh" }; +#else +static struct rcu_torture_ops rcu_bh_ops = { + .ttype = INVALID_RCU_FLAVOR, +}; +#endif + /* * Don't even think about trying any of these in real life!!! * The names includes "busted", and they really means it! diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 1ff17e297f0cc625872c302f84ebd028849b4d7c..0f09a1a9e17c6217b4e3a292d01c55033ffd1bd7 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -38,6 +38,8 @@ #include #include #include +#include <linux/cpu.h> +#include <linux/locallock.h> #include "rcu.h" #include "rcu_segcblist.h" @@ -460,21 +462,6 @@ static void srcu_gp_start(struct srcu_struct *sp) WARN_ON_ONCE(state != SRCU_STATE_SCAN1); } -/* - * Track online CPUs to guide callback workqueue placement. - */ -DEFINE_PER_CPU(bool, srcu_online); - -void srcu_online_cpu(unsigned int cpu) -{ - WRITE_ONCE(per_cpu(srcu_online, cpu), true); -} - -void srcu_offline_cpu(unsigned int cpu) -{ - WRITE_ONCE(per_cpu(srcu_online, cpu), false); -} - /* * Place the workqueue handler on the specified CPU if online, otherwise * just run it whereever. This is useful for placing workqueue handlers @@ -486,12 +473,12 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, { bool ret; - preempt_disable(); - if (READ_ONCE(per_cpu(srcu_online, cpu))) + cpus_read_lock(); + if (cpu_online(cpu)) ret = queue_delayed_work_on(cpu, wq, dwork, delay); else ret = queue_delayed_work(wq, dwork, delay); - preempt_enable(); + cpus_read_unlock(); return ret; } @@ -774,6 +761,8 @@ static void srcu_flip(struct srcu_struct *sp) * negligible when amoritized over that time period, and the extra latency * of a needlessly non-expedited grace period is similarly negligible.
*/ +static DEFINE_LOCAL_IRQ_LOCK(sp_llock); + static bool srcu_might_be_idle(struct srcu_struct *sp) { unsigned long curseq; @@ -782,13 +771,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) unsigned long t; /* If the local srcu_data structure has callbacks, not idle. */ - local_irq_save(flags); + local_lock_irqsave(sp_llock, flags); sdp = this_cpu_ptr(sp->sda); if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { - local_irq_restore(flags); + local_unlock_irqrestore(sp_llock, flags); return false; /* Callbacks already present, so not idle. */ } - local_irq_restore(flags); + local_unlock_irqrestore(sp_llock, flags); /* * No local callbacks, so probabalistically probe global state. @@ -866,7 +855,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, return; } rhp->func = func; - local_irq_save(flags); + local_lock_irqsave(sp_llock, flags); sdp = this_cpu_ptr(sp->sda); spin_lock_rcu_node(sdp); rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); @@ -882,7 +871,8 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, sdp->srcu_gp_seq_needed_exp = s; needexp = true; } - spin_unlock_irqrestore_rcu_node(sdp, flags); + spin_unlock_rcu_node(sdp); + local_unlock_irqrestore(sp_llock, flags); if (needgp) srcu_funnel_gp_start(sp, sdp, s, do_norm); else if (needexp) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f7e89c989df75e190d96eecbc13df42be56f242b..278fe66bfb707e85e070998f5ff19ff631891f6c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -61,6 +61,13 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include "../time/tick-internal.h" #include "tree.h" #include "rcu.h" @@ -244,6 +251,19 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } +#ifdef CONFIG_PREEMPT_RT_FULL +static void rcu_preempt_qs(void); + +void rcu_bh_qs(void) +{ + unsigned long flags; + + /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ + local_irq_save(flags); + rcu_preempt_qs(); + local_irq_restore(flags); +} +#else void rcu_bh_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); @@ -254,6 +274,7 @@ void rcu_bh_qs(void) __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } } +#endif /* * Steal a bit from the bottom of ->dynticks for idle entry/exit @@ -568,6 +589,7 @@ unsigned long rcu_sched_get_gp_seq(void) } EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Return the number of RCU-bh GPs completed thus far for debug & stats. */ @@ -576,6 +598,7 @@ unsigned long rcu_bh_get_gp_seq(void) return READ_ONCE(rcu_bh_state.gp_seq); } EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); +#endif /* * Return the number of RCU expedited batches completed thus far for @@ -599,6 +622,7 @@ unsigned long rcu_exp_batches_completed_sched(void) } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Force a quiescent state. */ @@ -617,6 +641,13 @@ void rcu_bh_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); +#else +void rcu_force_quiescent_state(void) +{ +} +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); +#endif + /* * Force a quiescent state for RCU-sched. 
*/ @@ -674,9 +705,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, case RCU_FLAVOR: rsp = rcu_state_p; break; +#ifndef CONFIG_PREEMPT_RT_FULL case RCU_BH_FLAVOR: rsp = &rcu_bh_state; break; +#endif case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; break; @@ -1263,6 +1296,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && (rnp->ffmask & rdp->grpmask)) { init_irq_work(&rdp->rcu_iw, rcu_iw_handler); + rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ; rdp->rcu_iw_pending = true; rdp->rcu_iw_gp_seq = rnp->gp_seq; irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); @@ -2870,18 +2904,17 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* * Do RCU core processing for the current CPU. */ -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(void) { struct rcu_state *rsp; if (cpu_is_offline(smp_processor_id())) return; - trace_rcu_utilization(TPS("Start RCU core")); for_each_rcu_flavor(rsp) __rcu_process_callbacks(rsp); - trace_rcu_utilization(TPS("End RCU core")); } +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); /* * Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, @@ -2893,19 +2926,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; - if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); - return; - } - invoke_rcu_callbacks_kthread(); + rcu_do_batch(rsp, rdp); } +static void rcu_wake_cond(struct task_struct *t, int status) +{ + /* + * If the thread is yielding, only wake it when this + * is invoked from idle + */ + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) + wake_up_process(t); +} + +/* + * Wake up this CPU's rcuc kthread to do RCU core processing. + */ static void invoke_rcu_core(void) { - if (cpu_online(smp_processor_id())) - raise_softirq(RCU_SOFTIRQ); + unsigned long flags; + struct task_struct *t; + + if (!cpu_online(smp_processor_id())) + return; + local_irq_save(flags); + __this_cpu_write(rcu_cpu_has_work, 1); + t = __this_cpu_read(rcu_cpu_kthread_task); + if (t != NULL && current != t) + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); + local_irq_restore(flags); +} + +static void rcu_cpu_kthread_park(unsigned int cpu) +{ + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +} + +static int rcu_cpu_kthread_should_run(unsigned int cpu) +{ + return __this_cpu_read(rcu_cpu_has_work); } +/* + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the + * RCU softirq used in flavors and configurations of RCU that do not + * support RCU priority boosting. 
+ */ +static void rcu_cpu_kthread(unsigned int cpu) +{ + unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); + char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); + int spincnt; + + for (spincnt = 0; spincnt < 10; spincnt++) { + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); + local_bh_disable(); + *statusp = RCU_KTHREAD_RUNNING; + this_cpu_inc(rcu_cpu_kthread_loops); + local_irq_disable(); + work = *workp; + *workp = 0; + local_irq_enable(); + if (work) + rcu_process_callbacks(); + local_bh_enable(); + if (*workp == 0) { + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); + *statusp = RCU_KTHREAD_WAITING; + return; + } + } + *statusp = RCU_KTHREAD_YIELDING; + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); + schedule_timeout_interruptible(2); + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); + *statusp = RCU_KTHREAD_WAITING; +} + +static struct smp_hotplug_thread rcu_cpu_thread_spec = { + .store = &rcu_cpu_kthread_task, + .thread_should_run = rcu_cpu_kthread_should_run, + .thread_fn = rcu_cpu_kthread, + .thread_comm = "rcuc/%u", + .setup = rcu_cpu_kthread_setup, + .park = rcu_cpu_kthread_park, +}; + +/* + * Spawn per-CPU RCU core processing kthreads. + */ +static int __init rcu_spawn_core_kthreads(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(rcu_cpu_has_work, cpu) = 0; + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + return 0; +} +early_initcall(rcu_spawn_core_kthreads); + /* * Handle any core-RCU processing required by a call_rcu() invocation. */ @@ -3057,6 +3177,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(call_rcu_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. @@ -3084,6 +3205,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); +#endif /* * Queue an RCU callback for lazy invocation after a grace period. @@ -3169,6 +3291,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * @@ -3195,6 +3318,7 @@ void synchronize_rcu_bh(void) wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); +#endif /** * get_state_synchronize_rcu - Snapshot current RCU state @@ -3502,6 +3626,7 @@ static void _rcu_barrier(struct rcu_state *rsp) mutex_unlock(&rsp->barrier_mutex); } +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ @@ -3510,6 +3635,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); +#endif /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. @@ -3659,8 +3785,6 @@ int rcutree_online_cpu(unsigned int cpu) rnp->ffmask |= rdp->grpmask; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } - if (IS_ENABLED(CONFIG_TREE_SRCU)) - srcu_online_cpu(cpu); if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return 0; /* Too early in boot for scheduler work. 
*/ sync_sched_exp_online_cleanup(cpu); @@ -3688,8 +3812,6 @@ int rcutree_offline_cpu(unsigned int cpu) } rcutree_affinity_setting(cpu, cpu); - if (IS_ENABLED(CONFIG_TREE_SRCU)) - srcu_offline_cpu(cpu); return 0; } @@ -4157,12 +4279,13 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); +#ifndef CONFIG_PREEMPT_RT_FULL rcu_init_one(&rcu_bh_state); +#endif rcu_init_one(&rcu_sched_state); if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); __rcu_init_preempt(); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* * We don't need protection against CPU-hotplug here because diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4e74df768c579d9fe356081b0f421bdbab731275..98257d20feb2a43a88c8011c71880e76d1de6fcd 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavors; */ extern struct rcu_state rcu_sched_state; +#ifndef CONFIG_PREEMPT_RT_FULL extern struct rcu_state rcu_bh_state; +#endif #ifdef CONFIG_PREEMPT_RCU extern struct rcu_state rcu_preempt_state; @@ -421,12 +423,10 @@ extern struct rcu_state rcu_preempt_state; int rcu_dynticks_snap(struct rcu_dynticks *rdtp); -#ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DECLARE_PER_CPU(char, rcu_cpu_has_work); -#endif /* #ifdef CONFIG_RCU_BOOST */ #ifndef RCU_TREE_NONCORE @@ -449,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); static bool rcu_is_callbacks_kthread(void); +static void rcu_cpu_kthread_setup(unsigned int cpu); #ifdef CONFIG_RCU_BOOST static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 72770a551c24db3ff477427925372c3439c5e5f1..ac6d6fdf57839071d8b22020379bec9c72d4e70c 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, smp_call_func_t func) { - int cpu; struct rcu_node *rnp; trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset")); @@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, continue; } INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); - preempt_disable(); - cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); - /* If all offline, queue the work on an unbound CPU. */ - if (unlikely(cpu > rnp->grphi)) - cpu = WORK_CPU_UNBOUND; - queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); - preempt_enable(); + queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work); rnp->exp_need_flush = true; } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5f6de49dc78e555c45f9dd71ce890bf433554200..56639c8ed55099bc9cf75ef1ccaa04764da9a17c 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -24,41 +24,16 @@ * Paul E. McKenney */ -#include -#include -#include -#include -#include -#include -#include -#include "../time/tick-internal.h" - -#ifdef CONFIG_RCU_BOOST - #include "../locking/rtmutex_common.h" /* * Control variables for per-CPU and per-rcu_node kthreads. These * handle all flavors of RCU. 
*/ -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); -#else /* #ifdef CONFIG_RCU_BOOST */ - -/* - * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST, - * all uses are in dead code. Provide a definition to keep the compiler - * happy, but add WARN_ON_ONCE() to complain if used in the wrong place. - * This probably needs to be excluded from -rt builds. - */ -#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; }) -#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1) - -#endif /* #else #ifdef CONFIG_RCU_BOOST */ - #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ @@ -337,9 +312,13 @@ static void rcu_preempt_note_context_switch(bool preempt) struct task_struct *t = current; struct rcu_data *rdp; struct rcu_node *rnp; + int sleeping_l = 0; lockdep_assert_irqs_disabled(); - WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); +#if defined(CONFIG_PREEMPT_RT_FULL) + sleeping_l = t->sleeping_lock; +#endif + WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l); if (t->rcu_read_lock_nesting > 0 && !t->rcu_read_unlock_special.b.blocked) { @@ -520,7 +499,7 @@ static void rcu_read_unlock_special(struct task_struct *t) } /* Hardware IRQ handlers cannot block, complain if they get here. */ - if (in_irq() || in_serving_softirq()) { + if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { lockdep_rcu_suspicious(__FILE__, __LINE__, "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", @@ -1023,18 +1002,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +/* + * If boosting, set rcuc kthreads to realtime priority. + */ +static void rcu_cpu_kthread_setup(unsigned int cpu) +{ #ifdef CONFIG_RCU_BOOST + struct sched_param sp; -static void rcu_wake_cond(struct task_struct *t, int status) -{ - /* - * If the thread is yielding, only wake it when this - * is invoked from idle - */ - if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) - wake_up_process(t); + sp.sched_priority = kthread_prio; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +#endif /* #ifdef CONFIG_RCU_BOOST */ } +#ifdef CONFIG_RCU_BOOST + /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the @@ -1172,23 +1154,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } } -/* - * Wake up the per-CPU kthread to invoke RCU callbacks. - */ -static void invoke_rcu_callbacks_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && - current != __this_cpu_read(rcu_cpu_kthread_task)) { - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), - __this_cpu_read(rcu_cpu_kthread_status)); - } - local_irq_restore(flags); -} - /* * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. 
@@ -1243,67 +1208,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); - rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); -} - -static void rcu_cpu_kthread_setup(unsigned int cpu) -{ - struct sched_param sp; - - sp.sched_priority = kthread_prio; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -} - -static void rcu_cpu_kthread_park(unsigned int cpu) -{ - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -} - -static int rcu_cpu_kthread_should_run(unsigned int cpu) -{ - return __this_cpu_read(rcu_cpu_has_work); -} - -/* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * RCU softirq used in flavors and configurations of RCU that do not - * support RCU priority boosting. - */ -static void rcu_cpu_kthread(unsigned int cpu) -{ - unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); - char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); - int spincnt; - - for (spincnt = 0; spincnt < 10; spincnt++) { - trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); - local_bh_disable(); - *statusp = RCU_KTHREAD_RUNNING; - this_cpu_inc(rcu_cpu_kthread_loops); - local_irq_disable(); - work = *workp; - *workp = 0; - local_irq_enable(); - if (work) - rcu_kthread_do_work(); - local_bh_enable(); - if (*workp == 0) { - trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); - *statusp = RCU_KTHREAD_WAITING; - return; - } - } - *statusp = RCU_KTHREAD_YIELDING; - trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); - schedule_timeout_interruptible(2); - trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); - *statusp = RCU_KTHREAD_WAITING; -} - /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. The CPU hotplug lock is still @@ -1334,26 +1238,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) free_cpumask_var(cm); } -static struct smp_hotplug_thread rcu_cpu_thread_spec = { - .store = &rcu_cpu_kthread_task, - .thread_should_run = rcu_cpu_kthread_should_run, - .thread_fn = rcu_cpu_kthread, - .thread_comm = "rcuc/%u", - .setup = rcu_cpu_kthread_setup, - .park = rcu_cpu_kthread_park, -}; - /* * Spawn boost kthreads -- called as soon as the scheduler is running. 
*/ static void __init rcu_spawn_boost_kthreads(void) { struct rcu_node *rnp; - int cpu; - - for_each_possible_cpu(cpu) - per_cpu(rcu_cpu_has_work, cpu) = 0; - BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); rcu_for_each_leaf_node(rcu_state_p, rnp) (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } @@ -1376,11 +1266,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } -static void invoke_rcu_callbacks_kthread(void) -{ - WARN_ON_ONCE(1); -} - static bool rcu_is_callbacks_kthread(void) { return false; @@ -1404,7 +1289,7 @@ static void rcu_prepare_kthreads(int cpu) #endif /* #else #ifdef CONFIG_RCU_BOOST */ -#if !defined(CONFIG_RCU_FAST_NO_HZ) +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) /* * Check to see if any future RCU-related work will need to be done @@ -1420,7 +1305,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = KTIME_MAX; return rcu_cpu_has_callbacks(NULL); } +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ +#if !defined(CONFIG_RCU_FAST_NO_HZ) /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up * after it. @@ -1517,6 +1404,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) return cbs_ready; } +#ifndef CONFIG_PREEMPT_RT_FULL + /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. Tell the @@ -1559,6 +1448,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = basemono + dj * TICK_NSEC; return 0; } +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ /* * Prepare a CPU for idle from an RCU perspective. The first major task diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 81688a1335529e7b7420d503b2f9923235e2f5b7..ed75addd3ccda42c4eaabafa91c4187863ba9033 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -68,8 +68,10 @@ extern int rcu_expedited; /* from sysctl */ module_param(rcu_expedited, int, 0); extern int rcu_normal; /* from sysctl */ module_param(rcu_normal, int, 0); -static int rcu_normal_after_boot; +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); +#ifndef CONFIG_PREEMPT_RT_FULL module_param(rcu_normal_after_boot, int, 0); +#endif #endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -288,6 +290,7 @@ int rcu_read_lock_held(void) } EXPORT_SYMBOL_GPL(rcu_read_lock_held); +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 
* @@ -314,6 +317,7 @@ int rcu_read_lock_bh_held(void) return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); +#endif #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 7fe183404c383f8d4611d47eb9d4299994f8b7ee..2b765aa4e2c4f20adfe2d4ef39f64800dab12ef9 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -18,7 +18,7 @@ endif obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle.o fair.o rt.o deadline.o -obj-y += wait.o wait_bit.o swait.o completion.o +obj-y += wait.o wait_bit.o swait.o swork.o completion.o obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index a1ad5b7d5521be6340bf221c1a1c5983507a1754..49c14137988ea5947e22c1229d2512075f2f8efa 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -29,12 +29,12 @@ void complete(struct completion *x) { unsigned long flags; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); if (x->done != UINT_MAX) x->done++; - __wake_up_locked(&x->wait, TASK_NORMAL, 1); - spin_unlock_irqrestore(&x->wait.lock, flags); + swake_up_locked(&x->wait); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); @@ -58,10 +58,10 @@ void complete_all(struct completion *x) { unsigned long flags; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); x->done = UINT_MAX; - __wake_up_locked(&x->wait, TASK_NORMAL, 0); - spin_unlock_irqrestore(&x->wait.lock, flags); + swake_up_all_locked(&x->wait); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); @@ -70,20 +70,20 @@ do_wait_for_common(struct completion *x, long (*action)(long), long timeout, int state) { if (!x->done) { - DECLARE_WAITQUEUE(wait, current); + DECLARE_SWAITQUEUE(wait); - __add_wait_queue_entry_tail_exclusive(&x->wait, &wait); do { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; } + __prepare_to_swait(&x->wait, &wait); __set_current_state(state); - spin_unlock_irq(&x->wait.lock); + raw_spin_unlock_irq(&x->wait.lock); timeout = action(timeout); - spin_lock_irq(&x->wait.lock); + raw_spin_lock_irq(&x->wait.lock); } while (!x->done && timeout); - __remove_wait_queue(&x->wait, &wait); + __finish_swait(&x->wait, &wait); if (!x->done) return timeout; } @@ -100,9 +100,9 @@ __wait_for_common(struct completion *x, complete_acquire(x); - spin_lock_irq(&x->wait.lock); + raw_spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, action, timeout, state); - spin_unlock_irq(&x->wait.lock); + raw_spin_unlock_irq(&x->wait.lock); complete_release(x); @@ -291,12 +291,12 @@ bool try_wait_for_completion(struct completion *x) if (!READ_ONCE(x->done)) return false; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = false; else if (x->done != UINT_MAX) x->done--; - spin_unlock_irqrestore(&x->wait.lock, flags); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(try_wait_for_completion); @@ -322,8 +322,8 @@ bool completion_done(struct completion *x) * otherwise we can end up freeing the completion before complete() * is done referencing it. 
*/ - spin_lock_irqsave(&x->wait.lock, flags); - spin_unlock_irqrestore(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); return true; } EXPORT_SYMBOL(completion_done); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 32af895bd86b324484565f63d0d3a3a1788429aa..410882ff20078ab7baf52e56e50db531b8c566fd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_features = * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ +#ifdef CONFIG_PREEMPT_RT_FULL +const_debug unsigned int sysctl_sched_nr_migrate = 8; +#else const_debug unsigned int sysctl_sched_nr_migrate = 32; +#endif /* * period over which we measure -rt task CPU usage in us. @@ -315,7 +319,7 @@ static void hrtick_rq_init(struct rq *rq) rq->hrtick_csd.info = rq; #endif - hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); rq->hrtick_timer.function = hrtick; } #else /* CONFIG_SCHED_HRTICK */ @@ -397,9 +401,15 @@ static bool set_nr_if_polling(struct task_struct *p) #endif #endif -void wake_q_add(struct wake_q_head *head, struct task_struct *task) +void __wake_q_add(struct wake_q_head *head, struct task_struct *task, + bool sleeper) { - struct wake_q_node *node = &task->wake_q; + struct wake_q_node *node; + + if (sleeper) + node = &task->wake_q_sleeper; + else + node = &task->wake_q; /* * Atomically grab the task, if ->wake_q is !nil already it means @@ -422,24 +432,32 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) head->lastp = &node->next; } -void wake_up_q(struct wake_q_head *head) +void __wake_up_q(struct wake_q_head *head, bool sleeper) { struct wake_q_node *node = head->first; while (node != WAKE_Q_TAIL) { struct task_struct *task; - task = container_of(node, struct task_struct, wake_q); + if (sleeper) + task = container_of(node, struct task_struct, wake_q_sleeper); + else + task = container_of(node, struct task_struct, wake_q); BUG_ON(!task); /* Task can safely be re-inserted now: */ node = node->next; - task->wake_q.next = NULL; - + if (sleeper) + task->wake_q_sleeper.next = NULL; + else + task->wake_q.next = NULL; /* * wake_up_process() executes a full barrier, which pairs with * the queueing in wake_q_add() so as not to miss wakeups. 
*/ - wake_up_process(task); + if (sleeper) + wake_up_lock_sleeper(task); + else + wake_up_process(task); put_task_struct(task); } } @@ -475,6 +493,48 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } +#ifdef CONFIG_PREEMPT_LAZY + +static int tsk_is_polling(struct task_struct *p) +{ +#ifdef TIF_POLLING_NRFLAG + return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); +#else + return 0; +#endif +} + +void resched_curr_lazy(struct rq *rq) +{ + struct task_struct *curr = rq->curr; + int cpu; + + if (!sched_feat(PREEMPT_LAZY)) { + resched_curr(rq); + return; + } + + lockdep_assert_held(&rq->lock); + + if (test_tsk_need_resched(curr)) + return; + + if (test_tsk_need_resched_lazy(curr)) + return; + + set_tsk_need_resched_lazy(curr); + + cpu = cpu_of(rq); + if (cpu == smp_processor_id()) + return; + + /* NEED_RESCHED_LAZY must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(curr)) + smp_send_reschedule(cpu); +} +#endif + void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -878,10 +938,10 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) */ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) { - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; - if (is_per_cpu_kthread(p)) + if (is_per_cpu_kthread(p) || __migrate_disabled(p)) return cpu_online(cpu); return cpu_active(cpu); @@ -930,6 +990,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, struct migration_arg { struct task_struct *task; int dest_cpu; + bool done; }; /* @@ -965,6 +1026,11 @@ static int migration_cpu_stop(void *data) struct task_struct *p = arg->task; struct rq *rq = this_rq(); struct rq_flags rf; + int dest_cpu = arg->dest_cpu; + + /* We don't look at arg after this point. */ + smp_mb(); + arg->done = true; /* * The original target CPU might have gone down and we might @@ -973,7 +1039,7 @@ static int migration_cpu_stop(void *data) local_irq_disable(); /* * We need to explicitly wake pending tasks before running - * __migrate_task() such that we will not miss enforcing cpus_allowed + * __migrate_task() such that we will not miss enforcing cpus_ptr * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 
*/ sched_ttwu_pending(); @@ -987,9 +1053,9 @@ static int migration_cpu_stop(void *data) */ if (task_rq(p) == rq) { if (task_on_rq_queued(p)) - rq = __migrate_task(rq, &rf, p, arg->dest_cpu); + rq = __migrate_task(rq, &rf, p, dest_cpu); else - p->wake_cpu = arg->dest_cpu; + p->wake_cpu = dest_cpu; } rq_unlock(rq, &rf); raw_spin_unlock(&p->pi_lock); @@ -1004,10 +1070,19 @@ static int migration_cpu_stop(void *data) */ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) { - cpumask_copy(&p->cpus_allowed, new_mask); - p->nr_cpus_allowed = cpumask_weight(new_mask); + cpumask_copy(&p->cpus_mask, new_mask); + if (p->cpus_ptr == &p->cpus_mask) + p->nr_cpus_allowed = cpumask_weight(new_mask); } +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +int __migrate_disabled(struct task_struct *p) +{ + return p->migrate_disable; +} +EXPORT_SYMBOL_GPL(__migrate_disabled); +#endif + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { struct rq *rq = task_rq(p); @@ -1074,7 +1149,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } - if (cpumask_equal(&p->cpus_allowed, new_mask)) + if (cpumask_equal(&p->cpus_mask, new_mask)) goto out; dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); @@ -1096,7 +1171,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, } /* Can the task run on the task's current CPU? If so, we're done */ - if (cpumask_test_cpu(task_cpu(p), new_mask)) + if (cpumask_test_cpu(task_cpu(p), new_mask) || + p->cpus_ptr != &p->cpus_mask) goto out; if (task_running(rq, p) || p->state == TASK_WAKING) { @@ -1237,10 +1313,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; - if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) + if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) goto unlock; - if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) + if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); @@ -1282,10 +1358,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; - if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) + if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) goto out; - if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) + if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); @@ -1296,6 +1372,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, } #endif /* CONFIG_NUMA_BALANCING */ +static bool check_task_state(struct task_struct *p, long match_state) +{ + bool match = false; + + raw_spin_lock_irq(&p->pi_lock); + if (p->state == match_state || p->saved_state == match_state) + match = true; + raw_spin_unlock_irq(&p->pi_lock); + + return match; +} + /* * wait_task_inactive - wait for a thread to unschedule. * @@ -1340,7 +1428,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! 
*/ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state)) + if (match_state && !check_task_state(p, match_state)) return 0; cpu_relax(); } @@ -1355,7 +1443,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; - if (!match_state || p->state == match_state) + if (!match_state || p->state == match_state || + p->saved_state == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); @@ -1430,7 +1519,7 @@ void kick_process(struct task_struct *p) EXPORT_SYMBOL_GPL(kick_process); /* - * ->cpus_allowed is protected by both rq->lock and p->pi_lock + * ->cpus_ptr is protected by both rq->lock and p->pi_lock * * A few notes on cpu_active vs cpu_online: * @@ -1470,14 +1559,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p) for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; - if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) + if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) return dest_cpu; } } for (;;) { /* Any allowed, online CPU? */ - for_each_cpu(dest_cpu, &p->cpus_allowed) { + for_each_cpu(dest_cpu, p->cpus_ptr) { if (!is_cpu_allowed(p, dest_cpu)) continue; @@ -1521,7 +1610,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) } /* - * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. + * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. */ static inline int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) @@ -1531,11 +1620,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else - cpu = cpumask_any(&p->cpus_allowed); + cpu = cpumask_any(p->cpus_ptr); /* * In order not to call set_task_cpu() on a blocking task we need - * to rely on ttwu() to place the task on a valid ->cpus_allowed + * to rely on ttwu() to place the task on a valid ->cpus_ptr * CPU. * * Since this is common to all placement strategies, this lives here. @@ -1638,10 +1727,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl { activate_task(rq, p, en_flags); p->on_rq = TASK_ON_RQ_QUEUED; - - /* If a worker is waking up, notify the workqueue: */ - if (p->flags & PF_WQ_WORKER) - wq_worker_waking_up(p, cpu_of(rq)); } /* @@ -1963,8 +2048,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); - if (!(p->state & state)) + if (!(p->state & state)) { + /* + * The task might be running due to a spinlock sleeper + * wakeup. Check the saved state and set it to running + * if the wakeup condition is true. + */ + if (!(wake_flags & WF_LOCK_SLEEPER)) { + if (p->saved_state & state) { + p->saved_state = TASK_RUNNING; + success = 1; + } + } goto out; + } + + /* + * If this is a regular wakeup, then we can unconditionally + * clear the saved state of a "lock sleeper". + */ + if (!(wake_flags & WF_LOCK_SLEEPER)) + p->saved_state = TASK_RUNNING; trace_sched_waking(p); @@ -2061,56 +2165,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) return success; } -/** - * try_to_wake_up_local - try to wake up a local task with rq lock held - * @p: the thread to be awakened - * @rf: request-queue flags for pinning - * - * Put @p on the run-queue if it's not already there. 
The caller must - * ensure that this_rq() is locked, @p is bound to this_rq() and not - * the current task. - */ -static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) -{ - struct rq *rq = task_rq(p); - - if (WARN_ON_ONCE(rq != this_rq()) || - WARN_ON_ONCE(p == current)) - return; - - lockdep_assert_held(&rq->lock); - - if (!raw_spin_trylock(&p->pi_lock)) { - /* - * This is OK, because current is on_cpu, which avoids it being - * picked for load-balance and preemption/IRQs are still - * disabled avoiding further scheduler activity on it and we've - * not yet picked a replacement task. - */ - rq_unlock(rq, rf); - raw_spin_lock(&p->pi_lock); - rq_relock(rq, rf); - } - - if (!(p->state & TASK_NORMAL)) - goto out; - - trace_sched_waking(p); - - if (!task_on_rq_queued(p)) { - if (p->in_iowait) { - delayacct_blkio_end(p); - atomic_dec(&rq->nr_iowait); - } - ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); - } - - ttwu_do_wakeup(rq, p, 0, rf); - ttwu_stat(p, smp_processor_id(), 0); -out: - raw_spin_unlock(&p->pi_lock); -} - /** * wake_up_process - Wake up a specific process * @p: The process to be woken up. @@ -2128,6 +2182,18 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); +/** + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" + * @p: The process to be woken up. + * + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate + * the nature of the wakeup. + */ +int wake_up_lock_sleeper(struct task_struct *p) +{ + return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER); +} + int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); @@ -2366,6 +2432,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->on_cpu = 0; #endif init_task_preempt_count(p); +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(p)->preempt_lazy_count = 0; +#endif #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); @@ -2406,7 +2475,7 @@ void wake_up_new_task(struct task_struct *p) #ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: - * - cpus_allowed can change in the fork path + * - cpus_ptr can change in the fork path * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, @@ -2695,23 +2764,18 @@ static struct rq *finish_task_switch(struct task_struct *prev) * provided by mmdrop(), * - a sync_core for SYNC_CORE. */ + /* + * We use mmdrop_delayed() here so we don't have to do the + * full __mmdrop() when we are the last user. + */ if (mm) { membarrier_mm_sync_core_before_usermode(mm); - mmdrop(mm); + mmdrop_delayed(mm); } if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. - */ - kprobe_flush_task(prev); - - /* Task is done with its stack. */ - put_task_stack(prev); - put_task_struct(prev); } @@ -3388,6 +3452,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) BUG(); } +static void migrate_disabled_sched(struct task_struct *p); + /* * __schedule() is the main scheduler function. 
* @@ -3458,6 +3524,9 @@ static void __sched notrace __schedule(bool preempt) rq_lock(rq, &rf); smp_mb__after_spinlock(); + if (__migrate_disabled(prev)) + migrate_disabled_sched(prev); + /* Promote REQ to ACT */ rq->clock_update_flags <<= 1; update_rq_clock(rq); @@ -3474,25 +3543,13 @@ static void __sched notrace __schedule(bool preempt) atomic_inc(&rq->nr_iowait); delayacct_blkio_start(); } - - /* - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. - */ - if (prev->flags & PF_WQ_WORKER) { - struct task_struct *to_wakeup; - - to_wakeup = wq_worker_sleeping(prev); - if (to_wakeup) - try_to_wake_up_local(to_wakeup, &rf); - } } switch_count = &prev->nvcsw; } next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); + clear_tsk_need_resched_lazy(prev); clear_preempt_need_resched(); if (likely(prev != next)) { @@ -3544,8 +3601,25 @@ void __noreturn do_task_dead(void) static inline void sched_submit_work(struct task_struct *tsk) { - if (!tsk->state || tsk_is_pi_blocked(tsk)) + if (!tsk->state) return; + + /* + * If a worker went to sleep, notify and ask workqueue whether + * it wants to wake up a task to maintain concurrency. + * As this function is called inside the schedule() context, + * we disable preemption to avoid it calling schedule() again + * in the possible wakeup of a kworker. + */ + if (tsk->flags & PF_WQ_WORKER) { + preempt_disable(); + wq_worker_sleeping(tsk); + preempt_enable_no_resched(); + } + + if (tsk_is_pi_blocked(tsk)) + return; + /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. @@ -3554,6 +3628,12 @@ static inline void sched_submit_work(struct task_struct *tsk) blk_schedule_flush_plug(tsk); } +static void sched_update_worker(struct task_struct *tsk) +{ + if (tsk->flags & PF_WQ_WORKER) + wq_worker_running(tsk); +} + asmlinkage __visible void __sched schedule(void) { struct task_struct *tsk = current; @@ -3564,6 +3644,7 @@ asmlinkage __visible void __sched schedule(void) __schedule(false); sched_preempt_enable_no_resched(); } while (need_resched()); + sched_update_worker(tsk); } EXPORT_SYMBOL(schedule); @@ -3652,6 +3733,30 @@ static void __sched notrace preempt_schedule_common(void) } while (need_resched()); } +#ifdef CONFIG_PREEMPT_LAZY +/* + * If TIF_NEED_RESCHED is set then we allow being scheduled away, since it is + * set by an RT task. Otherwise we try to avoid being scheduled out as long as + * the preempt_lazy_count counter is >0. + */ +static __always_inline int preemptible_lazy(void) +{ + if (test_thread_flag(TIF_NEED_RESCHED)) + return 1; + if (current_thread_info()->preempt_lazy_count) + return 0; + return 1; +} + +#else + +static inline int preemptible_lazy(void) +{ + return 1; +} + +#endif + #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption @@ -3666,7 +3771,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) */ if (likely(!preemptible())) return; - + if (!preemptible_lazy()) + return; preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); @@ -3693,6 +3799,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) if (likely(!preemptible())) return; + if (!preemptible_lazy()) + return; + do { /* * Because the function tracer can trace preempt_count_sub() @@ -4322,7 +4431,7 @@ static int __sched_setscheduler(struct task_struct *p, * the entire root_domain to become SCHED_DEADLINE. We * will also fail if there's no bandwidth available.
*/ - if (!cpumask_subset(span, &p->cpus_allowed) || + if (!cpumask_subset(span, p->cpus_ptr) || rq->rd->dl_bw.bw == 0) { task_rq_unlock(rq, p, &rf); return -EPERM; @@ -4921,7 +5030,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) goto out_unlock; raw_spin_lock_irqsave(&p->pi_lock, flags); - cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); + cpumask_and(mask, &p->cpus_mask, cpu_active_mask); raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: @@ -5459,7 +5568,9 @@ void init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); - +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(idle)->preempt_lazy_count = 0; +#endif /* * The idle tasks have their own, simple scheduling class: */ @@ -5498,7 +5609,7 @@ int task_can_attach(struct task_struct *p, * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for * success of set_cpus_allowed_ptr() on all attached tasks - * before cpus_allowed may be changed. + * before cpus_mask may be changed. */ if (p->flags & PF_NO_SETAFFINITY) { ret = -EINVAL; @@ -5525,7 +5636,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) if (curr_cpu == target_cpu) return 0; - if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) + if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) return -EINVAL; /* TODO: This is not properly updating schedstats */ @@ -5564,6 +5675,7 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU + /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. @@ -5663,8 +5775,10 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) BUG_ON(!next); put_prev_task(rq, next); + WARN_ON_ONCE(__migrate_disabled(next)); + /* - * Rules for changing task_struct::cpus_allowed are holding + * Rules for changing task_struct::cpus_mask are holding * both pi_lock and rq->lock, such that holding either * stabilizes the mask. 
* @@ -6126,7 +6240,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { - int nested = preempt_count() + rcu_preempt_depth(); + int nested = preempt_count() + sched_rcu_preempt_depth(); return (nested == preempt_offset); } @@ -7108,3 +7222,171 @@ const u32 sched_prio_to_wmult[40] = { }; #undef CREATE_TRACE_POINTS + +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + +static inline void +update_nr_migratory(struct task_struct *p, long delta) +{ + if (unlikely((p->sched_class == &rt_sched_class || + p->sched_class == &dl_sched_class) && + p->nr_cpus_allowed > 1)) { + if (p->sched_class == &rt_sched_class) + task_rq(p)->rt.rt_nr_migratory += delta; + else + task_rq(p)->dl.dl_nr_migratory += delta; + } +} + +static inline void +migrate_disable_update_cpus_allowed(struct task_struct *p) +{ + p->cpus_ptr = cpumask_of(smp_processor_id()); + update_nr_migratory(p, -1); + p->nr_cpus_allowed = 1; +} + +static inline void +migrate_enable_update_cpus_allowed(struct task_struct *p) +{ + struct rq *rq; + struct rq_flags rf; + + rq = task_rq_lock(p, &rf); + p->cpus_ptr = &p->cpus_mask; + p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); + update_nr_migratory(p, 1); + task_rq_unlock(rq, p, &rf); +} + +void migrate_disable(void) +{ + preempt_disable(); + + if (++current->migrate_disable == 1) { + this_rq()->nr_pinned++; + preempt_lazy_disable(); +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(current->pinned_on_cpu >= 0); + current->pinned_on_cpu = smp_processor_id(); +#endif + } + + preempt_enable(); +} +EXPORT_SYMBOL(migrate_disable); + +static void migrate_disabled_sched(struct task_struct *p) +{ + if (p->migrate_disable_scheduled) + return; + + migrate_disable_update_cpus_allowed(p); + p->migrate_disable_scheduled = 1; +} + +static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work); +static DEFINE_PER_CPU(struct migration_arg, migrate_arg); + +void migrate_enable(void) +{ + struct task_struct *p = current; + struct rq *rq = this_rq(); + int cpu = task_cpu(p); + + WARN_ON_ONCE(p->migrate_disable <= 0); + if (p->migrate_disable > 1) { + p->migrate_disable--; + return; + } + + preempt_disable(); + +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(current->pinned_on_cpu != cpu); + current->pinned_on_cpu = -1; +#endif + + WARN_ON_ONCE(rq->nr_pinned < 1); + + p->migrate_disable = 0; + rq->nr_pinned--; +#ifdef CONFIG_HOTPLUG_CPU + if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && + takedown_cpu_task) + wake_up_process(takedown_cpu_task); +#endif + + if (!p->migrate_disable_scheduled) + goto out; + + p->migrate_disable_scheduled = 0; + + migrate_enable_update_cpus_allowed(p); + + WARN_ON(smp_processor_id() != cpu); + if (!is_cpu_allowed(p, cpu)) { + struct migration_arg __percpu *arg; + struct cpu_stop_work __percpu *work; + struct rq_flags rf; + + work = this_cpu_ptr(&migrate_work); + arg = this_cpu_ptr(&migrate_arg); + WARN_ON_ONCE(!arg->done && !work->disabled && work->arg); + + arg->task = p; + arg->done = false; + + rq = task_rq_lock(p, &rf); + update_rq_clock(rq); + arg->dest_cpu = select_fallback_rq(cpu, p); + task_rq_unlock(rq, p, &rf); + + stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, + arg, work); + tlb_migrate_finish(p->mm); + } + +out: + preempt_lazy_enable(); + preempt_enable(); +} +EXPORT_SYMBOL(migrate_enable); + +int cpu_nr_pinned(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + return rq->nr_pinned; +} + +#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) +static void migrate_disabled_sched(struct 
task_struct *p) +{ +} + +void migrate_disable(void) +{ +#ifdef CONFIG_SCHED_DEBUG + current->migrate_disable++; +#endif + barrier(); +} +EXPORT_SYMBOL(migrate_disable); + +void migrate_enable(void) +{ +#ifdef CONFIG_SCHED_DEBUG + struct task_struct *p = current; + + WARN_ON_ONCE(p->migrate_disable <= 0); + p->migrate_disable--; +#endif + barrier(); +} +EXPORT_SYMBOL(migrate_enable); +#else +static void migrate_disabled_sched(struct task_struct *p) +{ +} +#endif diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 50316455ea66a9e46aa1465272b8cf7f6174ba46..d57fb2f8ae67bf167f231473207e06ce81709157 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, const struct sched_dl_entity *dl_se = &p->dl; if (later_mask && - cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { + cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) { return 1; } else { int best_cpu = cpudl_maximum(cp); WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); - if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) && + if (cpumask_test_cpu(best_cpu, p->cpus_ptr) && dl_time_before(dl_se->deadline, cp->elements[0].dl)) { if (later_mask) cpumask_set_cpu(best_cpu, later_mask); diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index daaadf939ccb1e9349e716e9d97359d0c7258519..f7d2c10b4c92e956fb6e431572e47b50edcc4490 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (skip) continue; - if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) + if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids) continue; if (lowest_mask) { - cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); + cpumask_and(lowest_mask, p->cpus_ptr, vec->mask); /* * We have to ensure that we have at least one bit diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index beec5081a55af775d9af3af7b73d3424b44d733d..0bc1bad199c19cd8baa2ad980876be42bfe5500c 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -287,7 +287,7 @@ static void task_non_contending(struct task_struct *p) dl_se->dl_non_contending = 1; get_task_struct(p); - hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL); + hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); } static void task_contending(struct sched_dl_entity *dl_se, int flags) @@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p * If we cannot preempt any rq, fall back to pick any * online CPU: */ - cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); + cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); if (cpu >= nr_cpu_ids) { /* * Failed to find any suitable CPU. 
@@ -1086,7 +1086,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) { struct hrtimer *timer = &dl_se->dl_timer; - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); timer->function = dl_task_timer; } @@ -1325,7 +1325,7 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) { struct hrtimer *timer = &dl_se->inactive_timer; - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); timer->function = inactive_task_timer; } @@ -1857,7 +1857,7 @@ static void set_curr_task_dl(struct rq *rq) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - cpumask_test_cpu(cpu, &p->cpus_allowed)) + cpumask_test_cpu(cpu, p->cpus_ptr)) return 1; return 0; } @@ -2007,7 +2007,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || + !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || task_running(rq, task) || !dl_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index b1ef4f2e7edcb5d7238e0b225892978b4f3aba9c..169b98c12da7917428abf2ff3c676bf2be7cea2b 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -988,6 +988,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, P(dl.runtime); P(dl.deadline); } +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) + P(migrate_disable); +#endif + P(nr_cpus_allowed); #undef PN_SCHEDSTAT #undef PN #undef __PN diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 84e7efda98dafa80703c00343798766de5dc17ed..d006dfc54a455be9c6a0d7b9849622fd62b5400d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1691,7 +1691,7 @@ static void task_numa_compare(struct task_numa_env *env, * be incurred if the tasks were swapped. */ /* Skip this swap candidate if cannot move to the source cpu */ - if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) + if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) goto unlock; /* @@ -1789,7 +1789,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ - if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) + if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) continue; env->dst_cpu = cpu; @@ -4104,7 +4104,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. @@ -4128,7 +4128,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); } static void @@ -4270,7 +4270,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. 
*/ if (queued) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); return; } /* @@ -4404,7 +4404,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); } static __always_inline @@ -4597,7 +4597,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) struct rq *rq = rq_of(cfs_rq); struct rq_flags rf; - rq_lock(rq, &rf); + rq_lock_irqsave(rq, &rf); if (!cfs_rq_throttled(cfs_rq)) goto next; @@ -4616,7 +4616,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) unthrottle_cfs_rq(cfs_rq); next: - rq_unlock(rq, &rf); + rq_unlock_irqrestore(rq, &rf); if (!remaining) break; @@ -4632,7 +4632,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) * period the timer is deactivated until scheduling resumes; cfs_b->idle is * used to track this state. */ -static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) +static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) { u64 runtime; int throttled; @@ -4672,10 +4672,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; cfs_b->distribute_running = 1; - raw_spin_unlock(&cfs_b->lock); + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); /* we can't nest cfs_b->lock while distributing bandwidth */ runtime = distribute_cfs_runtime(cfs_b, runtime); - raw_spin_lock(&cfs_b->lock); + raw_spin_lock_irqsave(&cfs_b->lock, flags); cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); @@ -4783,16 +4783,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) { u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); + unsigned long flags; /* confirm we're still not at a refresh boundary */ - raw_spin_lock(&cfs_b->lock); + raw_spin_lock_irqsave(&cfs_b->lock, flags); if (cfs_b->distribute_running) { - raw_spin_unlock(&cfs_b->lock); + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); return; } if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { - raw_spin_unlock(&cfs_b->lock); + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); return; } @@ -4802,17 +4803,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) if (runtime) cfs_b->distribute_running = 1; - raw_spin_unlock(&cfs_b->lock); + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); if (!runtime) return; runtime = distribute_cfs_runtime(cfs_b, runtime); - raw_spin_lock(&cfs_b->lock); + raw_spin_lock_irqsave(&cfs_b->lock, flags); cfs_b->runtime -= min(runtime, cfs_b->runtime); cfs_b->distribute_running = 0; - raw_spin_unlock(&cfs_b->lock); + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); } /* @@ -4892,11 +4893,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { struct cfs_bandwidth *cfs_b = container_of(timer, struct cfs_bandwidth, period_timer); + unsigned long flags; int overrun; int idle = 0; int count = 0; - raw_spin_lock(&cfs_b->lock); + raw_spin_lock_irqsave(&cfs_b->lock, flags); for (;;) { overrun = hrtimer_forward_now(timer, cfs_b->period); if (!overrun) @@ -4932,11 +4934,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) count = 0; } - idle = 
do_sched_cfs_period_timer(cfs_b, overrun); + idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); } if (idle) cfs_b->period_active = 0; - raw_spin_unlock(&cfs_b->lock); + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } @@ -5110,7 +5112,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) - resched_curr(rq); + resched_curr_lazy(rq); return; } hrtick_start(rq, delta); @@ -5801,7 +5803,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), - &p->cpus_allowed)) + p->cpus_ptr)) continue; local_group = cpumask_test_cpu(this_cpu, @@ -5933,7 +5935,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ - for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { + for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); @@ -5973,7 +5975,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p { int new_cpu = cpu; - if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) + if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) return prev_cpu; /* @@ -6090,7 +6092,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int if (!test_idle_cores(target, false)) return -1; - cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); + cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); for_each_cpu_wrap(core, cpus, target) { bool idle = true; @@ -6124,7 +6126,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) continue; if (available_idle_cpu(cpu)) return cpu; @@ -6185,7 +6187,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t time = local_clock(); - cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); + cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); for_each_cpu_wrap(cpu, cpus, target) { if (!--nr) @@ -6225,7 +6227,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && - cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { + cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) { /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: @@ -6443,7 +6445,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) - && cpumask_test_cpu(cpu, &p->cpus_allowed); + && cpumask_test_cpu(cpu, p->cpus_ptr); } rcu_read_lock(); @@ -6704,7 +6706,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: - resched_curr(rq); + resched_curr_lazy(rq); /* * Only set the backward buddy when the current task is still * on the rq. 
This can happen when a wakeup gets interleaved @@ -7182,14 +7184,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or - * 2) cannot be migrated to this CPU due to cpus_allowed, or + * 2) cannot be migrated to this CPU due to cpus_ptr, or * 3) running (obviously), or * 4) are cache-hot on their current CPU. */ if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) return 0; - if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { + if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); @@ -7209,7 +7211,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { - if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { + if (cpumask_test_cpu(cpu, p->cpus_ptr)) { env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; @@ -7834,7 +7836,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) /* * Group imbalance indicates (and tries to solve) the problem where balancing - * groups is inadequate due to ->cpus_allowed constraints. + * groups is inadequate due to ->cpus_ptr constraints. * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. @@ -8449,7 +8451,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically - * isn't true due to cpus_allowed constraints and the like. + * isn't true due to cpus_ptr constraints and the like. */ if (busiest->group_type == group_imbalanced) goto force_balance; @@ -8845,7 +8847,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * if the curr task on busiest CPU can't be * moved to this_cpu: */ - if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { + if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { raw_spin_unlock_irqrestore(&busiest->lock, flags); env.flags |= LBF_ALL_PINNED; @@ -9838,7 +9840,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); - resched_curr(rq); + resched_curr_lazy(rq); } se->vruntime -= cfs_rq->min_vruntime; @@ -9862,7 +9864,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) - resched_curr(rq); + resched_curr_lazy(rq); } else check_preempt_curr(rq, p, 0); } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 85ae8488039c088c00bf12f182e4027c061ab2f0..12a12be6770b653953dd625686a41a75b33fe93b 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -46,11 +46,19 @@ SCHED_FEAT(LB_BIAS, true) */ SCHED_FEAT(NONTASK_CAPACITY, true) +#ifdef CONFIG_PREEMPT_RT_FULL +SCHED_FEAT(TTWU_QUEUE, false) +# ifdef CONFIG_PREEMPT_LAZY +SCHED_FEAT(PREEMPT_LAZY, true) +# endif +#else + /* * Queue remote wakeups on the target CPU and process them * using the scheduler IPI. Reduces rq->lock contention/bounces. */ SCHED_FEAT(TTWU_QUEUE, true) +#endif /* * When doing wakeups, attempt to limit superfluous scans of the LLC domain. 
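/*
 * [Editor's sketch -- illustrative, not part of the patch] The
 * resched_curr() -> resched_curr_lazy() conversions in fair.c above,
 * together with the PREEMPT_LAZY feature bit, defer fair-class
 * preemption instead of forcing it: only a "lazy" need-resched flag is
 * set, which is acted on at the next preemption point. A minimal
 * sketch, assuming a TIF_NEED_RESCHED_LAZY flag as in the RT tree:
 */
static void resched_curr_lazy_sketch(struct rq *rq)
{
	if (!sched_feat(PREEMPT_LAZY)) {
		resched_curr(rq);	/* old behaviour: reschedule now */
		return;
	}
	if (test_tsk_need_resched(rq->curr))
		return;			/* a hard request is already pending */
	/* mark the task only; no IPI, no immediate preemption */
	set_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY);
}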
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 70e8cd39547452a61b20c1414fd4e2e80e5d1bb4..a2f2c6c91e05b66bfb72f956736cc176149ef6de 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) raw_spin_lock_init(&rt_b->rt_runtime_lock); - hrtimer_init(&rt_b->rt_period_timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_HARD); rt_b->rt_period_timer.function = sched_rt_period_timer; } @@ -1620,7 +1620,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - cpumask_test_cpu(cpu, &p->cpus_allowed)) + cpumask_test_cpu(cpu, p->cpus_ptr)) return 1; return 0; @@ -1757,7 +1757,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || + !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) || task_running(rq, task) || !rt_task(task) || !task_on_rq_queued(task))) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 55e695080fc6bd413039500b3b08fa14409d935f..45b3f135d2058b51fd9b12d1ea0314c63130728e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -915,6 +915,10 @@ struct rq { /* Must be inspected within a rcu lock section */ struct cpuidle_state *idle_state; #endif + +#if defined(CONFIG_PREEMPT_RT_BASE) && defined(CONFIG_SMP) + int nr_pinned; +#endif }; static inline int cpu_of(struct rq *rq) @@ -1448,6 +1452,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ +#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ /* * To aid in avoiding the subversion of "niceness" due to uneven distribution @@ -1642,6 +1647,15 @@ extern void reweight_task(struct task_struct *p, int prio); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); +#ifdef CONFIG_PREEMPT_LAZY +extern void resched_curr_lazy(struct rq *rq); +#else +static inline void resched_curr_lazy(struct rq *rq) +{ + resched_curr(rq); +} +#endif + extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 66b59ac77c2209fd9fccd92c8ae0c660e7428e4d..119a56d7f739710947f8396a9151161796eb14bd 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -32,6 +32,25 @@ void swake_up_locked(struct swait_queue_head *q) } EXPORT_SYMBOL(swake_up_locked); +void swake_up_all_locked(struct swait_queue_head *q) +{ + struct swait_queue *curr; + int wakes = 0; + + while (!list_empty(&q->task_list)) { + + curr = list_first_entry(&q->task_list, typeof(*curr), + task_list); + wake_up_process(curr->task); + list_del_init(&curr->task_list); + wakes++; + } + if (pm_in_action) + return; + WARN(wakes > 2, "complete_all() with %d waiters\n", wakes); +} +EXPORT_SYMBOL(swake_up_all_locked); + void swake_up_one(struct swait_queue_head *q) { unsigned long flags; @@ -51,6 +70,7 @@ void swake_up_all(struct swait_queue_head *q) struct swait_queue *curr; LIST_HEAD(tmp); + WARN_ON(irqs_disabled()); raw_spin_lock_irq(&q->lock); list_splice_init(&q->task_list, &tmp); while 
(!list_empty(&tmp)) { @@ -69,7 +89,7 @@ void swake_up_all(struct swait_queue_head *q) } EXPORT_SYMBOL(swake_up_all); -static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) +void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) { wait->task = current; if (list_empty(&wait->task_list)) diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c new file mode 100644 index 0000000000000000000000000000000000000000..c90d14b9b12693f49455ddb61662268a2f830518 --- /dev/null +++ b/kernel/sched/swork.c @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de + * + * Provides a PREEMPT_RT_FULL safe framework for enqueuing callbacks from + * irq context. The callbacks are executed in kthread context. + */ + +#include <linux/swait.h> +#include <linux/swork.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/export.h> + +#define SWORK_EVENT_PENDING 1 + +static DEFINE_MUTEX(worker_mutex); +static struct sworker *glob_worker; + +struct sworker { + struct list_head events; + struct swait_queue_head wq; + + raw_spinlock_t lock; + + struct task_struct *task; + int refs; +}; + +static bool swork_readable(struct sworker *worker) +{ + bool r; + + if (kthread_should_stop()) + return true; + + raw_spin_lock_irq(&worker->lock); + r = !list_empty(&worker->events); + raw_spin_unlock_irq(&worker->lock); + + return r; +} + +static int swork_kthread(void *arg) +{ + struct sworker *worker = arg; + + for (;;) { + swait_event_interruptible_exclusive(worker->wq, + swork_readable(worker)); + if (kthread_should_stop()) + break; + + raw_spin_lock_irq(&worker->lock); + while (!list_empty(&worker->events)) { + struct swork_event *sev; + + sev = list_first_entry(&worker->events, + struct swork_event, item); + list_del(&sev->item); + raw_spin_unlock_irq(&worker->lock); + + WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, + &sev->flags)); + sev->func(sev); + raw_spin_lock_irq(&worker->lock); + } + raw_spin_unlock_irq(&worker->lock); + } + return 0; +} + +static struct sworker *swork_create(void) +{ + struct sworker *worker; + + worker = kzalloc(sizeof(*worker), GFP_KERNEL); + if (!worker) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&worker->events); + raw_spin_lock_init(&worker->lock); + init_swait_queue_head(&worker->wq); + + worker->task = kthread_run(swork_kthread, worker, "kswork"); + if (IS_ERR(worker->task)) { + kfree(worker); + return ERR_PTR(-ENOMEM); + } + + return worker; +} + +static void swork_destroy(struct sworker *worker) +{ + kthread_stop(worker->task); + + WARN_ON(!list_empty(&worker->events)); + kfree(worker); +} + +/** + * swork_queue - queue swork + * + * Returns %false if @work was already on a queue, %true otherwise. + * + * The work is queued and processed on a random CPU. + */ +bool swork_queue(struct swork_event *sev) +{ + unsigned long flags; + + if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) + return false; + + raw_spin_lock_irqsave(&glob_worker->lock, flags); + list_add_tail(&sev->item, &glob_worker->events); + raw_spin_unlock_irqrestore(&glob_worker->lock, flags); + + swake_up_one(&glob_worker->wq); + return true; +} +EXPORT_SYMBOL_GPL(swork_queue); + +/** + * swork_get - get an instance of the sworker + * + * Returns a negative error code if the initialization of the worker did not + * work, %0 otherwise. 
+ * + */ +int swork_get(void) +{ + struct sworker *worker; + + mutex_lock(&worker_mutex); + if (!glob_worker) { + worker = swork_create(); + if (IS_ERR(worker)) { + mutex_unlock(&worker_mutex); + return -ENOMEM; + } + + glob_worker = worker; + } + + glob_worker->refs++; + mutex_unlock(&worker_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(swork_get); + +/** + * swork_put - puts an instance of the sworker + * + * Will destroy the sworker thread. This function must not be called until all + * queued events have been completed. + */ +void swork_put(void) +{ + mutex_lock(&worker_mutex); + + glob_worker->refs--; + if (glob_worker->refs > 0) + goto out; + + swork_destroy(glob_worker); + glob_worker = NULL; +out: + mutex_unlock(&worker_mutex); +} +EXPORT_SYMBOL_GPL(swork_put); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 02e85cd233d423f2cd4e0b5c38b05095d679a1a9..208af1181bf58e2a290bf14cd2642db6c4253777 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -279,6 +279,7 @@ static int init_rootdomain(struct root_domain *rd) rd->rto_cpu = -1; raw_spin_lock_init(&rd->rto_lock); init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); + rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ; #endif init_dl_bw(&rd->dl_bw); diff --git a/kernel/signal.c b/kernel/signal.c index 4cc3f3ba13a955733b7a62c3927cf04274e7e101..59294dfaa74491a3bf8aa53acc03e4f02e77acb8 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -398,13 +399,30 @@ void task_join_group_stop(struct task_struct *task) task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); } +static inline struct sigqueue *get_task_cache(struct task_struct *t) +{ + struct sigqueue *q = t->sigqueue_cache; + + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) + return NULL; + return q; +} + +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) +{ + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) + return 0; + return 1; +} + /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, + int override_rlimit, int fromslab) { struct sigqueue *q = NULL; struct user_struct *user; @@ -426,7 +444,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi rcu_read_unlock(); if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { - q = kmem_cache_alloc(sigqueue_cachep, flags); + if (!fromslab) + q = get_task_cache(t); + if (!q) + q = kmem_cache_alloc(sigqueue_cachep, flags); } else { print_dropped_signal(sig); } @@ -443,6 +464,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi return q; } +static struct sigqueue * +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, + int override_rlimit) +{ + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); +} + static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) @@ -452,6 +480,21 @@ static void __sigqueue_free(struct sigqueue *q) kmem_cache_free(sigqueue_cachep, q); } +static void sigqueue_free_current(struct sigqueue *q) +{ + struct user_struct *up; + + if (q->flags & SIGQUEUE_PREALLOC) + return; + + up = q->user; + if (rt_prio(current->normal_prio) && 
!put_task_cache(current, q)) { + if (atomic_dec_and_test(&up->sigpending)) + free_uid(up); + } else + __sigqueue_free(q); +} + void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; @@ -464,6 +507,21 @@ void flush_sigqueue(struct sigpending *queue) } } +/* + * Called from __exit_signal. Flush tsk->pending and + * tsk->sigqueue_cache + */ +void flush_task_sigqueue(struct task_struct *tsk) +{ + struct sigqueue *q; + + flush_sigqueue(&tsk->pending); + + q = get_task_cache(tsk); + if (q) + kmem_cache_free(sigqueue_cachep, q); +} + /* * Flush all pending signals for this kthread. */ @@ -587,7 +645,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, (info->si_code == SI_TIMER) && (info->si_sys_private); - __sigqueue_free(first); + sigqueue_free_current(first); } else { /* * Ok, it wasn't in the queue. This must be @@ -624,6 +682,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) bool resched_timer = false; int signr; + WARN_ON_ONCE(tsk != current); + /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ @@ -1283,8 +1343,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. */ -int -force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +static int +do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) { unsigned long int flags; int ret, blocked, ignored; @@ -1313,6 +1373,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) return ret; } +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +{ +/* + * On some archs, PREEMPT_RT has to delay sending a signal from a trap + * since it cannot enable preemption, and the signal code's spin_locks + * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will + * send the signal on exit of the trap. + */ +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (in_atomic()) { + if (WARN_ON_ONCE(t != current)) + return 0; + if (WARN_ON_ONCE(t->forced_info.si_signo)) + return 0; + + if (is_si_special(info)) { + WARN_ON_ONCE(info != SEND_SIG_PRIV); + t->forced_info.si_signo = sig; + t->forced_info.si_errno = 0; + t->forced_info.si_code = SI_KERNEL; + t->forced_info.si_pid = 0; + t->forced_info.si_uid = 0; + } else { + t->forced_info = *info; + } + + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); + return 0; + } +#endif + return do_force_sig_info(sig, info, t); +} + /* * Nuke all other threads in the group. */ @@ -1729,7 +1822,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); + /* Preallocated sigqueue objects always come from the slabcache! */ + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); if (q) q->flags |= SIGQUEUE_PREALLOC; @@ -2099,15 +2193,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); - /* - * Don't want to allow preemption here, because - * sys_ptrace() needs this task to be inactive. - * - * XXX: implement read_unlock_no_resched(). 
- */ - preempt_disable(); read_unlock(&tasklist_lock); - preempt_enable_no_resched(); freezable_schedule(); } else { /* diff --git a/kernel/softirq.c b/kernel/softirq.c index 6f584861d329bfb0382a5691320938cf3db7009b..9bad7a16dc61b9bf5eb09105debbd28afb959d5e 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -21,11 +21,14 @@ #include #include #include +#include #include #include #include #include +#include #include +#include #define CREATE_TRACE_POINTS #include @@ -56,12 +59,136 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; DEFINE_PER_CPU(struct task_struct *, ksoftirqd); +#ifdef CONFIG_PREEMPT_RT_FULL +#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) +DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); +#endif const char * const softirq_to_name[NR_SOFTIRQS] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", "TASKLET", "SCHED", "HRTIMER", "RCU" }; +#ifdef CONFIG_NO_HZ_COMMON +# ifdef CONFIG_PREEMPT_RT_FULL + +struct softirq_runner { + struct task_struct *runner[NR_SOFTIRQS]; +}; + +static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); + +static inline void softirq_set_runner(unsigned int sirq) +{ + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); + + sr->runner[sirq] = current; +} + +static inline void softirq_clr_runner(unsigned int sirq) +{ + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); + + sr->runner[sirq] = NULL; +} + +static bool softirq_check_runner_tsk(struct task_struct *tsk, + unsigned int *pending) +{ + bool ret = false; + + if (!tsk) + return ret; + + /* + * The wakeup code in rtmutex.c wakes up the task + * _before_ it sets pi_blocked_on to NULL under + * tsk->pi_lock. So we need to check for both: state + * and pi_blocked_on. + * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the + * task does cpu_chill(). + */ + raw_spin_lock(&tsk->pi_lock); + if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING || + (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) { + /* Clear all bits pending in that task */ + *pending &= ~(tsk->softirqs_raised); + ret = true; + } + raw_spin_unlock(&tsk->pi_lock); + + return ret; +} + +/* + * On preempt-rt a softirq running context might be blocked on a + * lock. There might be no other runnable task on this CPU because the + * lock owner runs on some other CPU. So we have to go into idle with + * the pending bit set. Therefore we need to check this, otherwise we + * warn about false positives which confuse users and defeat the + * whole purpose of this test. + * + * This code is called with interrupts disabled. 
+ */ +void softirq_check_pending_idle(void) +{ + struct task_struct *tsk; + static int rate_limit; + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); + u32 warnpending; + int i; + + if (rate_limit >= 10) + return; + + warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; + if (!warnpending) + return; + for (i = 0; i < NR_SOFTIRQS; i++) { + tsk = sr->runner[i]; + + if (softirq_check_runner_tsk(tsk, &warnpending)) + warnpending &= ~(1 << i); + } + + if (warnpending) { + tsk = __this_cpu_read(ksoftirqd); + softirq_check_runner_tsk(tsk, &warnpending); + } + + if (warnpending) { + tsk = __this_cpu_read(ktimer_softirqd); + softirq_check_runner_tsk(tsk, &warnpending); + } + + if (warnpending) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + warnpending); + rate_limit++; + } +} +# else +/* + * On !PREEMPT_RT we just printk rate limited: + */ +void softirq_check_pending_idle(void) +{ + static int rate_limit; + + if (rate_limit < 10 && + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + local_softirq_pending()); + rate_limit++; + } +} +# endif + +#else /* !CONFIG_NO_HZ_COMMON */ +static inline void softirq_set_runner(unsigned int sirq) { } +static inline void softirq_clr_runner(unsigned int sirq) { } +#endif + /* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency @@ -77,6 +204,38 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } +#ifdef CONFIG_PREEMPT_RT_FULL +static void wakeup_timer_softirqd(void) +{ + /* Interrupts are disabled: no need to stop preemption */ + struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); + + if (tsk && tsk->state != TASK_RUNNING) + wake_up_process(tsk); +} +#endif + +static void handle_softirq(unsigned int vec_nr) +{ + struct softirq_action *h = softirq_vec + vec_nr; + int prev_count; + + prev_count = preempt_count(); + + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); + h->action(h); + trace_softirq_exit(vec_nr); + if (unlikely(prev_count != preempt_count())) { + pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", + vec_nr, softirq_to_name[vec_nr], h->action, + prev_count, preempt_count()); + preempt_count_set(prev_count); + } +} + +#ifndef CONFIG_PREEMPT_RT_FULL /* * If ksoftirqd is scheduled, we do not want to process pending softirqs * right now. 
Let ksoftirqd handle this at its own rate, to get fairness, @@ -92,6 +251,47 @@ static bool ksoftirqd_running(unsigned long pending) return tsk && (tsk->state == TASK_RUNNING); } +static inline int ksoftirqd_softirq_pending(void) +{ + return local_softirq_pending(); +} + +static void handle_pending_softirqs(u32 pending) +{ + struct softirq_action *h = softirq_vec; + int softirq_bit; + + local_irq_enable(); + + h = softirq_vec; + + while ((softirq_bit = ffs(pending))) { + unsigned int vec_nr; + + h += softirq_bit - 1; + vec_nr = h - softirq_vec; + handle_softirq(vec_nr); + + h++; + pending >>= softirq_bit; + } + + rcu_bh_qs(); + local_irq_disable(); +} + +static void run_ksoftirqd(unsigned int cpu) +{ + local_irq_disable(); + if (ksoftirqd_softirq_pending()) { + __do_softirq(); + local_irq_enable(); + cond_resched(); + return; + } + local_irq_enable(); +} + /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving @@ -251,10 +451,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) unsigned long end = jiffies + MAX_SOFTIRQ_TIME; unsigned long old_flags = current->flags; int max_restart = MAX_SOFTIRQ_RESTART; - struct softirq_action *h; bool in_hardirq; __u32 pending; - int softirq_bit; /* * Mask out PF_MEMALLOC as current task context is borrowed for the @@ -273,36 +471,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - local_irq_enable(); - - h = softirq_vec; - - while ((softirq_bit = ffs(pending))) { - unsigned int vec_nr; - int prev_count; - - h += softirq_bit - 1; - - vec_nr = h - softirq_vec; - prev_count = preempt_count(); - - kstat_incr_softirqs_this_cpu(vec_nr); - - trace_softirq_entry(vec_nr); - h->action(h); - trace_softirq_exit(vec_nr); - if (unlikely(prev_count != preempt_count())) { - pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", - vec_nr, softirq_to_name[vec_nr], h->action, - prev_count, preempt_count()); - preempt_count_set(prev_count); - } - h++; - pending >>= softirq_bit; - } - - rcu_bh_qs(); - local_irq_disable(); + handle_pending_softirqs(pending); pending = local_softirq_pending(); if (pending) { @@ -338,6 +507,309 @@ asmlinkage __visible void do_softirq(void) local_irq_restore(flags); } +/* + * This function must run with irqs disabled! + */ +void raise_softirq_irqoff(unsigned int nr) +{ + __raise_softirq_irqoff(nr); + + /* + * If we're in an interrupt or softirq, we're done + * (this also catches softirq-disabled code). We will + * actually run the softirq once we return from + * the irq or softirq. + * + * Otherwise we wake up ksoftirqd to make sure we + * schedule the softirq soon. 
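+ *
+ * [Editor's note, illustrative only] On PREEMPT_RT_FULL this wakeup is
+ * split between two per-CPU daemons further below: timer-class
+ * softirqs (TIMER, HRTIMER) are handed to ktimersoftd, everything else
+ * to ksoftirqd, roughly:
+ *
+ *	if (mask & TIMER_SOFTIRQS)
+ *		wakeup_timer_softirqd();
+ *	else
+ *		wakeup_softirqd();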
+ */ + if (!in_interrupt()) + wakeup_softirqd(); +} + +void __raise_softirq_irqoff(unsigned int nr) +{ + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +} + +static inline void local_bh_disable_nort(void) { local_bh_disable(); } +static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } +static void ksoftirqd_set_sched_params(unsigned int cpu) { } + +#else /* !PREEMPT_RT_FULL */ + +/* + * On RT we serialize softirq execution with a cpu local lock per softirq + */ +static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); + +void __init softirq_early_init(void) +{ + int i; + + for (i = 0; i < NR_SOFTIRQS; i++) + local_irq_lock_init(local_softirq_locks[i]); +} + +static void lock_softirq(int which) +{ + local_lock(local_softirq_locks[which]); +} + +static void unlock_softirq(int which) +{ + local_unlock(local_softirq_locks[which]); +} + +static void do_single_softirq(int which) +{ + unsigned long old_flags = current->flags; + + current->flags &= ~PF_MEMALLOC; + vtime_account_irq_enter(current); + current->flags |= PF_IN_SOFTIRQ; + lockdep_softirq_enter(); + local_irq_enable(); + handle_softirq(which); + local_irq_disable(); + lockdep_softirq_exit(); + current->flags &= ~PF_IN_SOFTIRQ; + vtime_account_irq_enter(current); + current_restore_flags(old_flags, PF_MEMALLOC); +} + +/* + * Called with interrupts disabled. Process softirqs which were raised + * in current context (or on behalf of ksoftirqd). + */ +static void do_current_softirqs(void) +{ + while (current->softirqs_raised) { + int i = __ffs(current->softirqs_raised); + unsigned int pending, mask = (1U << i); + + current->softirqs_raised &= ~mask; + local_irq_enable(); + + /* + * If the lock is contended, we boost the owner to + * process the softirq or leave the critical section + * now. + */ + lock_softirq(i); + local_irq_disable(); + softirq_set_runner(i); + /* + * Check with the local_softirq_pending() bits, + * whether we need to process this still or if someone + * else took care of it. + */ + pending = local_softirq_pending(); + if (pending & mask) { + set_softirq_pending(pending & ~mask); + do_single_softirq(i); + } + softirq_clr_runner(i); + WARN_ON(current->softirq_nestcnt != 1); + local_irq_enable(); + unlock_softirq(i); + local_irq_disable(); + } +} + +void __local_bh_disable(void) +{ + if (++current->softirq_nestcnt == 1) + migrate_disable(); +} +EXPORT_SYMBOL(__local_bh_disable); + +void __local_bh_enable(void) +{ + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + + local_irq_disable(); + if (current->softirq_nestcnt == 1 && current->softirqs_raised) + do_current_softirqs(); + local_irq_enable(); + + if (--current->softirq_nestcnt == 0) + migrate_enable(); +} +EXPORT_SYMBOL(__local_bh_enable); + +void _local_bh_enable(void) +{ + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + if (--current->softirq_nestcnt == 0) + migrate_enable(); +} +EXPORT_SYMBOL(_local_bh_enable); + +int in_serving_softirq(void) +{ + return current->flags & PF_IN_SOFTIRQ; +} +EXPORT_SYMBOL(in_serving_softirq); + +/* Called with preemption disabled */ +static void run_ksoftirqd(unsigned int cpu) +{ + local_irq_disable(); + current->softirq_nestcnt++; + + do_current_softirqs(); + current->softirq_nestcnt--; + local_irq_enable(); + cond_resched(); +} + +/* + * Called from netif_rx_ni(). Preemption enabled, but migration + * disabled. So the cpu can't go away under us. 
+ */ +void thread_do_softirq(void) +{ + if (!in_serving_softirq() && current->softirqs_raised) { + current->softirq_nestcnt++; + do_current_softirqs(); + current->softirq_nestcnt--; + } +} + +static void do_raise_softirq_irqoff(unsigned int nr) +{ + unsigned int mask; + + mask = 1UL << nr; + + trace_softirq_raise(nr); + or_softirq_pending(mask); + + /* + * If we are not in a hard interrupt and inside a bh disabled + * region, we simply raise the flag on current. local_bh_enable() + * will make sure that the softirq is executed. Otherwise we + * delegate it to ksoftirqd. + */ + if (!in_irq() && current->softirq_nestcnt) + current->softirqs_raised |= mask; + else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) + return; + + if (mask & TIMER_SOFTIRQS) + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; + else + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; +} + +static void wakeup_proper_softirq(unsigned int nr) +{ + if ((1UL << nr) & TIMER_SOFTIRQS) + wakeup_timer_softirqd(); + else + wakeup_softirqd(); +} + +void __raise_softirq_irqoff(unsigned int nr) +{ + do_raise_softirq_irqoff(nr); + if (!in_irq() && !current->softirq_nestcnt) + wakeup_proper_softirq(nr); +} + +/* + * Same as __raise_softirq_irqoff() but will process them in ksoftirqd + */ +void __raise_softirq_irqoff_ksoft(unsigned int nr) +{ + unsigned int mask; + + if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || + !__this_cpu_read(ktimer_softirqd))) + return; + mask = 1UL << nr; + + trace_softirq_raise(nr); + or_softirq_pending(mask); + if (mask & TIMER_SOFTIRQS) + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; + else + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; + wakeup_proper_softirq(nr); +} + +/* + * This function must run with irqs disabled! + */ +void raise_softirq_irqoff(unsigned int nr) +{ + do_raise_softirq_irqoff(nr); + + /* + * If we're in a hard interrupt we let irq return code deal + * with the wakeup of ksoftirqd. + */ + if (in_irq()) + return; + /* + * If we are in thread context but outside of a bh disabled + * region, we need to wake ksoftirqd as well. + * + * CHECKME: Some of the places which do that could be wrapped + * into local_bh_disable/enable pairs. Though it's unclear + * whether this is worth the effort. To find those places just + * raise a WARN() if the condition is met. 
+ */ + if (!current->softirq_nestcnt) + wakeup_proper_softirq(nr); +} + +static inline int ksoftirqd_softirq_pending(void) +{ + return current->softirqs_raised; +} + +static inline void local_bh_disable_nort(void) { } +static inline void _local_bh_enable_nort(void) { } + +static inline void ksoftirqd_set_sched_params(unsigned int cpu) +{ + /* Take over all but timer pending softirqs when starting */ + local_irq_disable(); + current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; + local_irq_enable(); +} + +static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) +{ + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler(current, SCHED_FIFO, &param); + + /* Take over timer pending softirqs when starting */ + local_irq_disable(); + current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; + local_irq_enable(); +} + +static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, + bool online) +{ + struct sched_param param = { .sched_priority = 0 }; + + sched_setscheduler(current, SCHED_NORMAL, &param); +} + +static int ktimer_softirqd_should_run(unsigned int cpu) +{ + return current->softirqs_raised; +} + +#endif /* PREEMPT_RT_FULL */ /* * Enter an interrupt context. */ @@ -349,9 +821,9 @@ void irq_enter(void) * Prevent raise_softirq from needlessly waking up ksoftirqd * here, as softirq will be serviced on return from interrupt. */ - local_bh_disable(); + local_bh_disable_nort(); tick_irq_enter(); - _local_bh_enable(); + _local_bh_enable_nort(); } __irq_enter(); @@ -359,6 +831,7 @@ static inline void invoke_softirq(void) { +#ifndef CONFIG_PREEMPT_RT_FULL if (ksoftirqd_running(local_softirq_pending())) return; @@ -381,6 +854,18 @@ static inline void invoke_softirq(void) } else { wakeup_softirqd(); } +#else /* PREEMPT_RT_FULL */ + unsigned long flags; + + local_irq_save(flags); + if (__this_cpu_read(ksoftirqd) && + __this_cpu_read(ksoftirqd)->softirqs_raised) + wakeup_softirqd(); + if (__this_cpu_read(ktimer_softirqd) && + __this_cpu_read(ktimer_softirqd)->softirqs_raised) + wakeup_timer_softirqd(); + local_irq_restore(flags); +#endif } static inline void tick_irq_exit(void) @@ -416,26 +901,6 @@ void irq_exit(void) trace_hardirq_exit(); /* must be last! */ } -/* - * This function must run with irqs disabled! - */ -inline void raise_softirq_irqoff(unsigned int nr) -{ - __raise_softirq_irqoff(nr); - - /* - * If we're in an interrupt or softirq, we're done - * (this also catches softirq-disabled code). We will - * actually run the softirq once we return from - * the irq or softirq. - * - * Otherwise we wake up ksoftirqd to make sure we - * schedule the softirq soon. - */ - if (!in_interrupt()) - wakeup_softirqd(); -} - void raise_softirq(unsigned int nr) { unsigned long flags; @@ -445,12 +910,6 @@ void raise_softirq(unsigned int nr) local_irq_restore(flags); } -void __raise_softirq_irqoff(unsigned int nr) -{ - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); -} - void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; @@ -475,11 +934,44 @@ static void __tasklet_schedule_common(struct tasklet_struct *t, unsigned long flags; local_irq_save(flags); + if (!tasklet_trylock(t)) { + local_irq_restore(flags); + return; + } + head = this_cpu_ptr(headp); - t->next = NULL; - *head->tail = t; - head->tail = &(t->next); - raise_softirq_irqoff(softirq_nr); +again: + /* We may have been preempted before tasklet_trylock + * and __tasklet_action may have already run. 
+ * So double check the sched bit while the tasklet + * is locked before adding it to the list. + */ + if (test_bit(TASKLET_STATE_SCHED, &t->state)) { +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) { + tasklet_unlock(t); + return; + } +#endif + t->next = NULL; + *head->tail = t; + head->tail = &(t->next); + raise_softirq_irqoff(softirq_nr); + tasklet_unlock(t); + } else { + /* This is subtle. If we hit the corner case above, + * it is possible that we get preempted right here, + * and another task has successfully called + * tasklet_schedule(), then this function, and + * failed on the trylock. Thus we must be sure + * before releasing the tasklet lock, that the + * SCHED_BIT is clear. Otherwise the tasklet + * may get its SCHED_BIT set, but not added to the + * list. + */ + if (!tasklet_tryunlock(t)) + goto again; + } local_irq_restore(flags); } @@ -497,11 +989,21 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) } EXPORT_SYMBOL(__tasklet_hi_schedule); +void tasklet_enable(struct tasklet_struct *t) +{ + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_schedule(t); +} +EXPORT_SYMBOL(tasklet_enable); + static void tasklet_action_common(struct softirq_action *a, struct tasklet_head *tl_head, unsigned int softirq_nr) { struct tasklet_struct *list; + int loops = 1000000; local_irq_disable(); list = tl_head->head; @@ -513,25 +1015,60 @@ static void tasklet_action_common(struct softirq_action *a, struct tasklet_struct *t = list; list = list->next; + /* + * Should always succeed - after a tasklet got on the + * list (after getting the SCHED bit set from 0 to 1), + * nothing but the tasklet softirq it got queued to can + * lock it: + */ + if (!tasklet_trylock(t)) { + WARN_ON(1); + continue; + } + + t->next = NULL; - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, - &t->state)) - BUG(); - t->func(t->data); + if (unlikely(atomic_read(&t->count))) { +out_disabled: + /* implicit unlock: */ + wmb(); + t->state = TASKLET_STATEF_PENDING; + continue; + } + /* + * After this point the tasklet might be rescheduled + * on another CPU, but it can only be added to another + * CPU's tasklet list if we unlock the tasklet (which we + * don't do yet). 
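+ *
+ * [Editor's note, illustrative only] The tasklet_tryunlock() loop that
+ * follows closes the remaining races: RUN is only dropped after the
+ * disable count and the SCHED bit have been re-checked, so a
+ * tasklet_disable() or tasklet_schedule() that lands while t->func()
+ * runs is either honoured in this pass ("goto again") or safely
+ * re-queued, never lost.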
+ */ + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + WARN_ON(1); +again: + t->func(t->data); + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) { +#else + while (!tasklet_tryunlock(t)) { +#endif + /* + * If it got disabled meanwhile, bail out: + */ + if (atomic_read(&t->count)) + goto out_disabled; + /* + * If it got scheduled meanwhile, re-execute + * the tasklet function: + */ + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + goto again; + if (!--loops) { + printk("hm, tasklet state: %08lx\n", t->state); + WARN_ON(1); tasklet_unlock(t); - continue; + break; } - tasklet_unlock(t); } - - local_irq_disable(); - t->next = NULL; - *tl_head->tail = t; - tl_head->tail = &t->next; - __raise_softirq_irqoff(softirq_nr); - local_irq_enable(); } } @@ -563,7 +1100,7 @@ void tasklet_kill(struct tasklet_struct *t) while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do { - yield(); + msleep(1); } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); @@ -637,25 +1174,26 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } -static int ksoftirqd_should_run(unsigned int cpu) -{ - return local_softirq_pending(); -} - -static void run_ksoftirqd(unsigned int cpu) +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +void tasklet_unlock_wait(struct tasklet_struct *t) { - local_irq_disable(); - if (local_softirq_pending()) { + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { /* - * We can safely run softirq on inline stack, as we are not deep - * in the task stack here. + * Hack for now to avoid this busy-loop: */ - __do_softirq(); - local_irq_enable(); - cond_resched(); - return; +#ifdef CONFIG_PREEMPT_RT_FULL + msleep(1); +#else + barrier(); +#endif } - local_irq_enable(); +} +EXPORT_SYMBOL(tasklet_unlock_wait); +#endif + +static int ksoftirqd_should_run(unsigned int cpu) +{ + return ksoftirqd_softirq_pending(); } #ifdef CONFIG_HOTPLUG_CPU @@ -722,17 +1260,31 @@ static int takeover_tasklets(unsigned int cpu) static struct smp_hotplug_thread softirq_threads = { .store = &ksoftirqd, + .setup = ksoftirqd_set_sched_params, .thread_should_run = ksoftirqd_should_run, .thread_fn = run_ksoftirqd, .thread_comm = "ksoftirqd/%u", }; +#ifdef CONFIG_PREEMPT_RT_FULL +static struct smp_hotplug_thread softirq_timer_threads = { + .store = &ktimer_softirqd, + .setup = ktimer_softirqd_set_sched_params, + .cleanup = ktimer_softirqd_clr_sched_params, + .thread_should_run = ktimer_softirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ktimersoftd/%u", +}; +#endif + static __init int spawn_ksoftirqd(void) { cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, takeover_tasklets); BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); - +#ifdef CONFIG_PREEMPT_RT_FULL + BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); +#endif return 0; } early_initcall(spawn_ksoftirqd); diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 067cb83f37eae5644fa84e29a614abd6b4d8a3de..2d15c0d50625f446bbc6f2264aaf8199850eb143 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -86,8 +86,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) enabled = stopper->enabled; if (enabled) __cpu_stop_queue_work(stopper, work, &wakeq); - else if (work->done) - cpu_stop_signal_done(work->done); + else { + work->disabled = true; + if (work->done) + cpu_stop_signal_done(work->done); + } 
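/*
 * [Editor's sketch -- illustrative, not part of the patch] ktimersoftd
 * above is wired up through the generic smpboot per-CPU thread
 * framework, the same mechanism ksoftirqd already uses: one descriptor
 * produces a kthread per CPU, and the framework drives the
 * should-run/run callbacks. Minimal usage sketch with hypothetical
 * callback names:
 */
static DEFINE_PER_CPU(struct task_struct *, example_task);

static void example_setup(unsigned int cpu) { /* e.g. set SCHED_FIFO */ }
static int example_should_run(unsigned int cpu) { return 0; }
static void example_fn(unsigned int cpu) { /* do the per-CPU work */ }

static struct smp_hotplug_thread example_threads = {
	.store			= &example_task,
	.setup			= example_setup,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_fn,
	.thread_comm		= "example/%u",
};

static __init int spawn_example_threads(void)
{
	return smpboot_register_percpu_thread(&example_threads);
}
early_initcall(spawn_example_threads);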
raw_spin_unlock_irqrestore(&stopper->lock, flags); wake_up_q(&wakeq); diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 6a2ba39889bd030f87cefb572123a468a5b790c7..e7b983df6996fb6bf9664582fddc65d928fd368c 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm) int ret = alarm_try_to_cancel(alarm); if (ret >= 0) return ret; - cpu_relax(); + hrtimer_grab_expiry_lock(&alarm->timer); } } EXPORT_SYMBOL_GPL(alarm_cancel); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 32ee24f5142ab494215bdd2d596e7e2e56c76d70..a84673149881574285936b43a56cad5b9263c0cd 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -150,6 +150,11 @@ static struct hrtimer_cpu_base migration_cpu_base = { #define migration_base migration_cpu_base.clock_base[0] +static inline bool is_migration_base(struct hrtimer_clock_base *base) +{ + return base == &migration_base; +} + /* * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock * means that all timers which are tied to this base via timer->base are @@ -274,6 +279,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, #else /* CONFIG_SMP */ +static inline bool is_migration_base(struct hrtimer_clock_base *base) +{ + return false; +} + static inline struct hrtimer_clock_base * lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { @@ -957,6 +967,16 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) } EXPORT_SYMBOL_GPL(hrtimer_forward); +void hrtimer_grab_expiry_lock(const struct hrtimer *timer) +{ + struct hrtimer_clock_base *base = READ_ONCE(timer->base); + + if (timer->is_soft && !is_migration_base(base)) { + spin_lock(&base->cpu_base->softirq_expiry_lock); + spin_unlock(&base->cpu_base->softirq_expiry_lock); + } +} + /* * enqueue_hrtimer - internal function to (re)start a timer * @@ -1175,7 +1195,9 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft * match. */ +#ifndef CONFIG_PREEMPT_RT_BASE WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); +#endif base = lock_hrtimer_base(timer, &flags); @@ -1238,7 +1260,7 @@ int hrtimer_cancel(struct hrtimer *timer) if (ret >= 0) return ret; - cpu_relax(); + hrtimer_grab_expiry_lock(timer); } } EXPORT_SYMBOL_GPL(hrtimer_cancel); @@ -1335,10 +1357,17 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id) static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode) { - bool softtimer = !!(mode & HRTIMER_MODE_SOFT); - int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; + bool softtimer; + int base; struct hrtimer_cpu_base *cpu_base; + softtimer = !!(mode & HRTIMER_MODE_SOFT); +#ifdef CONFIG_PREEMPT_RT_FULL + if (!softtimer && !(mode & HRTIMER_MODE_HARD)) + softtimer = true; +#endif + base = softtimer ? 
HRTIMER_MAX_CLOCK_BASES / 2 : 0; + memset(timer, 0, sizeof(struct hrtimer)); cpu_base = raw_cpu_ptr(&hrtimer_bases); @@ -1535,6 +1564,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) unsigned long flags; ktime_t now; + spin_lock(&cpu_base->softirq_expiry_lock); raw_spin_lock_irqsave(&cpu_base->lock, flags); now = hrtimer_update_base(cpu_base); @@ -1544,6 +1574,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) hrtimer_update_softirq_timer(cpu_base, true); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); + spin_unlock(&cpu_base->softirq_expiry_lock); } #ifdef CONFIG_HIGH_RES_TIMERS @@ -1715,13 +1746,52 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) return HRTIMER_NORESTART; } -void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) +static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode, + struct task_struct *task) { +#ifdef CONFIG_PREEMPT_RT_FULL + if (!(mode & (HRTIMER_MODE_SOFT | HRTIMER_MODE_HARD))) { + if (task_is_realtime(current) || system_state != SYSTEM_RUNNING) + mode |= HRTIMER_MODE_HARD; + else + mode |= HRTIMER_MODE_SOFT; + } +#endif + __hrtimer_init(&sl->timer, clock_id, mode); sl->timer.function = hrtimer_wakeup; sl->task = task; } + +/** + * hrtimer_init_sleeper - initialize sleeper to the given clock + * @sl: sleeper to be initialized + * @clock_id: the clock to be used + * @mode: timer mode abs/rel + * @task: the task to wake up + */ +void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode, struct task_struct *task) +{ + debug_init(&sl->timer, clock_id, mode); + __hrtimer_init_sleeper(sl, clock_id, mode, task); + +} EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); +#ifdef CONFIG_DEBUG_OBJECTS_TIMERS +void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode, + struct task_struct *task) +{ + debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); + __hrtimer_init_sleeper(sl, clock_id, mode, task); +} +EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack); +#endif + int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) { switch(restart->nanosleep.type) { @@ -1745,8 +1815,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod { struct restart_block *restart; - hrtimer_init_sleeper(t, current); - do { set_current_state(TASK_INTERRUPTIBLE); hrtimer_start_expires(&t->timer, mode); @@ -1754,12 +1822,12 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod if (likely(t->task)) freezable_schedule(); + __set_current_state(TASK_RUNNING); hrtimer_cancel(&t->timer); mode = HRTIMER_MODE_ABS; } while (t->task && !signal_pending(current)); - __set_current_state(TASK_RUNNING); if (!t->task) return 0; @@ -1783,10 +1851,9 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) struct hrtimer_sleeper t; int ret; - hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, - HRTIMER_MODE_ABS); + hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, + HRTIMER_MODE_ABS, current); hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); - ret = do_nanosleep(&t, HRTIMER_MODE_ABS); destroy_hrtimer_on_stack(&t.timer); return ret; @@ -1804,7 +1871,7 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp, if (dl_task(current) || rt_task(current)) slack = 0; - hrtimer_init_on_stack(&t.timer, clockid, mode); + 
hrtimer_init_sleeper_on_stack(&t, clockid, mode, current); hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); ret = do_nanosleep(&t, mode); if (ret != -ERESTART_RESTARTBLOCK) @@ -1864,6 +1931,38 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, } #endif +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * Sleep for 1 ms in the hope that whoever holds what we want will let it go. + */ +void cpu_chill(void) +{ + unsigned int freeze_flag = current->flags & PF_NOFREEZE; + struct task_struct *self = current; + ktime_t chill_time; + + raw_spin_lock_irq(&self->pi_lock); + self->saved_state = self->state; + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock_irq(&self->pi_lock); + + chill_time = ktime_set(0, NSEC_PER_MSEC); + + current->flags |= PF_NOFREEZE; + sleeping_lock_inc(); + schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD); + sleeping_lock_dec(); + if (!freeze_flag) + current->flags &= ~PF_NOFREEZE; + + raw_spin_lock_irq(&self->pi_lock); + __set_current_state_no_track(self->saved_state); + self->saved_state = TASK_RUNNING; + raw_spin_unlock_irq(&self->pi_lock); +} +EXPORT_SYMBOL(cpu_chill); +#endif + /* * Functions related to boot-time initialization: */ @@ -1885,6 +1984,7 @@ int hrtimers_prepare_cpu(unsigned int cpu) cpu_base->softirq_next_timer = NULL; cpu_base->expires_next = KTIME_MAX; cpu_base->softirq_expires_next = KTIME_MAX; + spin_lock_init(&cpu_base->softirq_expiry_lock); return 0; } @@ -2003,11 +2103,9 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, return -EINTR; } - hrtimer_init_on_stack(&t.timer, clock_id, mode); + hrtimer_init_sleeper_on_stack(&t, clock_id, mode, current); hrtimer_set_expires_range_ns(&t.timer, *expires, delta); - hrtimer_init_sleeper(&t, current); - hrtimer_start_expires(&t.timer, mode); if (likely(t.task)) diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index 2e2b335ef101861b58128cbf2df1ea78a12d86a3..48d977f947d9fc9e2a79b4198c197f738bb3d041 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -211,6 +211,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); + hrtimer_grab_expiry_lock(timer); goto again; } expires = timeval_to_ktime(value->it_value); diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 497719127bf9f65c1c992874abc33f0c52d5c766..62acb8914c9e682db7591c6b87c58a5fffa93b60 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = { .max_cycles = 10, }; -__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); +__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); +__cacheline_aligned_in_smp seqcount_t jiffies_seq; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) @@ -83,9 +84,9 @@ u64 get_jiffies_64(void) u64 ret; do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); ret = jiffies_64; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); return ret; } EXPORT_SYMBOL(get_jiffies_64); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index bfaa44a80c0346ab954a176d583f68f964e3e0c6..b9e4ccbb60f34d22d31cd99621c53735aed466d1 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -3,8 +3,10 @@ * Implement CPU time clocks for the POSIX clock interface. 
*/ +#include <uapi/linux/sched/types.h> #include <linux/sched/signal.h> #include <linux/sched/cputime.h> +#include <linux/sched/rt.h> #include <linux/posix-timers.h> #include <linux/errno.h> #include <linux/math64.h> @@ -15,6 +17,7 @@ #include <linux/workqueue.h> #include <linux/compat.h> #include <linux/sched/deadline.h> +#include <linux/smpboot.h> #include "posix-timers.h" @@ -789,6 +792,7 @@ check_timers_list(struct list_head *timers, return t->expires; t->firing = 1; + t->firing_cpu = smp_processor_id(); list_move_tail(&t->entry, firing); } @@ -1135,18 +1139,31 @@ static inline int fastpath_timer_check(struct task_struct *tsk) return 0; } +static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock); + +void cpu_timers_grab_expiry_lock(struct k_itimer *timer) +{ + int cpu = timer->it.cpu.firing_cpu; + + if (cpu >= 0) { + spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu); + + spin_lock_irq(expiry_lock); + spin_unlock_irq(expiry_lock); + } +} + /* * This is called from the timer interrupt handler. The irq handler has * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. */ -void run_posix_cpu_timers(struct task_struct *tsk) +static void __run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; unsigned long flags; - - lockdep_assert_irqs_disabled(); + spinlock_t *expiry_lock; /* * The fast path checks that there are no expired thread or thread @@ -1155,8 +1172,13 @@ void run_posix_cpu_timers(struct task_struct *tsk) if (!fastpath_timer_check(tsk)) return; - if (!lock_task_sighand(tsk, &flags)) + expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock); + spin_lock(expiry_lock); + + if (!lock_task_sighand(tsk, &flags)) { + spin_unlock(expiry_lock); return; + } /* * Here we take off tsk->signal->cpu_timers[N] and * tsk->cpu_timers[N] all the timers that are firing, and @@ -1189,6 +1211,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) list_del_init(&timer->it.cpu.entry); cpu_firing = timer->it.cpu.firing; timer->it.cpu.firing = 0; + timer->it.cpu.firing_cpu = -1; /* * The firing flag is -1 if we collided with a reset * of the timer, which already reported this @@ -1198,8 +1221,156 @@ void run_posix_cpu_timers(struct task_struct *tsk) cpu_timer_fire(timer); spin_unlock(&timer->it_lock); } + spin_unlock(expiry_lock); +} + +#ifdef CONFIG_PREEMPT_RT_BASE +#include <linux/kthread.h> +#include <linux/cpu.h> +DEFINE_PER_CPU(struct task_struct *, posix_timer_task); +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); +DEFINE_PER_CPU(bool, posix_timer_th_active); + +static void posix_cpu_kthread_fn(unsigned int cpu) +{ + struct task_struct *tsk = NULL; + struct task_struct *next = NULL; + + BUG_ON(per_cpu(posix_timer_task, cpu) != current); + + /* grab task list */ + raw_local_irq_disable(); + tsk = per_cpu(posix_timer_tasklist, cpu); + per_cpu(posix_timer_tasklist, cpu) = NULL; + raw_local_irq_enable(); + + /* it's possible the list is empty, just return */ + if (!tsk) + return; + + /* Process task list */ + while (1) { + /* save next */ + next = tsk->posix_timer_list; + + /* run the task timers, clear its ptr and + * unreference it + */ + __run_posix_cpu_timers(tsk); + tsk->posix_timer_list = NULL; + put_task_struct(tsk); + + /* check if this is the last on the list */ + if (next == tsk) + break; + tsk = next; + } +} + +static inline int __fastpath_timer_check(struct task_struct *tsk) +{ + /* tsk == current, ensure it is safe to use ->signal/sighand */ + if (unlikely(tsk->exit_state)) + return 0; + + if (!task_cputime_zero(&tsk->cputime_expires)) + return 1; + + if (!task_cputime_zero(&tsk->signal->cputime_expires)) + return 1; + + return 0; } +void run_posix_cpu_timers(struct task_struct *tsk) +{ +
unsigned int cpu = smp_processor_id(); + struct task_struct *tasklist; + + BUG_ON(!irqs_disabled()); + + if (per_cpu(posix_timer_th_active, cpu) != true) + return; + + /* get per-cpu references */ + tasklist = per_cpu(posix_timer_tasklist, cpu); + + /* check to see if we're already queued */ + if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { + get_task_struct(tsk); + if (tasklist) { + tsk->posix_timer_list = tasklist; + } else { + /* + * The list is terminated by a self-pointing + * task_struct + */ + tsk->posix_timer_list = tsk; + } + per_cpu(posix_timer_tasklist, cpu) = tsk; + + wake_up_process(per_cpu(posix_timer_task, cpu)); + } +} + +static int posix_cpu_kthread_should_run(unsigned int cpu) +{ + return __this_cpu_read(posix_timer_tasklist) != NULL; +} + +static void posix_cpu_kthread_park(unsigned int cpu) +{ + this_cpu_write(posix_timer_th_active, false); +} + +static void posix_cpu_kthread_unpark(unsigned int cpu) +{ + this_cpu_write(posix_timer_th_active, true); +} + +static void posix_cpu_kthread_setup(unsigned int cpu) +{ + struct sched_param sp; + + sp.sched_priority = MAX_RT_PRIO - 1; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); + posix_cpu_kthread_unpark(cpu); +} + +static struct smp_hotplug_thread posix_cpu_thread = { + .store = &posix_timer_task, + .thread_should_run = posix_cpu_kthread_should_run, + .thread_fn = posix_cpu_kthread_fn, + .thread_comm = "posixcputmr/%u", + .setup = posix_cpu_kthread_setup, + .park = posix_cpu_kthread_park, + .unpark = posix_cpu_kthread_unpark, +}; + +static int __init posix_cpu_thread_init(void) +{ + /* Start one for boot CPU. */ + unsigned long cpu; + int ret; + + /* init the per-cpu posix_timer_tasklets */ + for_each_possible_cpu(cpu) + per_cpu(posix_timer_tasklist, cpu) = NULL; + + ret = smpboot_register_percpu_thread(&posix_cpu_thread); + WARN_ON(ret); + + return 0; +} +early_initcall(posix_cpu_thread_init); +#else /* CONFIG_PREEMPT_RT_BASE */ +void run_posix_cpu_timers(struct task_struct *tsk) +{ + lockdep_assert_irqs_disabled(); + __run_posix_cpu_timers(tsk); +} +#endif /* CONFIG_PREEMPT_RT_BASE */ + /* * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. @@ -1318,6 +1489,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, spin_unlock_irq(&timer.it_lock); while (error == TIMER_RETRY) { + + cpu_timers_grab_expiry_lock(&timer); /* * We need to handle case when timer was or is in the * middle of firing. 
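The empty lock/unlock pair in cpu_timers_grab_expiry_lock() above is a wait idiom rather than ordinary locking: the CPU that expires timers holds its per-CPU expiry lock across the whole expiry pass, so briefly acquiring that lock blocks until any in-flight callback has completed. A minimal pthread model of the same handshake (all names here are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
static int callback_running;

/* The expiring CPU: holds expiry_lock across the whole callback. */
static void *expiry_side(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&expiry_lock);
    callback_running = 1;
    usleep(100 * 1000);          /* the timer callback body */
    callback_running = 0;
    pthread_mutex_unlock(&expiry_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, expiry_side, NULL);
    usleep(10 * 1000);           /* let the callback start first */

    /* The whole wait: once this pair returns, a callback that was
     * running is guaranteed to have finished. */
    pthread_mutex_lock(&expiry_lock);
    pthread_mutex_unlock(&expiry_lock);

    printf("callback_running=%d\n", callback_running);   /* prints 0 */
    pthread_join(t, NULL);
    return 0;
}

On RT this replaces polling loops around a firing timer: the waiter sleeps on the lock and can benefit from priority inheritance instead of live-locking against a preempted softirq thread.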
In other cases we already freed diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 48758108e055cd8d89d310c82dd0ca91bc36875f..3fd433d2a767ce6972c021ff85a92c3a058f308f 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -463,7 +463,7 @@ static struct k_itimer * alloc_posix_timer(void) static void k_itimer_rcu_free(struct rcu_head *head) { - struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); + struct k_itimer *tmr = container_of(head, struct k_itimer, rcu); kmem_cache_free(posix_timers_cache, tmr); } @@ -480,7 +480,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set) } put_pid(tmr->it_pid); sigqueue_free(tmr->sigq); - call_rcu(&tmr->it.rcu, k_itimer_rcu_free); + call_rcu(&tmr->rcu, k_itimer_rcu_free); } static int common_timer_create(struct k_itimer *new_timer) @@ -826,6 +826,17 @@ static int common_hrtimer_try_to_cancel(struct k_itimer *timr) return hrtimer_try_to_cancel(&timr->it.real.timer); } +static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer) +{ + if (kc->timer_arm == common_hrtimer_arm) + hrtimer_grab_expiry_lock(&timer->it.real.timer); + else if (kc == &alarm_clock) + hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer); + else + /* posix-cpu-timers */ + cpu_timers_grab_expiry_lock(timer); +} + /* Set a POSIX.1b interval timer. */ int common_timer_set(struct k_itimer *timr, int flags, struct itimerspec64 *new_setting, @@ -891,11 +902,15 @@ static int do_timer_settime(timer_t timer_id, int flags, else error = kc->timer_set(timr, flags, new_spec64, old_spec64); - unlock_timer(timr, flag); if (error == TIMER_RETRY) { + rcu_read_lock(); + unlock_timer(timr, flag); + timer_wait_for_callback(kc, timr); + rcu_read_unlock(); old_spec64 = NULL; // We already got the old time... goto retry; } + unlock_timer(timr, flag); return error; } @@ -957,13 +972,21 @@ int common_timer_del(struct k_itimer *timer) return 0; } -static inline int timer_delete_hook(struct k_itimer *timer) +static int timer_delete_hook(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; + int ret; if (WARN_ON_ONCE(!kc || !kc->timer_del)) return -EINVAL; - return kc->timer_del(timer); + ret = kc->timer_del(timer); + if (ret == TIMER_RETRY) { + rcu_read_lock(); + spin_unlock_irq(&timer->it_lock); + timer_wait_for_callback(kc, timer); + rcu_read_unlock(); + } + return ret; } /* Delete a POSIX.1b interval timer. 
*/ @@ -977,10 +1000,8 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) if (!timer) return -EINVAL; - if (timer_delete_hook(timer) == TIMER_RETRY) { - unlock_timer(timer, flags); + if (timer_delete_hook(timer) == TIMER_RETRY) goto retry_delete; - } spin_lock(&current->sighand->siglock); list_del(&timer->list); @@ -1006,10 +1027,9 @@ static void itimer_delete(struct k_itimer *timer) retry_delete: spin_lock_irqsave(&timer->it_lock, flags); - if (timer_delete_hook(timer) == TIMER_RETRY) { - unlock_timer(timer, flags); + if (timer_delete_hook(timer) == TIMER_RETRY) goto retry_delete; - } + list_del(&timer->list); /* * This keeps any tasks waiting on the spin lock from thinking diff --git a/kernel/time/posix-timers.h index ddb21145211a0280f9b20667a33b9503ce62d776..725bd230a8db425ad209eef2be931c0637cb5e9c 100644 --- a/kernel/time/posix-timers.h +++ b/kernel/time/posix-timers.h @@ -32,6 +32,8 @@ extern const struct k_clock clock_process; extern const struct k_clock clock_thread; extern const struct k_clock alarm_clock; +extern void cpu_timers_grab_expiry_lock(struct k_itimer *timer); + int posix_timer_event(struct k_itimer *timr, int si_private); void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting); diff --git a/kernel/time/tick-broadcast-hrtimer.c index a836efd34589595f12087f11c9798e9ebb23293b..c50e8f3262deae65aeb03da6a50a2aadf505d833 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -107,7 +107,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) void tick_setup_hrtimer_broadcast(void) { - hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); bctimer.function = bc_handler; clockevents_register_device(&ce_broadcast_hrtimer); } diff --git a/kernel/time/tick-common.c index 0a3cc37e4b8387a067ba8e2ed49fbf31678c4853..7bd136b646d4431c5bce35da1586456f216c7079 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -80,13 +80,15 @@ int tick_is_oneshot_available(void) static void tick_periodic(int cpu) { if (tick_do_timer_cpu == cpu) { - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); /* Keep track of the next tick event */ tick_next_period = ktime_add(tick_next_period, tick_period); do_timer(1); - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); update_wall_time(); } @@ -158,9 +160,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) ktime_t next; do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); next = tick_next_period; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); diff --git a/kernel/time/tick-sched.c index 48403fb653c2f5188cefd460cb714396c9d04e89..4d31ec98e968445b5fbe19f7e3a8281906dd523f 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -68,7 +68,8 @@ static void tick_do_update_jiffies64(ktime_t now) return; /* Reevaluate with jiffies_lock held */ - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); delta = ktime_sub(now, last_jiffies_update); if (delta >= tick_period) { @@ -94,10 +95,12 @@ static void tick_do_update_jiffies64(ktime_t now) /* Keep the tick_next_period variable
up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); } else { - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); return; } - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); update_wall_time(); } @@ -108,12 +111,14 @@ static ktime_t tick_init_jiffy_update(void) { ktime_t period; - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); /* Did we start the jiffies update yet ? */ if (last_jiffies_update == 0) last_jiffies_update = tick_next_period; period = last_jiffies_update; - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); return period; } @@ -231,6 +236,7 @@ static void nohz_full_kick_func(struct irq_work *work) static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { .func = nohz_full_kick_func, + .flags = IRQ_WORK_HARD_IRQ, }; /* @@ -656,10 +662,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) /* Read jiffies and the time when jiffies were updated last */ do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); basemono = last_jiffies_update; basejiff = jiffies; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); ts->last_jiffies = basejiff; ts->timer_expires_base = basemono; @@ -890,14 +896,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { - static int ratelimit; - - if (ratelimit < 10 && - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { - pr_warn("NOHZ: local_softirq_pending %02x\n", - (unsigned int) local_softirq_pending()); - ratelimit++; - } + softirq_check_pending_idle(); return false; } @@ -1309,7 +1308,7 @@ void tick_setup_sched_timer(void) /* * Emulate tick processing via per-CPU hrtimers: */ - hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); ts->sched_timer.function = tick_sched_timer; /* Get the next period (per-CPU) */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f246818e35dbce03e9e591582d7c83242c2a5bd5..2cc8a1e8e94f2f2c3f6fd421137c173aa23c03d6 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2392,8 +2392,10 @@ EXPORT_SYMBOL(hardpps); */ void xtime_update(unsigned long ticks) { - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); do_timer(ticks); - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); update_wall_time(); } diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h index 141ab3ab0354f39fdb5daf7274e8d061d90a556c..099737f6f10c77bc8e27e75d2c09fe6f107d37cf 100644 --- a/kernel/time/timekeeping.h +++ b/kernel/time/timekeeping.h @@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { } extern void do_timer(unsigned long ticks); extern void update_wall_time(void); -extern seqlock_t jiffies_lock; +extern raw_spinlock_t jiffies_lock; +extern seqcount_t jiffies_seq; #define CS_NAME_LEN 32 diff --git a/kernel/time/timer.c b/kernel/time/timer.c index a6e88d9bb931cd8c572b386375ff05730f212e53..a2be2277506d222985d8cb738a10efc246a2533d 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -198,6 +198,7 @@ EXPORT_SYMBOL(jiffies_64); struct timer_base { raw_spinlock_t lock; struct 
timer_list *running_timer; + spinlock_t expiry_lock; unsigned long clk; unsigned long next_expiry; unsigned int cpu; @@ -214,8 +215,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); static DEFINE_MUTEX(timer_keys_mutex); -static void timer_update_keys(struct work_struct *work); -static DECLARE_WORK(timer_update_work, timer_update_keys); +static struct swork_event timer_update_swork; #ifdef CONFIG_SMP unsigned int sysctl_timer_migration = 1; @@ -233,7 +233,7 @@ static void timers_update_migration(void) static inline void timers_update_migration(void) { } #endif /* !CONFIG_SMP */ -static void timer_update_keys(struct work_struct *work) +static void timer_update_keys(struct swork_event *event) { mutex_lock(&timer_keys_mutex); timers_update_migration(); @@ -243,9 +243,17 @@ static void timer_update_keys(struct work_struct *work) void timers_update_nohz(void) { - schedule_work(&timer_update_work); + swork_queue(&timer_update_swork); } +static __init int hrtimer_init_thread(void) +{ + WARN_ON(swork_get()); + INIT_SWORK(&timer_update_swork, timer_update_keys); + return 0; +} +early_initcall(hrtimer_init_thread); + int timer_migration_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -1219,14 +1227,8 @@ int del_timer(struct timer_list *timer) } EXPORT_SYMBOL(del_timer); -/** - * try_to_del_timer_sync - Try to deactivate a timer - * @timer: timer to delete - * - * This function tries to deactivate a timer. Upon successful (ret >= 0) - * exit the timer is not queued and the handler is not running on any CPU. - */ -int try_to_del_timer_sync(struct timer_list *timer) +static int __try_to_del_timer_sync(struct timer_list *timer, + struct timer_base **basep) { struct timer_base *base; unsigned long flags; @@ -1234,7 +1236,7 @@ int try_to_del_timer_sync(struct timer_list *timer) debug_assert_init(timer); - base = lock_timer_base(timer, &flags); + *basep = base = lock_timer_base(timer, &flags); if (base->running_timer != timer) ret = detach_if_pending(timer, base, true); @@ -1243,9 +1245,42 @@ int try_to_del_timer_sync(struct timer_list *timer) return ret; } + +/** + * try_to_del_timer_sync - Try to deactivate a timer + * @timer: timer to delete + * + * This function tries to deactivate a timer. Upon successful (ret >= 0) + * exit the timer is not queued and the handler is not running on any CPU. + */ +int try_to_del_timer_sync(struct timer_list *timer) +{ + struct timer_base *base; + + return __try_to_del_timer_sync(timer, &base); +} EXPORT_SYMBOL(try_to_del_timer_sync); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +static int __del_timer_sync(struct timer_list *timer) +{ + struct timer_base *base; + int ret; + + for (;;) { + ret = __try_to_del_timer_sync(timer, &base); + if (ret >= 0) + return ret; + + /* + * When accessing the lock, timers of base are no longer expired + * and so timer is no longer running. + */ + spin_lock(&base->expiry_lock); + spin_unlock(&base->expiry_lock); + } +} + /** * del_timer_sync - deactivate a timer and wait for the handler to finish. * @timer: the timer to be deactivated @@ -1301,12 +1336,8 @@ int del_timer_sync(struct timer_list *timer) * could lead to deadlock. 
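The jiffies_lock rework visible in the tick code above splits the old seqlock into a raw_spinlock_t (writer exclusion) plus a bare seqcount_t (reader retry), because on RT a seqlock's embedded spinlock becomes a sleeping lock and is unusable from the timer interrupt. The retry protocol that get_jiffies_64() and tick_nohz_next_event() now open-code boils down to this C11-atomics model (memory ordering simplified relative to the kernel's seqcount):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;             /* plays the role of jiffies_seq */
static uint64_t shared_jiffies;     /* jiffies_64, may tear on 32-bit */

/* Writer side: callers already hold the raw spinlock for exclusion. */
static void write_update(uint64_t v)
{
    atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);  /* odd */
    shared_jiffies = v;
    atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);  /* even */
}

/* Reader side: lockless; retries while a write is in flight. */
static uint64_t read_value(void)
{
    unsigned int s;
    uint64_t v;

    for (;;) {
        s = atomic_load_explicit(&seq, memory_order_acquire);
        if (s & 1)
            continue;               /* writer in progress */
        v = shared_jiffies;
        if (atomic_load_explicit(&seq, memory_order_acquire) == s)
            return v;               /* no writer raced the read */
    }
}

int main(void)
{
    write_update(1234567890123ULL);
    printf("%llu\n", (unsigned long long)read_value());
    return 0;
}

Splitting the two halves lets the writer keep a raw (non-sleeping) lock while readers never block at all, which is the property the NOHZ idle path needs.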
*/ WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); - for (;;) { - int ret = try_to_del_timer_sync(timer); - if (ret >= 0) - return ret; - cpu_relax(); - } + + return __del_timer_sync(timer); } EXPORT_SYMBOL(del_timer_sync); #endif @@ -1366,13 +1397,20 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) fn = timer->function; - if (timer->flags & TIMER_IRQSAFE) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && + timer->flags & TIMER_IRQSAFE) { raw_spin_unlock(&base->lock); call_timer_fn(timer, fn); + base->running_timer = NULL; + spin_unlock(&base->expiry_lock); + spin_lock(&base->expiry_lock); raw_spin_lock(&base->lock); } else { raw_spin_unlock_irq(&base->lock); call_timer_fn(timer, fn); + base->running_timer = NULL; + spin_unlock(&base->expiry_lock); + spin_lock(&base->expiry_lock); raw_spin_lock_irq(&base->lock); } } @@ -1669,6 +1707,7 @@ static inline void __run_timers(struct timer_base *base) if (!time_after_eq(jiffies, base->clk)) return; + spin_lock(&base->expiry_lock); raw_spin_lock_irq(&base->lock); /* @@ -1695,8 +1734,8 @@ static inline void __run_timers(struct timer_base *base) while (levels--) expire_timers(base, heads + levels); } - base->running_timer = NULL; raw_spin_unlock_irq(&base->lock); + spin_unlock(&base->expiry_lock); } /* @@ -1706,6 +1745,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + irq_work_tick_soft(); + __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); @@ -1941,6 +1982,7 @@ static void __init init_timer_cpu(int cpu) base->cpu = cpu; raw_spin_lock_init(&base->lock); base->clk = jiffies; + spin_lock_init(&base->expiry_lock); } } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 063b434c89d26af997fcc913bd78c033936a890e..87bce59fa3407a66594e70755603dfba5f9477e4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2153,6 +2153,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; + entry->preempt_lazy_count = preempt_lazy_count(); entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -2163,8 +2164,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | - (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | + (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | + (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); + + entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); @@ -3362,14 +3366,17 @@ get_total_entries(struct trace_buffer *buf, static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n" - "# / _-----=> irqs-off \n" - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" - "# |||| / delay \n" - "# cmd pid ||||| time | caller \n" - "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _--------=> CPU# \n" + "# / _-------=> irqs-off \n" + "# | / _------=> need-resched \n" + "# || / _-----=> need-resched_lazy \n" + "# ||| / _----=> hardirq/softirq \n" + "# |||| / _---=> preempt-depth \n" + "# ||||| / _--=> preempt-lazy-depth\n" + "# |||||| / _-=> migrate-disable \n" + "# ||||||| / delay \n" + "# cmd pid |||||||| time | caller \n" + "# \\ / |||||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) @@ -3407,15 +3414,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); - seq_printf(m, "# %s| / _---=> hardirq/softirq\n", + seq_printf(m, "# %s| / _---=> need-resched_lazy\n", + tgid ? tgid_space : space); + seq_printf(m, "# %s|| / _--=> hardirq/softirq\n", tgid ? tgid_space : space); - seq_printf(m, "# %s|| / _--=> preempt-depth\n", + seq_printf(m, "# %s||| / _-=> preempt-depth\n", tgid ? tgid_space : space); - seq_printf(m, "# %s||| / delay\n", + seq_printf(m, "# %s|||| / delay\n", tgid ? tgid_space : space); - seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", + seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); - seq_printf(m, "# | | %s | |||| | |\n", + seq_printf(m, "# | | %s | ||||| | |\n", tgid ? " | " : space); } diff --git a/kernel/trace/trace.h index 74185fb040f331f5148b6685956839ba6e6c5d97..7740bcdad3553d7c388f6674a5f6e6012a63e163 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head { * NEED_RESCHED - reschedule is requested * HARDIRQ - inside an interrupt handler * SOFTIRQ - inside a softirq handler + * NEED_RESCHED_LAZY - lazy reschedule is requested */ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, @@ -136,6 +137,7 @@ enum trace_flag_type { TRACE_FLAG_SOFTIRQ = 0x10, TRACE_FLAG_PREEMPT_RESCHED = 0x20, TRACE_FLAG_NMI = 0x40, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x80, }; #define TRACE_BUF_SIZE 1024 diff --git a/kernel/trace/trace_events.c index 1ca64a9296d0de634b92ecd93bc5038813e9f880..9b1b1c6de2bd55c964d7b96af1d127832c51ec16 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -188,6 +188,8 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); + __common_field(unsigned char, migrate_disable); + __common_field(unsigned char, preempt_lazy_count); return ret; } diff --git a/kernel/trace/trace_hwlat.c index ade6c3070c62f327026f2b1842157d9eb6f69633..164e5c618cce158f47a50557f3b11dab3cd9d041 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -280,7 +280,7 @@ static void move_to_next_cpu(void) * of this thread, than stop migrating for the duration * of the current test.
*/ - if (!cpumask_equal(current_mask, &current->cpus_allowed)) + if (!cpumask_equal(current_mask, current->cpus_ptr)) goto disable; get_online_cpus(); diff --git a/kernel/trace/trace_output.c index 6e6cc64faa389b72012f58912a32114ad45cf704..3f78b0afb7291b428390590c5f1126b08d97077c 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -448,6 +448,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) { char hardsoft_irq; char need_resched; + char need_resched_lazy; char irqs_off; int hardirq; int softirq; @@ -478,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) break; } + need_resched_lazy = + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; + hardsoft_irq = (nmi && hardirq) ? 'Z' : nmi ? 'z' : @@ -486,14 +490,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) softirq ? 's' : '.' ; - trace_seq_printf(s, "%c%c%c", - irqs_off, need_resched, hardsoft_irq); + trace_seq_printf(s, "%c%c%c%c", + irqs_off, need_resched, need_resched_lazy, + hardsoft_irq); if (entry->preempt_count) trace_seq_printf(s, "%x", entry->preempt_count); else trace_seq_putc(s, '.'); + if (entry->preempt_lazy_count) + trace_seq_printf(s, "%x", entry->preempt_lazy_count); + else + trace_seq_putc(s, '.'); + + if (entry->migrate_disable) + trace_seq_printf(s, "%x", entry->migrate_disable); + else + trace_seq_putc(s, '.'); + return !trace_seq_has_overflowed(s); } diff --git a/kernel/watchdog.c index 6d60701dc6361e3cc4f88a0be7ccd6540205cc43..328620fe85f68597d1e19c707d7c58575d932475 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -485,7 +485,7 @@ static void watchdog_enable(unsigned int cpu) * Start the timer first to prevent the NMI watchdog triggering * before the timer has a chance to fire.
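The widened latency columns above decode per the sketch below: an 'L' when a lazy reschedule is pending, then hex digits (or '.') for preempt depth, preempt-lazy depth, and migrate-disable depth. The flag values come from the trace.h hunk above; NEED_RESCHED (0x04) and HARDIRQ (0x08) are the existing upstream values not visible in that hunk, and print_lat_cols() itself is invented for the example.

#include <stdio.h>

#define TRACE_FLAG_IRQS_OFF          0x01
#define TRACE_FLAG_NEED_RESCHED      0x04
#define TRACE_FLAG_HARDIRQ           0x08
#define TRACE_FLAG_SOFTIRQ           0x10
#define TRACE_FLAG_NEED_RESCHED_LAZY 0x80

static void print_lat_cols(unsigned int flags, unsigned int preempt_count,
                           unsigned int preempt_lazy_count,
                           unsigned int migrate_disable)
{
    char irqs_off = (flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
    char need_resched = (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
    char need_resched_lazy =
        (flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
    char hardsoft_irq = (flags & TRACE_FLAG_HARDIRQ) ? 'h' :
                        (flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.';

    printf("%c%c%c%c", irqs_off, need_resched, need_resched_lazy,
           hardsoft_irq);
    if (preempt_count) printf("%x", preempt_count); else putchar('.');
    if (preempt_lazy_count) printf("%x", preempt_lazy_count); else putchar('.');
    if (migrate_disable) printf("%x", migrate_disable); else putchar('.');
    putchar('\n');
}

int main(void)
{
    /* irqs off, lazy resched wanted, in softirq, preempt_count 1,
     * migrate_disable 2: prints "d.Ls1.2" */
    print_lat_cols(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_NEED_RESCHED_LAZY |
                   TRACE_FLAG_SOFTIRQ, 1, 0, 2);
    return 0;
}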
*/ - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); hrtimer->function = watchdog_timer_fn; hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL_PINNED); diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 71381168dedef4e88382a1849412f554a4cb4a56..685443375dc0dfe9cc7c66a524756b0b58ab00a4 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -24,6 +24,8 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); static DEFINE_PER_CPU(struct perf_event *, dead_event); +static DEFINE_RAW_SPINLOCK(watchdog_output_lock); + static struct cpumask dead_events_mask; static unsigned long hardlockup_allcpu_dumped; @@ -134,6 +136,13 @@ static void watchdog_overflow_callback(struct perf_event *event, /* only print hardlockups once */ if (__this_cpu_read(hard_watchdog_warn) == true) return; + /* + * If early-printk is enabled then make sure we do not + * lock up in printk() and kill console logging: + */ + printk_kill(); + + raw_spin_lock(&watchdog_output_lock); pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); print_modules(); @@ -151,6 +160,7 @@ static void watchdog_overflow_callback(struct perf_event *event, !test_and_set_bit(0, &hardlockup_allcpu_dumped)) trigger_allbutself_cpu_backtrace(); + raw_spin_unlock(&watchdog_output_lock); if (hardlockup_panic) nmi_panic(regs, "Hard LOCKUP"); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b1bb6cb5802ec8fe567552de776bea9d30a928fa..4ed22776b2ee4f6477ecdff488dd43069ad55acc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -128,7 +128,7 @@ enum { * * PL: wq_pool_mutex protected. * - * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. + * PR: wq_pool_mutex protected for writes. RCU protected for reads. * * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. * @@ -137,7 +137,7 @@ enum { * * WQ: wq->mutex protected. * - * WR: wq->mutex protected for writes. Sched-RCU protected for reads. + * WR: wq->mutex protected for writes. RCU protected for reads. * * MD: wq_mayday_lock protected. */ @@ -145,7 +145,7 @@ enum { /* struct worker is defined in workqueue_internal.h */ struct worker_pool { - spinlock_t lock; /* the pool lock */ + raw_spinlock_t lock; /* the pool lock */ int cpu; /* I: the associated cpu */ int node; /* I: the associated node ID */ int id; /* I: pool ID */ @@ -184,7 +184,7 @@ struct worker_pool { atomic_t nr_running ____cacheline_aligned_in_smp; /* - * Destruction of pool is sched-RCU protected to allow dereferences + * Destruction of pool is RCU protected to allow dereferences * from get_work_pool(). */ struct rcu_head rcu; @@ -213,7 +213,7 @@ struct pool_workqueue { /* * Release of unbound pwq is punted to system_wq. See put_pwq() * and pwq_unbound_release_workfn() for details. pool_workqueue - * itself is also sched-RCU protected so that the first pwq can be + * itself is also RCU protected so that the first pwq can be * determined without grabbing wq->mutex. 
*/ struct work_struct unbound_release_work; @@ -298,8 +298,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf; static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ -static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ -static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ +static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ +static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ static LIST_HEAD(workqueues); /* PR: list of all workqueues */ static bool workqueue_freezing; /* PL: have wqs started freezing? */ @@ -358,20 +358,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); #include <trace/events/workqueue.h> #define assert_rcu_or_pool_mutex() \ - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&wq_pool_mutex), \ - "sched RCU or wq_pool_mutex should be held") + "RCU or wq_pool_mutex should be held") #define assert_rcu_or_wq_mutex(wq) \ - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&wq->mutex), \ - "sched RCU or wq->mutex should be held") + "RCU or wq->mutex should be held") #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&wq->mutex) && \ !lockdep_is_held(&wq_pool_mutex), \ - "sched RCU, wq->mutex or wq_pool_mutex should be held") + "RCU, wq->mutex or wq_pool_mutex should be held") #define for_each_cpu_worker_pool(pool, cpu) \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ @@ -383,7 +383,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); * @pool: iteration cursor * @pi: integer used for iteration * - * This must be called either with wq_pool_mutex held or sched RCU read + * This must be called either with wq_pool_mutex held or RCU read * locked. If the pool needs to be used beyond the locking in effect, the * caller is responsible for guaranteeing that the pool stays online. * @@ -415,7 +415,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); * @pwq: iteration cursor * @wq: the target workqueue * - * This must be called either with wq->mutex held or sched RCU read locked. + * This must be called either with wq->mutex held or RCU read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. * @@ -551,7 +551,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) * @wq: the target workqueue * @node: the node ID * - * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU + * This must be called with any of wq_pool_mutex, wq->mutex or RCU * read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. @@ -695,8 +695,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) * @work: the work item of interest * * Pools are created and destroyed under wq_pool_mutex, and allows read - * access under sched-RCU read lock. As such, this function should be - * called under wq_pool_mutex or with preemption disabled. + * access under RCU read lock. As such, this function should be + * called under wq_pool_mutex or inside of a rcu_read_lock() region.
* * All fields of the returned pool are accessible as long as the above * mentioned locking is in effect. If the returned pool needs to be used @@ -829,7 +829,7 @@ static struct worker *first_idle_worker(struct worker_pool *pool) * Wake up the first idle worker of @pool. * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). */ static void wake_up_worker(struct worker_pool *pool) { @@ -840,43 +840,32 @@ static void wake_up_worker(struct worker_pool *pool) } /** - * wq_worker_waking_up - a worker is waking up + * wq_worker_running - a worker is running again * @task: task waking up - * @cpu: CPU @task is waking up to * - * This function is called during try_to_wake_up() when a worker is - * being awoken. - * - * CONTEXT: - * spin_lock_irq(rq->lock) + * This function is called when a worker returns from schedule() */ -void wq_worker_waking_up(struct task_struct *task, int cpu) +void wq_worker_running(struct task_struct *task) { struct worker *worker = kthread_data(task); - if (!(worker->flags & WORKER_NOT_RUNNING)) { - WARN_ON_ONCE(worker->pool->cpu != cpu); + if (!worker->sleeping) + return; + if (!(worker->flags & WORKER_NOT_RUNNING)) atomic_inc(&worker->pool->nr_running); - } + worker->sleeping = 0; } /** * wq_worker_sleeping - a worker is going to sleep * @task: task going to sleep * - * This function is called during schedule() when a busy worker is - * going to sleep. Worker on the same cpu can be woken up by - * returning pointer to its task. - * - * CONTEXT: - * spin_lock_irq(rq->lock) - * - * Return: - * Worker task on @cpu to wake up, %NULL if none. + * This function is called from schedule() when a busy worker is + * going to sleep. */ -struct task_struct *wq_worker_sleeping(struct task_struct *task) +void wq_worker_sleeping(struct task_struct *task) { - struct worker *worker = kthread_data(task), *to_wakeup = NULL; + struct worker *next, *worker = kthread_data(task); struct worker_pool *pool; /* @@ -885,13 +874,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) * checking NOT_RUNNING. */ if (worker->flags & WORKER_NOT_RUNNING) - return NULL; + return; pool = worker->pool; - /* this can only happen on the local cpu */ - if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) - return NULL; + if (WARN_ON_ONCE(worker->sleeping)) + return; + + worker->sleeping = 1; + raw_spin_lock_irq(&pool->lock); /* * The counterpart of the following dec_and_test, implied mb, @@ -905,9 +896,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) * lock is safe. */ if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) - to_wakeup = first_idle_worker(pool); - return to_wakeup ? to_wakeup->task : NULL; + !list_empty(&pool->worklist)) { + next = first_idle_worker(pool); + if (next) + wake_up_process(next->task); + } + raw_spin_unlock_irq(&pool->lock); } /** @@ -918,7 +912,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) * Set @flags in @worker->flags and adjust nr_running accordingly. * * CONTEXT: - * spin_lock_irq(pool->lock) + * raw_spin_lock_irq(pool->lock) */ static inline void worker_set_flags(struct worker *worker, unsigned int flags) { @@ -943,7 +937,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags) * Clear @flags in @worker->flags and adjust nr_running accordingly. 
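The rewritten wq_worker_sleeping()/wq_worker_running() pair above preserves the pool's concurrency-management invariant: when the last runnable worker blocks while the worklist is non-empty, one idle worker must be woken. Stripped of pool->lock and the ->sleeping re-entry guard, the accounting is essentially the following (user-space sketch; struct pool and both helpers are modeled, not the kernel types):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
    atomic_int nr_running;   /* concurrency level the pool sees */
    int worklist_len;        /* pending work items (locking elided) */
};

/* Worker blocks: if it was the last runnable one and work remains
 * queued, report that an idle worker needs waking. */
static bool worker_sleeping(struct pool *p)
{
    return atomic_fetch_sub(&p->nr_running, 1) == 1 && p->worklist_len > 0;
}

/* Worker resumes: undo the accounting. */
static void worker_running(struct pool *p)
{
    atomic_fetch_add(&p->nr_running, 1);
}

int main(void)
{
    struct pool p = { .nr_running = 1, .worklist_len = 3 };

    if (worker_sleeping(&p))
        puts("wake an idle worker");   /* taken: 1 -> 0 with work queued */
    worker_running(&p);
    printf("nr_running=%d\n", atomic_load(&p.nr_running));
    return 0;
}

The kernel version now has to take pool->lock itself to find and wake the idle worker, which is precisely why the hook could no longer run under the scheduler's rq->lock.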
* * CONTEXT: - * spin_lock_irq(pool->lock) + * raw_spin_lock_irq(pool->lock) */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { @@ -991,7 +985,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) * actually occurs, it should be easy to locate the culprit work function. * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). * * Return: * Pointer to worker which is executing @work if found, %NULL @@ -1026,7 +1020,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, * nested inside outer list_for_each_entry_safe(). * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). */ static void move_linked_works(struct work_struct *work, struct list_head *head, struct work_struct **nextp) @@ -1101,12 +1095,12 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) { if (pwq) { /* - * As both pwqs and pools are sched-RCU protected, the + * As both pwqs and pools are RCU protected, the * following lock operations are safe. */ - spin_lock_irq(&pwq->pool->lock); + raw_spin_lock_irq(&pwq->pool->lock); put_pwq(pwq); - spin_unlock_irq(&pwq->pool->lock); + raw_spin_unlock_irq(&pwq->pool->lock); } } @@ -1139,7 +1133,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq) * decrement nr_in_flight of its pwq and handle workqueue flushing. * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). */ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) { @@ -1229,6 +1223,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) return 0; + rcu_read_lock(); /* * The queueing is in progress, or it is already queued. Try to * steal it from ->worklist without clearing WORK_STRUCT_PENDING. @@ -1237,7 +1232,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, if (!pool) goto fail; - spin_lock(&pool->lock); + raw_spin_lock(&pool->lock); /* * work->data is guaranteed to point to pwq only while the work * item is queued on pwq->wq, and both updating work->data to point @@ -1266,11 +1261,13 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, /* work->data points to pwq iff queued, point to pool */ set_work_pool_and_keep_pending(work, pool->id); - spin_unlock(&pool->lock); + raw_spin_unlock(&pool->lock); + rcu_read_unlock(); return 1; } - spin_unlock(&pool->lock); + raw_spin_unlock(&pool->lock); fail: + rcu_read_unlock(); local_irq_restore(*flags); if (work_is_canceling(work)) return -ENOENT; @@ -1289,7 +1286,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, * work_struct flags. * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). 
*/ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) @@ -1383,6 +1380,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, if (unlikely(wq->flags & __WQ_DRAINING) && WARN_ON_ONCE(!is_chained_work(wq))) return; + rcu_read_lock(); retry: /* pwq which will be used unless @work is executing elsewhere */ if (wq->flags & WQ_UNBOUND) { @@ -1404,7 +1402,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, if (last_pool && last_pool != pwq->pool) { struct worker *worker; - spin_lock(&last_pool->lock); + raw_spin_lock(&last_pool->lock); worker = find_worker_executing_work(last_pool, work); @@ -1412,11 +1410,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, pwq = worker->current_pwq; } else { /* meh... not running there, queue here */ - spin_unlock(&last_pool->lock); - spin_lock(&pwq->pool->lock); + raw_spin_unlock(&last_pool->lock); + raw_spin_lock(&pwq->pool->lock); } } else { - spin_lock(&pwq->pool->lock); + raw_spin_lock(&pwq->pool->lock); } /* @@ -1429,7 +1427,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, */ if (unlikely(!pwq->refcnt)) { if (wq->flags & WQ_UNBOUND) { - spin_unlock(&pwq->pool->lock); + raw_spin_unlock(&pwq->pool->lock); cpu_relax(); goto retry; } @@ -1441,10 +1439,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, /* pwq determined, queue */ trace_workqueue_queue_work(req_cpu, pwq, work); - if (WARN_ON(!list_empty(&work->entry))) { - spin_unlock(&pwq->pool->lock); - return; - } + if (WARN_ON(!list_empty(&work->entry))) + goto out; pwq->nr_in_flight[pwq->work_color]++; work_flags = work_color_to_flags(pwq->work_color); @@ -1463,7 +1459,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, debug_work_activate(work); insert_work(pwq, work, worklist, work_flags); - spin_unlock(&pwq->pool->lock); +out: + raw_spin_unlock(&pwq->pool->lock); + rcu_read_unlock(); } /** @@ -1498,9 +1496,11 @@ EXPORT_SYMBOL(queue_work_on); void delayed_work_timer_fn(struct timer_list *t) { struct delayed_work *dwork = from_timer(dwork, t, timer); + unsigned long flags; - /* should have been called from irqsafe timer with irq already off */ + local_irq_save(flags); __queue_work(dwork->cpu, dwork->wq, &dwork->work); + local_irq_restore(flags); } EXPORT_SYMBOL(delayed_work_timer_fn); @@ -1647,7 +1647,7 @@ EXPORT_SYMBOL(queue_rcu_work); * necessary. * * LOCKING: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). */ static void worker_enter_idle(struct worker *worker) { @@ -1687,7 +1687,7 @@ static void worker_enter_idle(struct worker *worker) * @worker is leaving idle state. Update stats. * * LOCKING: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). */ static void worker_leave_idle(struct worker *worker) { @@ -1822,11 +1822,11 @@ static struct worker *create_worker(struct worker_pool *pool) worker_attach_to_pool(worker, pool); /* start the newly created worker */ - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); worker->pool->nr_workers++; worker_enter_idle(worker); wake_up_process(worker->task); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); return worker; @@ -1845,7 +1845,7 @@ static struct worker *create_worker(struct worker_pool *pool) * be idle. * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). 
*/ static void destroy_worker(struct worker *worker) { @@ -1871,7 +1871,7 @@ static void idle_worker_timeout(struct timer_list *t) { struct worker_pool *pool = from_timer(pool, t, idle_timer); - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); while (too_many_workers(pool)) { struct worker *worker; @@ -1889,7 +1889,7 @@ static void idle_worker_timeout(struct timer_list *t) destroy_worker(worker); } - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); } static void send_mayday(struct work_struct *work) @@ -1920,8 +1920,8 @@ static void pool_mayday_timeout(struct timer_list *t) struct worker_pool *pool = from_timer(pool, t, mayday_timer); struct work_struct *work; - spin_lock_irq(&pool->lock); - spin_lock(&wq_mayday_lock); /* for wq->maydays */ + raw_spin_lock_irq(&pool->lock); + raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ if (need_to_create_worker(pool)) { /* @@ -1934,8 +1934,8 @@ static void pool_mayday_timeout(struct timer_list *t) send_mayday(work); } - spin_unlock(&wq_mayday_lock); - spin_unlock_irq(&pool->lock); + raw_spin_unlock(&wq_mayday_lock); + raw_spin_unlock_irq(&pool->lock); mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); } @@ -1954,7 +1954,7 @@ static void pool_mayday_timeout(struct timer_list *t) * may_start_working() %true. * * LOCKING: - * spin_lock_irq(pool->lock) which may be released and regrabbed + * raw_spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. Does GFP_KERNEL allocations. Called only from * manager. */ @@ -1963,7 +1963,7 @@ __releases(&pool->lock) __acquires(&pool->lock) { restart: - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); @@ -1979,7 +1979,7 @@ __acquires(&pool->lock) } del_timer_sync(&pool->mayday_timer); - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* * This is necessary even after a new worker was just successfully * created as @pool->lock was dropped and the new worker might have @@ -2002,7 +2002,7 @@ __acquires(&pool->lock) * and may_start_working() is true. * * CONTEXT: - * spin_lock_irq(pool->lock) which may be released and regrabbed + * raw_spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. Does GFP_KERNEL allocations. * * Return: @@ -2025,7 +2025,7 @@ static bool manage_workers(struct worker *worker) pool->manager = NULL; pool->flags &= ~POOL_MANAGER_ACTIVE; - wake_up(&wq_manager_wait); + swake_up_one(&wq_manager_wait); return true; } @@ -2041,7 +2041,7 @@ static bool manage_workers(struct worker *worker) * call this function to process a work. * * CONTEXT: - * spin_lock_irq(pool->lock) which is released and regrabbed. + * raw_spin_lock_irq(pool->lock) which is released and regrabbed. */ static void process_one_work(struct worker *worker, struct work_struct *work) __releases(&pool->lock) @@ -2123,7 +2123,7 @@ __acquires(&pool->lock) */ set_work_pool_and_clear_pending(work, pool->id); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); @@ -2178,7 +2178,7 @@ __acquires(&pool->lock) */ cond_resched(); - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* clear cpu intensive status */ if (unlikely(cpu_intensive)) @@ -2201,7 +2201,7 @@ __acquires(&pool->lock) * fetches a work from the top and executes it. 
* * CONTEXT: - * spin_lock_irq(pool->lock) which may be released and regrabbed + * raw_spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. */ static void process_scheduled_works(struct worker *worker) @@ -2243,11 +2243,11 @@ static int worker_thread(void *__worker) /* tell the scheduler that this is a workqueue worker */ set_pf_worker(true); woke_up: - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* am I supposed to die? */ if (unlikely(worker->flags & WORKER_DIE)) { - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); WARN_ON_ONCE(!list_empty(&worker->entry)); set_pf_worker(false); @@ -2313,7 +2313,7 @@ static int worker_thread(void *__worker) */ worker_enter_idle(worker); __set_current_state(TASK_IDLE); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); schedule(); goto woke_up; } @@ -2367,7 +2367,7 @@ static int rescuer_thread(void *__rescuer) should_stop = kthread_should_stop(); /* see whether any pwq is asking for help */ - spin_lock_irq(&wq_mayday_lock); + raw_spin_lock_irq(&wq_mayday_lock); while (!list_empty(&wq->maydays)) { struct pool_workqueue *pwq = list_first_entry(&wq->maydays, @@ -2379,11 +2379,11 @@ static int rescuer_thread(void *__rescuer) __set_current_state(TASK_RUNNING); list_del_init(&pwq->mayday_node); - spin_unlock_irq(&wq_mayday_lock); + raw_spin_unlock_irq(&wq_mayday_lock); worker_attach_to_pool(rescuer, pool); - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* * Slurp in all works issued via this workqueue and @@ -2412,7 +2412,7 @@ static int rescuer_thread(void *__rescuer) * incur MAYDAY_INTERVAL delay inbetween. */ if (need_to_create_worker(pool)) { - spin_lock(&wq_mayday_lock); + raw_spin_lock(&wq_mayday_lock); /* * Queue iff we aren't racing destruction * and somebody else hasn't queued it already. @@ -2421,7 +2421,7 @@ static int rescuer_thread(void *__rescuer) get_pwq(pwq); list_add_tail(&pwq->mayday_node, &wq->maydays); } - spin_unlock(&wq_mayday_lock); + raw_spin_unlock(&wq_mayday_lock); } } @@ -2439,14 +2439,14 @@ static int rescuer_thread(void *__rescuer) if (need_more_worker(pool)) wake_up_worker(pool); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); worker_detach_from_pool(rescuer); - spin_lock_irq(&wq_mayday_lock); + raw_spin_lock_irq(&wq_mayday_lock); } - spin_unlock_irq(&wq_mayday_lock); + raw_spin_unlock_irq(&wq_mayday_lock); if (should_stop) { __set_current_state(TASK_RUNNING); @@ -2526,7 +2526,7 @@ static void wq_barrier_func(struct work_struct *work) * underneath us, so we can't reliably determine pwq from @target. * * CONTEXT: - * spin_lock_irq(pool->lock). + * raw_spin_lock_irq(pool->lock). 
*/ static void insert_wq_barrier(struct pool_workqueue *pwq, struct wq_barrier *barr, @@ -2613,7 +2613,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, for_each_pwq(pwq, wq) { struct worker_pool *pool = pwq->pool; - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); if (flush_color >= 0) { WARN_ON_ONCE(pwq->flush_color != -1); @@ -2630,7 +2630,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, pwq->work_color = work_color; } - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); } if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) @@ -2830,9 +2830,9 @@ void drain_workqueue(struct workqueue_struct *wq) for_each_pwq(pwq, wq) { bool drained; - spin_lock_irq(&pwq->pool->lock); + raw_spin_lock_irq(&pwq->pool->lock); drained = !pwq->nr_active && list_empty(&pwq->delayed_works); - spin_unlock_irq(&pwq->pool->lock); + raw_spin_unlock_irq(&pwq->pool->lock); if (drained) continue; @@ -2861,14 +2861,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, might_sleep(); - local_irq_disable(); + rcu_read_lock(); pool = get_work_pool(work); if (!pool) { - local_irq_enable(); + rcu_read_unlock(); return false; } - spin_lock(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* see the comment in try_to_grab_pending() with the same code */ pwq = get_work_pwq(work); if (pwq) { @@ -2884,7 +2884,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, check_flush_dependency(pwq->wq, work); insert_wq_barrier(pwq, barr, work, worker); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); /* * Force a lock recursion deadlock when using flush_work() inside a @@ -2900,10 +2900,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, lock_map_acquire(&pwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map); } - + rcu_read_unlock(); return true; already_gone: - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); + rcu_read_unlock(); return false; } @@ -3215,7 +3216,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context); * * Undo alloc_workqueue_attrs(). */ -void free_workqueue_attrs(struct workqueue_attrs *attrs) +static void free_workqueue_attrs(struct workqueue_attrs *attrs) { if (attrs) { free_cpumask_var(attrs->cpumask); @@ -3225,21 +3226,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs) /** * alloc_workqueue_attrs - allocate a workqueue_attrs - * @gfp_mask: allocation mask to use * * Allocate a new workqueue_attrs, initialize with default settings and * return it. * * Return: The allocated new workqueue_attr on success. %NULL on failure. 
*/ -struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) +static struct workqueue_attrs *alloc_workqueue_attrs(void) { struct workqueue_attrs *attrs; - attrs = kzalloc(sizeof(*attrs), gfp_mask); + attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); if (!attrs) goto fail; - if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) + if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) goto fail; cpumask_copy(attrs->cpumask, cpu_possible_mask); @@ -3296,7 +3296,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, */ static int init_worker_pool(struct worker_pool *pool) { - spin_lock_init(&pool->lock); + raw_spin_lock_init(&pool->lock); pool->id = -1; pool->cpu = -1; pool->node = NUMA_NO_NODE; @@ -3317,7 +3317,7 @@ static int init_worker_pool(struct worker_pool *pool) pool->refcnt = 1; /* shouldn't fail above this point */ - pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); + pool->attrs = alloc_workqueue_attrs(); if (!pool->attrs) return -ENOMEM; return 0; @@ -3350,7 +3350,7 @@ static void rcu_free_pool(struct rcu_head *rcu) * put_unbound_pool - put a worker_pool * @pool: worker_pool to put * - * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU + * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU * safe manner. get_unbound_pool() calls this function on its failure path * and this function should be able to release pools which went through, * successfully or not, init_worker_pool(). @@ -3382,15 +3382,15 @@ static void put_unbound_pool(struct worker_pool *pool) * @pool's workers from blocking on attach_mutex. We're the last * manager and @pool gets freed with the flag set. */ - spin_lock_irq(&pool->lock); - wait_event_lock_irq(wq_manager_wait, + raw_spin_lock_irq(&pool->lock); + swait_event_lock_irq(wq_manager_wait, !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); pool->flags |= POOL_MANAGER_ACTIVE; while ((worker = first_idle_worker(pool))) destroy_worker(worker); WARN_ON(pool->nr_workers || pool->nr_idle); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); mutex_lock(&wq_pool_attach_mutex); if (!list_empty(&pool->workers)) @@ -3404,8 +3404,8 @@ static void put_unbound_pool(struct worker_pool *pool) del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->mayday_timer); - /* sched-RCU protected to allow dereferences from get_work_pool() */ - call_rcu_sched(&pool->rcu, rcu_free_pool); + /* RCU protected to allow dereferences from get_work_pool() */ + call_rcu(&pool->rcu, rcu_free_pool); } /** @@ -3518,14 +3518,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) put_unbound_pool(pool); mutex_unlock(&wq_pool_mutex); - call_rcu_sched(&pwq->rcu, rcu_free_pwq); + call_rcu(&pwq->rcu, rcu_free_pwq); /* * If we're the last pwq going away, @wq is already dead and no one * is gonna access it anymore. Schedule RCU free. 
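The manager hand-off in put_unbound_pool() above, converted from wait_event_lock_irq() to swait_event_lock_irq(), is the classic wait-for-a-flag-under-a-lock shape. A condition-variable model of it (pthreads standing in for swait, which the RT tree prefers because its wakeups are short, bounded operations):

#include <pthread.h>
#include <stdio.h>

#define POOL_MANAGER_ACTIVE 0x1

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq_manager_wait = PTHREAD_COND_INITIALIZER;
static unsigned int pool_flags = POOL_MANAGER_ACTIVE;

static void *manager(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&pool_lock);
    pool_flags &= ~POOL_MANAGER_ACTIVE;     /* manage_workers() done */
    pthread_cond_signal(&wq_manager_wait);  /* swake_up_one() */
    pthread_mutex_unlock(&pool_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, manager, NULL);

    pthread_mutex_lock(&pool_lock);
    while (pool_flags & POOL_MANAGER_ACTIVE)    /* swait_event_lock_irq() */
        pthread_cond_wait(&wq_manager_wait, &pool_lock);
    pool_flags |= POOL_MANAGER_ACTIVE;          /* we own the role now */
    pthread_mutex_unlock(&pool_lock);

    puts("manager role acquired");
    pthread_join(t, NULL);
    return 0;
}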
*/ if (is_last) - call_rcu_sched(&wq->rcu, rcu_free_wq); + call_rcu(&wq->rcu, rcu_free_wq); } /** @@ -3550,7 +3550,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) return; /* this function can be called during early boot w/ irq disabled */ - spin_lock_irqsave(&pwq->pool->lock, flags); + raw_spin_lock_irqsave(&pwq->pool->lock, flags); /* * During [un]freezing, the caller is responsible for ensuring that @@ -3580,7 +3580,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) pwq->max_active = 0; } - spin_unlock_irqrestore(&pwq->pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); } /* initialize newly alloced @pwq which is associated with @wq and @pool */ @@ -3753,8 +3753,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL); - new_attrs = alloc_workqueue_attrs(GFP_KERNEL); - tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); + new_attrs = alloc_workqueue_attrs(); + tmp_attrs = alloc_workqueue_attrs(); if (!ctx || !new_attrs || !tmp_attrs) goto out_free; @@ -3890,7 +3890,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, * * Return: 0 on success and -errno on failure. */ -int apply_workqueue_attrs(struct workqueue_struct *wq, +static int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) { int ret; @@ -3901,7 +3901,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, return ret; } -EXPORT_SYMBOL_GPL(apply_workqueue_attrs); /** * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug @@ -3979,9 +3978,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, use_dfl_pwq: mutex_lock(&wq->mutex); - spin_lock_irq(&wq->dfl_pwq->pool->lock); + raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); get_pwq(wq->dfl_pwq); - spin_unlock_irq(&wq->dfl_pwq->pool->lock); + raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); out_unlock: mutex_unlock(&wq->mutex); @@ -4100,7 +4099,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, return NULL; if (flags & WQ_UNBOUND) { - wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); + wq->unbound_attrs = alloc_workqueue_attrs(); if (!wq->unbound_attrs) goto err_free_wq; } @@ -4187,9 +4186,9 @@ void destroy_workqueue(struct workqueue_struct *wq) struct worker *rescuer = wq->rescuer; /* this prevents new queueing */ - spin_lock_irq(&wq_mayday_lock); + raw_spin_lock_irq(&wq_mayday_lock); wq->rescuer = NULL; - spin_unlock_irq(&wq_mayday_lock); + raw_spin_unlock_irq(&wq_mayday_lock); /* rescuer will empty maydays list before exiting */ kthread_stop(rescuer->task); @@ -4232,7 +4231,7 @@ void destroy_workqueue(struct workqueue_struct *wq) * The base ref is never dropped on per-cpu pwqs. Directly * schedule RCU free. */ - call_rcu_sched(&wq->rcu, rcu_free_wq); + call_rcu(&wq->rcu, rcu_free_wq); } else { /* * We're the sole accessor of @wq at this point. 
Directly @@ -4342,7 +4341,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) struct pool_workqueue *pwq; bool ret; - rcu_read_lock_sched(); + rcu_read_lock(); + preempt_disable(); if (cpu == WORK_CPU_UNBOUND) cpu = smp_processor_id(); @@ -4353,7 +4353,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); ret = !list_empty(&pwq->delayed_works); - rcu_read_unlock_sched(); + preempt_enable(); + rcu_read_unlock(); return ret; } @@ -4379,15 +4380,15 @@ unsigned int work_busy(struct work_struct *work) if (work_pending(work)) ret |= WORK_BUSY_PENDING; - local_irq_save(flags); + rcu_read_lock(); pool = get_work_pool(work); if (pool) { - spin_lock(&pool->lock); + raw_spin_lock_irqsave(&pool->lock, flags); if (find_worker_executing_work(pool, work)) ret |= WORK_BUSY_RUNNING; - spin_unlock(&pool->lock); + raw_spin_unlock_irqrestore(&pool->lock, flags); } - local_irq_restore(flags); + rcu_read_unlock(); return ret; } @@ -4572,7 +4573,7 @@ void show_workqueue_state(void) unsigned long flags; int pi; - rcu_read_lock_sched(); + rcu_read_lock(); pr_info("Showing busy workqueues and worker pools:\n"); @@ -4592,10 +4593,10 @@ void show_workqueue_state(void) pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); for_each_pwq(pwq, wq) { - spin_lock_irqsave(&pwq->pool->lock, flags); + raw_spin_lock_irqsave(&pwq->pool->lock, flags); if (pwq->nr_active || !list_empty(&pwq->delayed_works)) show_pwq(pwq); - spin_unlock_irqrestore(&pwq->pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); /* * We could be printing a lot from atomic context, e.g. * sysrq-t -> show_workqueue_state(). Avoid triggering @@ -4609,7 +4610,7 @@ void show_workqueue_state(void) struct worker *worker; bool first = true; - spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, flags); if (pool->nr_workers == pool->nr_idle) goto next_pool; @@ -4628,7 +4629,7 @@ void show_workqueue_state(void) } pr_cont("\n"); next_pool: - spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, flags); /* * We could be printing a lot from atomic context, e.g. * sysrq-t -> show_workqueue_state(). Avoid triggering @@ -4637,7 +4638,7 @@ void show_workqueue_state(void) touch_nmi_watchdog(); } - rcu_read_unlock_sched(); + rcu_read_unlock(); } /* used to show worker information through /proc/PID/{comm,stat,status} */ @@ -4658,7 +4659,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task) struct worker_pool *pool = worker->pool; if (pool) { - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* * ->desc tracks information (wq name or * set_worker_desc()) for the latest execution. If @@ -4672,7 +4673,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task) scnprintf(buf + off, size - off, "-%s", worker->desc); } - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); } } @@ -4703,7 +4704,7 @@ static void unbind_workers(int cpu) for_each_cpu_worker_pool(pool, cpu) { mutex_lock(&wq_pool_attach_mutex); - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); /* * We've blocked all attach/detach operations. Make all workers @@ -4717,7 +4718,7 @@ static void unbind_workers(int cpu) pool->flags |= POOL_DISASSOCIATED; - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); mutex_unlock(&wq_pool_attach_mutex); /* @@ -4743,9 +4744,9 @@ static void unbind_workers(int cpu) * worker blocking could lead to lengthy stalls. 
Kick off * unbound chain execution of currently pending work items. */ - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); wake_up_worker(pool); - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); } } @@ -4772,7 +4773,7 @@ static void rebind_workers(struct worker_pool *pool) WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0); - spin_lock_irq(&pool->lock); + raw_spin_lock_irq(&pool->lock); pool->flags &= ~POOL_DISASSOCIATED; @@ -4811,7 +4812,7 @@ static void rebind_workers(struct worker_pool *pool) WRITE_ONCE(worker->flags, worker_flags); } - spin_unlock_irq(&pool->lock); + raw_spin_unlock_irq(&pool->lock); } /** @@ -5024,16 +5025,16 @@ bool freeze_workqueues_busy(void) * nr_active is monotonically decreasing. It's safe * to peek without lock. */ - rcu_read_lock_sched(); + rcu_read_lock(); for_each_pwq(pwq, wq) { WARN_ON_ONCE(pwq->nr_active < 0); if (pwq->nr_active) { busy = true; - rcu_read_unlock_sched(); + rcu_read_unlock(); goto out_unlock; } } - rcu_read_unlock_sched(); + rcu_read_unlock(); } out_unlock: mutex_unlock(&wq_pool_mutex); @@ -5235,7 +5236,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, const char *delim = ""; int node, written = 0; - rcu_read_lock_sched(); + get_online_cpus(); + rcu_read_lock(); for_each_node(node) { written += scnprintf(buf + written, PAGE_SIZE - written, "%s%d:%d", delim, node, @@ -5243,7 +5245,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, delim = " "; } written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); - rcu_read_unlock_sched(); + rcu_read_unlock(); + put_online_cpus(); return written; } @@ -5268,7 +5271,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) lockdep_assert_held(&wq_pool_mutex); - attrs = alloc_workqueue_attrs(GFP_KERNEL); + attrs = alloc_workqueue_attrs(); if (!attrs) return NULL; @@ -5697,7 +5700,7 @@ static void __init wq_numa_init(void) return; } - wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); + wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(); BUG_ON(!wq_update_unbound_numa_attrs_buf); /* @@ -5772,7 +5775,7 @@ int __init workqueue_init_early(void) for (i = 0; i < NR_STD_WORKER_POOLS; i++) { struct workqueue_attrs *attrs; - BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); + BUG_ON(!(attrs = alloc_workqueue_attrs())); attrs->nice = std_nice[i]; unbound_std_wq_attrs[i] = attrs; @@ -5781,7 +5784,7 @@ int __init workqueue_init_early(void) * guaranteed by max_active which is enforced by pwqs. * Turn off NUMA so that dfl_pwq is used for all nodes. */ - BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); + BUG_ON(!(attrs = alloc_workqueue_attrs())); attrs->nice = std_nice[i]; attrs->no_numa = true; ordered_wq_attrs[i] = attrs; diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 66fbb5a9e633b4fbe2b9d6090774ad89a8bfebf9..30cfed226b390ee2dec5741e31754011f69a0e0c 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -44,6 +44,7 @@ struct worker { unsigned long last_active; /* L: last active timestamp */ unsigned int flags; /* X: flags */ int id; /* I: worker id */ + int sleeping; /* None */ /* * Opaque string set with work_set_desc(). Printed out with task @@ -69,7 +70,7 @@ static inline struct worker *current_wq_worker(void) * Scheduler hooks for concurrency managed workqueue. Only to be used from * sched/core.c and workqueue.c. 
*/ -void wq_worker_waking_up(struct task_struct *task, int cpu); -struct task_struct *wq_worker_sleeping(struct task_struct *task); +void wq_worker_running(struct task_struct *task); +void wq_worker_sleeping(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 714ec2f50bb105100d30dbab8e08752c78c53790..4857bb4c490a6d84079b8b9d67d5a2edc6054068 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -440,6 +440,7 @@ config CHECK_SIGNATURE config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + depends on !PREEMPT_RT_FULL help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 621859a453f8250b3ec8a7230be9d5c7482a3953..02f962c9b19951a4f19d0edcb1e53e558d6a7746 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1206,7 +1206,7 @@ config DEBUG_ATOMIC_SLEEP config DEBUG_LOCKING_API_SELFTESTS bool "Locking API boot-time self-tests" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !PREEMPT_RT_FULL help Say Y here if you want the kernel to run a short self-test during bootup. The self-test checks whether common types of locking bugs diff --git a/lib/crc32.c b/lib/crc32.c index 1a5d08470044ea53f5469a0d4011e1f8a69b43ea..bf60ef26a45c241f6ff33e35e0281651e11bfc3d 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -183,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, } #if CRC_LE_BITS == 1 -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); } -u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); } #else -u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, (const u32 (*)[256])crc32table_le, CRC32_POLY_LE); } -u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); @@ -206,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); +u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); +u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); + /* * This multiplies the polynomials x and y modulo the given modulus. 
* This follows the "little-endian" CRC convention that the lsbit diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 14afeeb7d6ef5b91929702af25f96ef4eeb711ec..e28481c402ae603db2a2bf87304c68f90cbd3f76 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -376,7 +376,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) struct debug_obj *obj; unsigned long flags; - fill_pool(); +#ifdef CONFIG_PREEMPT_RT_FULL + if (preempt_count() == 0 && !irqs_disabled()) +#endif + fill_pool(); db = get_bucket((unsigned long) addr); diff --git a/lib/irq_poll.c b/lib/irq_poll.c index 86a709954f5a515bc151ddb0ddfdfcdcd3b871ab..9c069ef83d6d2e6ea78644734de58d67c641ca45 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop) list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(irq_poll_sched); @@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop) local_irq_save(flags); __irq_poll_complete(iop); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(irq_poll_complete); @@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) } local_irq_enable(); + preempt_check_resched_rt(); /* Even though interrupts have been re-enabled, this * access is safe because interrupts can only add new @@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); } /** @@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu) this_cpu_ptr(&blk_cpu_iopoll)); __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); return 0; } diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 1e1bbf171eca4076a39383d51b75224dbb4099d0..32db9532ddd46f288feaec067c139f857395339d 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex); #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) @@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) +#endif + #undef E1 #undef E2 +#ifndef CONFIG_PREEMPT_RT_FULL /* * Enabling hardirqs with a softirq-safe lock held: */ @@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #undef E1 #undef E2 +#endif + /* * Enabling irqs with an irq-safe lock held: */ @@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) @@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) +#endif + #undef E1 #undef E2 @@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" 
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) @@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) +#endif + #undef E1 #undef E2 #undef E3 @@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) @@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) +#endif + #undef E1 #undef E2 #undef E3 +#ifndef CONFIG_PREEMPT_RT_FULL + /* * read-lock / write-lock irq inversion. * @@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) #undef E2 #undef E3 +#endif + +#ifndef CONFIG_PREEMPT_RT_FULL + /* * read-lock / write-lock recursion that is actually safe. */ @@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) #undef E2 #undef E3 +#endif + /* * read-lock / write-lock recursion that is unsafe. */ @@ -2057,6 +2084,7 @@ void locking_selftest(void) printk(" --------------------------------------------------------------------------\n"); +#ifndef CONFIG_PREEMPT_RT_FULL /* * irq-context testcases: */ @@ -2069,6 +2097,28 @@ void locking_selftest(void) DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); +#else + /* On -rt, we only do hardirq context test for raw spinlock */ + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); + + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); + + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); + + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); +#endif ww_tests(); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index e5cab5c4e383097436018a728e9ac78a6ad721ce..9309e813bc1f7f0a0ae6677edb381bcef20ceadc 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -38,7 +38,7 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/string.h> - +#include <linux/locallock.h> /* Number of nodes in fully populated tree of given height */ static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; @@ -87,6 +87,7 @@ struct radix_tree_preload { struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); static inline struct radix_tree_node *entry_to_node(void *ptr) { @@ -405,12 +406,13 @@
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, * succeed in getting a node here (and never reach * kmem_cache_alloc) */ - rtp = this_cpu_ptr(&radix_tree_preloads); + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; rtp->nodes = ret->parent; rtp->nr--; } + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); /* * Update the allocation stack trace as this is more useful * for debugging. @@ -476,14 +478,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) */ gfp_mask &= ~__GFP_ACCOUNT; - preempt_disable(); + local_lock(radix_tree_preloads_lock); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { - preempt_enable(); + local_unlock(radix_tree_preloads_lock); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; - preempt_disable(); + local_lock(radix_tree_preloads_lock); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { node->parent = rtp->nodes; @@ -525,7 +527,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ - preempt_disable(); + local_lock(radix_tree_preloads_lock); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); @@ -563,7 +565,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) /* Preloading doesn't help anything with this gfp mask, skip it */ if (!gfpflags_allow_blocking(gfp_mask)) { - preempt_disable(); + local_lock(radix_tree_preloads_lock); return 0; } @@ -597,6 +599,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) return __radix_tree_preload(gfp_mask, nr_nodes); } +void radix_tree_preload_end(void) +{ + local_unlock(radix_tree_preloads_lock); +} +EXPORT_SYMBOL(radix_tree_preload_end); + static unsigned radix_tree_load_root(const struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { @@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged); void idr_preload(gfp_t gfp_mask) { if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) - preempt_disable(); + local_lock(radix_tree_preloads_lock); } EXPORT_SYMBOL(idr_preload); +void idr_preload_end(void) +{ + local_unlock(radix_tree_preloads_lock); +} +EXPORT_SYMBOL(idr_preload_end); + int ida_pre_get(struct ida *ida, gfp_t gfp) { /* @@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) * to return to the ida_pre_get() step. 
*/ if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) - preempt_enable(); + local_unlock(radix_tree_preloads_lock); if (!this_cpu_read(ida_bitmap)) { struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 3b859201f84c6a766dac6a375bc5ebbed139f4d2..093c3aa3d895cd30a0bf58f1716e06acc05b394b 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -777,7 +777,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { - WARN_ON_ONCE(preemptible()); + WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); } else kunmap(miter->page); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 85925aaa4fff5f6cb0831d3587c7a3ce18df751b..2e7398534b661f42cfd78488c2d0de01516b3a2d 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -22,7 +22,12 @@ notrace static unsigned int check_preemption_disabled(const char *what1, * Kernel threads bound to a single CPU can safely use * smp_processor_id(): */ - if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu))) +#if defined(CONFIG_PREEMPT_RT_BASE) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG)) + if (current->migrate_disable) + goto out; +#endif + + if (current->nr_cpus_allowed == 1) goto out; /* diff --git a/lib/ubsan.c b/lib/ubsan.c index 1e9e2ab2553969403e2f725145b2e6873c0d2e3c..199c75e03469bdc4a732d60727d729927e1daebc 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -143,25 +143,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, } } -static DEFINE_SPINLOCK(report_lock); - -static void ubsan_prologue(struct source_location *location, - unsigned long *flags) +static void ubsan_prologue(struct source_location *location) { current->in_ubsan++; - spin_lock_irqsave(&report_lock, *flags); pr_err("========================================" "========================================\n"); print_source_location("UBSAN: Undefined behaviour in", location); } -static void ubsan_epilogue(unsigned long *flags) +static void ubsan_epilogue(void) { dump_stack(); pr_err("========================================" "========================================\n"); - spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; } @@ -170,14 +166,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, { struct type_descriptor *type = data->type; - unsigned long flags; char lhs_val_str[VALUE_LENGTH]; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); @@ -189,7 +184,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, rhs_val_str, type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } void __ubsan_handle_add_overflow(struct overflow_data *data, @@ -217,20 +212,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, void *old_val) { - unsigned long flags; char old_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); pr_err("negation of %s cannot be represented in type %s:\n", old_val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); }
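/*
 * Aside on the report_lock removal above: ubsan handlers can fire from any
 * context, including ones where taking a spinlock (a sleeping lock on
 * PREEMPT_RT) is invalid, so the prologue/epilogue now rely solely on the
 * per-task current->in_ubsan counter for recursion suppression and accept
 * that reports from different CPUs may interleave. A minimal sketch of that
 * counter idiom; example_report() is a hypothetical function, not taken
 * from the patch:
 */
#include <linux/printk.h>
#include <linux/sched.h>

static void example_report(const char *what)
{
	if (current->in_ubsan)	/* already inside a report: avoid recursion */
		return;

	current->in_ubsan++;
	pr_err("example report: %s\n", what);
	current->in_ubsan--;
}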
EXPORT_SYMBOL(__ubsan_handle_negate_overflow); @@ -238,13 +232,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); void __ubsan_handle_divrem_overflow(struct overflow_data *data, void *lhs, void *rhs) { - unsigned long flags; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); @@ -254,58 +247,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, else pr_err("division by zero\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); static void handle_null_ptr_deref(struct type_mismatch_data_common *data) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s null pointer of type %s\n", type_check_kinds[data->type_check_kind], data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void handle_misaligned_access(struct type_mismatch_data_common *data, unsigned long ptr) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s misaligned address %p for type %s\n", type_check_kinds[data->type_check_kind], (void *)ptr, data->type->type_name); pr_err("which requires %ld byte alignment\n", data->alignment); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void handle_object_size_mismatch(struct type_mismatch_data_common *data, unsigned long ptr) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s address %p with insufficient space\n", type_check_kinds[data->type_check_kind], (void *) ptr); pr_err("for an object of type %s\n", data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, @@ -352,42 +339,39 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, void *bound) { - unsigned long flags; char bound_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(bound_str, sizeof(bound_str), data->type, bound); pr_err("variable length array bound value %s <= 0\n", bound_str); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) { - unsigned long flags; char index_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(index_str, sizeof(index_str), data->index_type, index); pr_err("index %s is out of range for type %s\n", index_str, data->array_type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, void *lhs, void *rhs) { - unsigned long flags; struct type_descriptor *rhs_type = data->rhs_type; struct type_descriptor *lhs_type = data->lhs_type; char rhs_str[VALUE_LENGTH]; @@ -396,7 +380,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, if (suppress_report(&data->location)) return; - 
ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); @@ -419,18 +403,16 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, lhs_str, rhs_str, lhs_type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) { - unsigned long flags; - - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); pr_err("calling __builtin_unreachable()\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); panic("can't return from __builtin_unreachable()"); } EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); @@ -438,19 +420,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, void *val) { - unsigned long flags; char val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(val_str, sizeof(val_str), data->type, val); pr_err("load of value %s is not a valid value for type %s\n", val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); diff --git a/localversion-rt b/localversion-rt new file mode 100644 index 0000000000000000000000000000000000000000..b3e668a8fb94884ea28187a22b8606c4f07c1a9f --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ +-rt110 diff --git a/mm/Kconfig b/mm/Kconfig index b457e94ae6182a491df70157a607af812b6f1b37..0dddbb2a32825e51604c3a6db40f9bcf69365a26 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -377,7 +377,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" - depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE + depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL select COMPACTION select RADIX_TREE_MULTIORDER help diff --git a/mm/compaction.c b/mm/compaction.c index 5079ddbec8f9e2fd23344019f01fcdca6f9a3968..c40d3a13cbbd557e4ca94da196280173f9829827 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1668,10 +1668,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro block_start_pfn(cc->migrate_pfn, cc->order); if (cc->last_migrated_pfn < current_block_start) { - cpu = get_cpu(); + cpu = get_cpu_light(); + local_lock_irq(swapvec_lock); lru_add_drain_cpu(cpu); + local_unlock_irq(swapvec_lock); drain_local_pages(zone); - put_cpu(); + put_cpu_light(); /* No more flushing until we migrate again */ cc->last_migrated_pfn = 0; } diff --git a/mm/highmem.c b/mm/highmem.c index 59db3223a5d62a9ea26f506ed3fd9fe6d54d2cf2..22aa3ddbd87b0235b86e548c7bac2a203f5ea768 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -30,10 +30,11 @@ #include <linux/kgdb.h> #include <asm/tlbflush.h> - +#ifndef CONFIG_PREEMPT_RT_FULL #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DEFINE_PER_CPU(int, __kmap_atomic_idx); #endif +#endif /* * Virtual_count is not a pure "count".
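/*
 * Aside on the compact_zone() and highmem changes above: on PREEMPT_RT,
 * per-CPU state is protected by local locks instead of bare
 * preempt/irq-disable, so the critical section stays preemptible on RT while
 * compiling down to plain local_irq_*()/preempt_*() on !RT. A minimal sketch
 * of the pattern, assuming the locallock.h API this series introduces;
 * example_lock and example_counter are hypothetical:
 */
#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(example_lock);
static DEFINE_PER_CPU(int, example_counter);

static void example_touch_per_cpu_state(void)
{
	/* pins the task to this CPU's data; a sleepable spinlock on RT */
	local_lock_irq(example_lock);
	this_cpu_inc(example_counter);
	local_unlock_irq(example_lock);
}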
@@ -108,8 +109,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) unsigned long totalhigh_pages __read_mostly; EXPORT_SYMBOL(totalhigh_pages); - +#ifndef CONFIG_PREEMPT_RT_FULL EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); +#endif unsigned int nr_free_highpages (void) { diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 3a8ddf8baf7dc3d52597bf0e53753c0cc17503cd..b209dbaefde822315201ab409dce65ca52b371eb 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -103,7 +103,7 @@ static int quarantine_head; static int quarantine_tail; /* Total size of all objects in global_quarantine across all batches. */ static unsigned long quarantine_size; -static DEFINE_SPINLOCK(quarantine_lock); +static DEFINE_RAW_SPINLOCK(quarantine_lock); DEFINE_STATIC_SRCU(remove_cache_srcu); /* Maximum size of the global queue. */ @@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { qlist_move_all(q, &temp); - spin_lock(&quarantine_lock); + raw_spin_lock(&quarantine_lock); WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); qlist_move_all(&temp, &global_quarantine[quarantine_tail]); if (global_quarantine[quarantine_tail].bytes >= @@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) if (new_tail != quarantine_head) quarantine_tail = new_tail; } - spin_unlock(&quarantine_lock); + raw_spin_unlock(&quarantine_lock); } local_irq_restore(flags); @@ -230,7 +230,7 @@ void quarantine_reduce(void) * expected case). */ srcu_idx = srcu_read_lock(&remove_cache_srcu); - spin_lock_irqsave(&quarantine_lock, flags); + raw_spin_lock_irqsave(&quarantine_lock, flags); /* * Update quarantine size in case of hotplug. Allocate a fraction of @@ -254,7 +254,7 @@ void quarantine_reduce(void) quarantine_head = 0; } - spin_unlock_irqrestore(&quarantine_lock, flags); + raw_spin_unlock_irqrestore(&quarantine_lock, flags); qlist_free_all(&to_free, NULL); srcu_read_unlock(&remove_cache_srcu, srcu_idx); @@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache) */ on_each_cpu(per_cpu_remove_cache, cache, 1); - spin_lock_irqsave(&quarantine_lock, flags); + raw_spin_lock_irqsave(&quarantine_lock, flags); for (i = 0; i < QUARANTINE_BATCHES; i++) { if (qlist_empty(&global_quarantine[i])) continue; qlist_move_cache(&global_quarantine[i], &to_free, cache); /* Scanning whole quarantine can take a while. */ - spin_unlock_irqrestore(&quarantine_lock, flags); + raw_spin_unlock_irqrestore(&quarantine_lock, flags); cond_resched(); - spin_lock_irqsave(&quarantine_lock, flags); + raw_spin_lock_irqsave(&quarantine_lock, flags); } - spin_unlock_irqrestore(&quarantine_lock, flags); + raw_spin_unlock_irqrestore(&quarantine_lock, flags); qlist_free_all(&to_free, cache); diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 639acbb91fd53dc942e23498fa9735d8052a843e..23e2837501d65d0e87f698e9d4b2200c188fcbd8 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -26,7 +26,7 @@ * * The following locks and mutexes are used by kmemleak: * - * - kmemleak_lock (rwlock): protects the object_list modifications and + * - kmemleak_lock (raw spinlock): protects the object_list modifications and * accesses to the object_tree_root. The object_list is the main list * holding the metadata (struct kmemleak_object) for the allocated memory * blocks. 
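/*
 * Aside on the kasan quarantine and kmemleak lock conversions above and
 * below: any lock taken while interrupts are hard-disabled (under
 * local_irq_save(), or from allocator internals that these debug hooks
 * instrument) must be a raw_spinlock_t, because a plain spinlock_t becomes a
 * sleeping rtmutex on PREEMPT_RT. A minimal sketch of the pattern;
 * example_lock and example_state are hypothetical:
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static int example_state;

static void example_update_from_any_context(int v)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_state = v;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}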
The object_tree_root is a red black tree used to look-up @@ -147,7 +147,7 @@ struct kmemleak_scan_area { * (use_count) and freed using the RCU mechanism. */ struct kmemleak_object { - spinlock_t lock; + raw_spinlock_t lock; unsigned int flags; /* object status flags */ struct list_head object_list; struct list_head gray_list; @@ -197,7 +197,7 @@ static LIST_HEAD(gray_list); /* search tree for object boundaries */ static struct rb_root object_tree_root = RB_ROOT; /* rw_lock protecting the access to object_list and object_tree_root */ -static DEFINE_RWLOCK(kmemleak_lock); +static DEFINE_RAW_SPINLOCK(kmemleak_lock); /* allocation caches for kmemleak internal data */ static struct kmem_cache *object_cache; @@ -491,9 +491,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) struct kmemleak_object *object; rcu_read_lock(); - read_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); object = lookup_object(ptr, alias); - read_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); /* check whether the object is still available */ if (object && !get_object(object)) @@ -513,13 +513,13 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali unsigned long flags; struct kmemleak_object *object; - write_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); object = lookup_object(ptr, alias); if (object) { rb_erase(&object->rb_node, &object_tree_root); list_del_rcu(&object->object_list); } - write_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); return object; } @@ -561,7 +561,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, INIT_LIST_HEAD(&object->object_list); INIT_LIST_HEAD(&object->gray_list); INIT_HLIST_HEAD(&object->area_list); - spin_lock_init(&object->lock); + raw_spin_lock_init(&object->lock); atomic_set(&object->use_count, 1); object->flags = OBJECT_ALLOCATED; object->pointer = ptr; @@ -593,7 +593,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, /* kernel backtrace */ object->trace_len = __save_stack_trace(object->trace); - write_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); min_addr = min(min_addr, ptr); max_addr = max(max_addr, ptr + size); @@ -624,7 +624,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, list_add_tail_rcu(&object->object_list, &object_list); out: - write_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); return object; } @@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object) * Locking here also ensures that the corresponding memory block * cannot be freed when it is being scanned. 
*/ - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->flags &= ~OBJECT_ALLOCATED; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color) { unsigned long flags; - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); __paint_it(object, color); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } static void paint_ptr(unsigned long ptr, int color) @@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) goto out; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (size == SIZE_MAX) { size = object->pointer + object->size - ptr; } else if (ptr + size > object->pointer + object->size) { @@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) hlist_add_head(&area->node, &object->area_list); out_unlock: - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); out: put_object(object); } @@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref) return; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->excess_ref = excess_ref; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr) return; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->flags |= OBJECT_NO_SCAN; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log) log->min_count, GFP_ATOMIC); if (!object) goto out; - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); for (i = 0; i < log->trace_len; i++) object->trace[i] = log->trace[i]; object->trace_len = log->trace_len; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); out: rcu_read_unlock(); } @@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr) return; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); object->trace_len = __save_stack_trace(object->trace); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); } @@ -1310,7 +1310,7 @@ static void scan_block(void *_start, void *_end, unsigned long *end = _end - (BYTES_PER_POINTER - 1); unsigned long flags; - read_lock_irqsave(&kmemleak_lock, flags); + raw_spin_lock_irqsave(&kmemleak_lock, flags); for (ptr = start; ptr < end; ptr++) { struct kmemleak_object *object; unsigned long pointer; @@ -1344,7 +1344,7 @@ static void scan_block(void *_start, void *_end, * previously acquired in scan_object(). These locks are * enclosed by scan_mutex. 
*/ - spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); /* only pass surplus references (object already gray) */ if (color_gray(object)) { excess_ref = object->excess_ref; @@ -1353,7 +1353,7 @@ static void scan_block(void *_start, void *_end, excess_ref = 0; update_refs(object); } - spin_unlock(&object->lock); + raw_spin_unlock(&object->lock); if (excess_ref) { object = lookup_object(excess_ref, 0); @@ -1362,12 +1362,12 @@ static void scan_block(void *_start, void *_end, if (object == scanned) /* circular reference, ignore */ continue; - spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); update_refs(object); - spin_unlock(&object->lock); + raw_spin_unlock(&object->lock); } } - read_unlock_irqrestore(&kmemleak_lock, flags); + raw_spin_unlock_irqrestore(&kmemleak_lock, flags); } /* @@ -1400,7 +1400,7 @@ static void scan_object(struct kmemleak_object *object) * Once the object->lock is acquired, the corresponding memory block * cannot be freed (the same lock is acquired in delete_object). */ - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (object->flags & OBJECT_NO_SCAN) goto out; if (!(object->flags & OBJECT_ALLOCATED)) @@ -1419,9 +1419,9 @@ static void scan_object(struct kmemleak_object *object) if (start >= end) break; - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); cond_resched(); - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); } while (object->flags & OBJECT_ALLOCATED); } else hlist_for_each_entry(area, &object->area_list, node) @@ -1429,7 +1429,7 @@ static void scan_object(struct kmemleak_object *object) (void *)(area->start + area->size), object); out: - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } /* @@ -1482,7 +1482,7 @@ static void kmemleak_scan(void) /* prepare the kmemleak_object's */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); #ifdef DEBUG /* * With a few exceptions there should be a maximum of @@ -1499,7 +1499,7 @@ static void kmemleak_scan(void) if (color_gray(object) && get_object(object)) list_add_tail(&object->gray_list, &gray_list); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1564,14 +1564,14 @@ static void kmemleak_scan(void) */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (color_white(object) && (object->flags & OBJECT_ALLOCATED) && update_checksum(object) && get_object(object)) { /* color it gray temporarily */ object->count = object->min_count; list_add_tail(&object->gray_list, &gray_list); } - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1591,13 +1591,13 @@ static void kmemleak_scan(void) */ rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if (unreferenced_object(object) && !(object->flags & OBJECT_REPORTED)) { object->flags |= OBJECT_REPORTED; new_leaks++; } - spin_unlock_irqrestore(&object->lock, flags); + 
raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); @@ -1749,10 +1749,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) struct kmemleak_object *object = v; unsigned long flags; - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) print_unreferenced(seq, object); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); return 0; } @@ -1782,9 +1782,9 @@ static int dump_str_object_info(const char *str) return -EINVAL; } - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); dump_object_info(object); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); return 0; @@ -1803,11 +1803,11 @@ static void kmemleak_clear(void) rcu_read_lock(); list_for_each_entry_rcu(object, &object_list, object_list) { - spin_lock_irqsave(&object->lock, flags); + raw_spin_lock_irqsave(&object->lock, flags); if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) __paint_it(object, KMEMLEAK_GREY); - spin_unlock_irqrestore(&object->lock, flags); + raw_spin_unlock_irqrestore(&object->lock, flags); } rcu_read_unlock(); diff --git a/mm/memblock.c b/mm/memblock.c index 4d471da3cc4794a06024330848fafc0b1d5b36f8..6e568b32638fc7e1a2220c79393ccd58246a646b 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1318,7 +1318,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i static void * __init memblock_virt_alloc_internal( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, - int nid) + int nid, bool exact_nid) { phys_addr_t alloc; void *ptr; @@ -1346,7 +1346,7 @@ static void * __init memblock_virt_alloc_internal( if (alloc && !memblock_reserve(alloc, size)) goto done; - if (nid != NUMA_NO_NODE) { + if (nid != NUMA_NO_NODE && !exact_nid) { alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, NUMA_NO_NODE, flags); @@ -1412,7 +1412,7 @@ void * __init memblock_virt_alloc_try_nid_raw( &max_addr, (void *)_RET_IP_); ptr = memblock_virt_alloc_internal(size, align, - min_addr, max_addr, nid); + min_addr, max_addr, nid, false); #ifdef CONFIG_DEBUG_VM if (ptr && size > 0) memset(ptr, PAGE_POISON_PATTERN, size); @@ -1420,6 +1420,43 @@ void * __init memblock_virt_alloc_try_nid_raw( return ptr; } +/** + * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node + * without zeroing memory + * @size: size of memory block to be allocated in bytes + * @align: alignment of the region and block's size + * @min_addr: the lower bound of the memory region from where the allocation + * is preferred (phys address) + * @max_addr: the upper bound of the memory region from where the allocation + * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to + * allocate only from memory limited by memblock.current_limit value + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node + * + * Public function, provides additional debug information (including caller + * info), if enabled. Does not zero allocated memory. + * + * Return: + * Virtual address of allocated memory block on success, NULL on failure. 
+ */ +void * __init memblock_alloc_exact_nid_raw( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid) +{ + void *ptr; + + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", + __func__, (u64)size, (u64)align, nid, &min_addr, + &max_addr, (void *)_RET_IP_); + + ptr = memblock_virt_alloc_internal(size, align, + min_addr, max_addr, nid, true); + if (ptr && size > 0) + memset(ptr, PAGE_POISON_PATTERN, size); + + return ptr; +} + /** * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block @@ -1449,7 +1486,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic( &max_addr, (void *)_RET_IP_); ptr = memblock_virt_alloc_internal(size, align, - min_addr, max_addr, nid); + min_addr, max_addr, nid, false); if (ptr) memset(ptr, 0, size); return ptr; @@ -1484,7 +1521,7 @@ void * __init memblock_virt_alloc_try_nid( __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); ptr = memblock_virt_alloc_internal(size, align, - min_addr, max_addr, nid); + min_addr, max_addr, nid, false); if (ptr) { memset(ptr, 0, size); return ptr; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f3aa6e6214d5ee0b11cc5c6fe3fee6d7eb10b27d..d5bda1449fe396ea5e2d728a6c445f23fd84d578 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -69,6 +69,7 @@ #include <net/sock.h> #include <net/ip.h> #include "slab.h" +#include <linux/locallock.h> #include <linux/uaccess.h> @@ -94,6 +95,8 @@ int do_swap_account __read_mostly; #define do_swap_account 0 #endif +static DEFINE_LOCAL_IRQ_LOCK(event_lock); + /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { @@ -2084,7 +2087,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all.
*/ - curcpu = get_cpu(); + curcpu = get_cpu_light(); for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; @@ -2104,7 +2107,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) } css_put(&memcg->css); } - put_cpu(); + put_cpu_light(); mutex_unlock(&percpu_charge_mutex); } @@ -4933,12 +4936,12 @@ static int mem_cgroup_move_account(struct page *page, ret = 0; - local_irq_disable(); + local_lock_irq(event_lock); mem_cgroup_charge_statistics(to, page, compound, nr_pages); memcg_check_events(to, page); mem_cgroup_charge_statistics(from, page, compound, -nr_pages); memcg_check_events(from, page); - local_irq_enable(); + local_unlock_irq(event_lock); out_unlock: unlock_page(page); out: @@ -6057,10 +6060,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, commit_charge(page, memcg, lrucare); - local_irq_disable(); + local_lock_irq(event_lock); mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); memcg_check_events(memcg, page); - local_irq_enable(); + local_unlock_irq(event_lock); if (do_memsw_account() && PageSwapCache(page)) { swp_entry_t entry = { .val = page_private(page) }; @@ -6129,7 +6132,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) memcg_oom_recover(ug->memcg); } - local_irq_save(flags); + local_lock_irqsave(event_lock, flags); __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); @@ -6137,7 +6140,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); memcg_check_events(ug->memcg, ug->dummy_page); - local_irq_restore(flags); + local_unlock_irqrestore(event_lock, flags); if (!mem_cgroup_is_root(ug->memcg)) css_put_many(&ug->memcg->css, nr_pages); @@ -6300,10 +6303,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) commit_charge(newpage, memcg, false); - local_irq_save(flags); + local_lock_irqsave(event_lock, flags); mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); memcg_check_events(memcg, newpage); - local_irq_restore(flags); + local_unlock_irqrestore(event_lock, flags); } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); @@ -6485,6 +6488,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; + unsigned long flags; VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); @@ -6530,10 +6534,14 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. 
*/ + local_lock_irqsave(event_lock, flags); +#ifndef CONFIG_PREEMPT_RT_BASE VM_BUG_ON(!irqs_disabled()); +#endif mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), -nr_entries); memcg_check_events(memcg, page); + local_unlock_irqrestore(event_lock, flags); if (!mem_cgroup_is_root(memcg)) css_put_many(&memcg->css, nr_entries); diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 3e612ae748e96692522cf7d3c7c615345974fa6d..d0ccc070979fe128fe40f1ba74b2c8b915ce2be3 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); + preempt_disable_rt(); active_mm = tsk->active_mm; if (active_mm != mm) { mmgrab(mm); @@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm) } tsk->mm = mm; switch_mm(active_mm, mm, tsk); + preempt_enable_rt(); task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9c35403d9646193c19d4170d97783e5830c426c5..11c08cfd6dfb278367b171384abdd85a0f50aaeb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -60,6 +60,7 @@ #include <linux/hugetlb.h> #include <linux/sched/rt.h> #include <linux/sched/mm.h> +#include <linux/locallock.h> #include <linux/page_owner.h> #include <linux/kthread.h> #include <linux/memcontrol.h> @@ -292,6 +293,18 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif +static DEFINE_LOCAL_IRQ_LOCK(pa_lock); + +#ifdef CONFIG_PREEMPT_RT_BASE +# define cpu_lock_irqsave(cpu, flags) \ + local_lock_irqsave_on(pa_lock, flags, cpu) +# define cpu_unlock_irqrestore(cpu, flags) \ + local_unlock_irqrestore_on(pa_lock, flags, cpu) +#else +# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) +# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) +#endif + int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -1096,7 +1109,7 @@ static inline void prefetch_buddy(struct page *page) } /* - * Frees a number of pages from the PCP lists + * Frees a number of pages which have been collected from the pcp lists. * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * @@ -1106,15 +1119,57 @@ static inline void prefetch_buddy(struct page *page) * And clear the zone's pages_scanned counter, to hold off the "all pages are * pinned" detection logic. */ -static void free_pcppages_bulk(struct zone *zone, int count, - struct per_cpu_pages *pcp) +static void free_pcppages_bulk(struct zone *zone, struct list_head *head, + bool zone_retry) +{ + bool isolated_pageblocks; + struct page *page, *tmp; + unsigned long flags; + + spin_lock_irqsave(&zone->lock, flags); + isolated_pageblocks = has_isolate_pageblock(zone); + + /* + * Use safe version since after __free_one_page(), + * page->lru.next will not point to original list. + */ + list_for_each_entry_safe(page, tmp, head, lru) { + int mt = get_pcppage_migratetype(page); + + if (page_zone(page) != zone) { + /* + * free_unref_page_list() sorts pages by zone. If we end + * up with pages from different NUMA nodes belonging + * to the same ZONE index then we need to redo with the + * correct ZONE pointer. Skip the page for now, redo it + * on the next iteration.
+ */ + WARN_ON_ONCE(zone_retry == false); + if (zone_retry) + continue; + } + + /* MIGRATE_ISOLATE page should not go to pcplists */ + VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); + /* Pageblock could have been isolated meanwhile */ + if (unlikely(isolated_pageblocks)) + mt = get_pageblock_migratetype(page); + + list_del(&page->lru); + __free_one_page(page, page_to_pfn(page), zone, 0, mt); + trace_mm_page_pcpu_drain(page, 0, mt); + } + spin_unlock_irqrestore(&zone->lock, flags); +} + +static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp, + struct list_head *dst) + { int migratetype = 0; int batch_free = 0; int prefetch_nr = 0; - bool isolated_pageblocks; - struct page *page, *tmp; - LIST_HEAD(head); + struct page *page; /* * Ensure proper count is passed which otherwise would stuck in the @@ -1151,7 +1206,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, if (bulkfree_pcp_prepare(page)) continue; - list_add_tail(&page->lru, &head); + list_add_tail(&page->lru, dst); /* * We are going to put the page back to the global @@ -1166,26 +1221,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, prefetch_buddy(page); } while (--count && --batch_free && !list_empty(list)); } - - spin_lock(&zone->lock); - isolated_pageblocks = has_isolate_pageblock(zone); - - /* - * Use safe version since after __free_one_page(), - * page->lru.next will not point to original list. - */ - list_for_each_entry_safe(page, tmp, &head, lru) { - int mt = get_pcppage_migratetype(page); - /* MIGRATE_ISOLATE page should not go to pcplists */ - VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); - /* Pageblock could have been isolated meanwhile */ - if (unlikely(isolated_pageblocks)) - mt = get_pageblock_migratetype(page); - - __free_one_page(page, page_to_pfn(page), zone, 0, mt); - trace_mm_page_pcpu_drain(page, 0, mt); - } - spin_unlock(&zone->lock); } static void free_one_page(struct zone *zone, @@ -1280,10 +1315,10 @@ static void __free_pages_ok(struct page *page, unsigned int order) return; migratetype = get_pfnblock_migratetype(page, pfn); - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); __count_vm_events(PGFREE, 1 << order); free_one_page(page_zone(page), page, pfn, order, migratetype); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } static void __init __free_pages_boot_core(struct page *page, unsigned int order) @@ -2546,13 +2581,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; int to_drain, batch; + LIST_HEAD(dst); - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); if (to_drain > 0) - free_pcppages_bulk(zone, to_drain, pcp); - local_irq_restore(flags); + isolate_pcp_pages(to_drain, pcp, &dst); + + local_unlock_irqrestore(pa_lock, flags); + + if (to_drain > 0) + free_pcppages_bulk(zone, &dst, false); } #endif @@ -2568,14 +2608,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) unsigned long flags; struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; + LIST_HEAD(dst); + int count; - local_irq_save(flags); + cpu_lock_irqsave(cpu, flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - if (pcp->count) - free_pcppages_bulk(zone, pcp->count, pcp); - local_irq_restore(flags); + count = pcp->count; + if (count) + isolate_pcp_pages(count, pcp, &dst); + + cpu_unlock_irqrestore(cpu, flags); + + if (count) + free_pcppages_bulk(zone, &dst, false); } /* @@ -2610,6 +2657,7 @@ void 
drain_local_pages(struct zone *zone) drain_pages(cpu); } +#ifndef CONFIG_PREEMPT_RT_BASE static void drain_local_pages_wq(struct work_struct *work) { /* @@ -2623,6 +2671,7 @@ static void drain_local_pages_wq(struct work_struct *work) drain_local_pages(NULL); preempt_enable(); } +#endif /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator. @@ -2689,7 +2738,14 @@ void drain_all_pages(struct zone *zone) else cpumask_clear_cpu(cpu, &cpus_with_pcps); } - +#ifdef CONFIG_PREEMPT_RT_BASE + for_each_cpu(cpu, &cpus_with_pcps) { + if (zone) + drain_pages_zone(cpu, zone); + else + drain_pages(cpu); + } +#else for_each_cpu(cpu, &cpus_with_pcps) { struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); INIT_WORK(work, drain_local_pages_wq); @@ -2697,6 +2753,7 @@ void drain_all_pages(struct zone *zone) } for_each_cpu(cpu, &cpus_with_pcps) flush_work(per_cpu_ptr(&pcpu_drain, cpu)); +#endif mutex_unlock(&pcpu_drain_mutex); } @@ -2768,7 +2825,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn) return true; } -static void free_unref_page_commit(struct page *page, unsigned long pfn) +static void free_unref_page_commit(struct page *page, unsigned long pfn, + struct list_head *dst) { struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; @@ -2797,7 +2855,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); - free_pcppages_bulk(zone, batch, pcp); + + isolate_pcp_pages(batch, pcp, dst); } } @@ -2808,13 +2867,17 @@ void free_unref_page(struct page *page) { unsigned long flags; unsigned long pfn = page_to_pfn(page); + struct zone *zone = page_zone(page); + LIST_HEAD(dst); if (!free_unref_page_prepare(page, pfn)) return; - local_irq_save(flags); - free_unref_page_commit(page, pfn); - local_irq_restore(flags); + local_lock_irqsave(pa_lock, flags); + free_unref_page_commit(page, pfn, &dst); + local_unlock_irqrestore(pa_lock, flags); + if (!list_empty(&dst)) + free_pcppages_bulk(zone, &dst, false); } /* @@ -2825,6 +2888,11 @@ void free_unref_page_list(struct list_head *list) struct page *page, *next; unsigned long flags, pfn; int batch_count = 0; + struct list_head dsts[__MAX_NR_ZONES]; + int i; + + for (i = 0; i < __MAX_NR_ZONES; i++) + INIT_LIST_HEAD(&dsts[i]); /* Prepare pages for freeing */ list_for_each_entry_safe(page, next, list, lru) { @@ -2834,25 +2902,42 @@ void free_unref_page_list(struct list_head *list) set_page_private(page, pfn); } - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); list_for_each_entry_safe(page, next, list, lru) { unsigned long pfn = page_private(page); + enum zone_type type; set_page_private(page, 0); trace_mm_page_free_batched(page); - free_unref_page_commit(page, pfn); + type = page_zonenum(page); + free_unref_page_commit(page, pfn, &dsts[type]); /* * Guard against excessive IRQ disabled times when we get * a large list of pages to free. 
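The guard mentioned in the comment above bounds how long the local lock stays held: it is dropped and immediately re-taken every SWAP_CLUSTER_MAX pages, opening a window for interrupts and, on RT, preemption. A condensed view of that loop (free_list_bounded is a hypothetical name wrapping the logic of free_unref_page_list()):

static void free_list_bounded(struct list_head *list)
{
	struct page *page, *next;
	unsigned long flags;
	int batch_count = 0;

	local_lock_irqsave(pa_lock, flags);
	list_for_each_entry_safe(page, next, list, lru) {
		/* ... move page onto its per-zone destination list ... */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			/* Briefly open an IRQ/preemption window. */
			local_unlock_irqrestore(pa_lock, flags);
			batch_count = 0;
			local_lock_irqsave(pa_lock, flags);
		}
	}
	local_unlock_irqrestore(pa_lock, flags);
}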
*/ if (++batch_count == SWAP_CLUSTER_MAX) { - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); batch_count = 0; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); } } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); + + for (i = 0; i < __MAX_NR_ZONES; ) { + struct page *page; + struct zone *zone; + + if (list_empty(&dsts[i])) { + i++; + continue; + } + + page = list_first_entry(&dsts[i], struct page, lru); + zone = page_zone(page); + + free_pcppages_bulk(zone, &dsts[i], true); + } } /* @@ -2986,7 +3071,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, struct page *page; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; page = __rmqueue_pcplist(zone, migratetype, pcp, list); @@ -2994,7 +3079,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone); } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); return page; } @@ -3021,7 +3106,7 @@ struct page *rmqueue(struct zone *preferred_zone, * allocate greater than order-1 page units with __GFP_NOFAIL. */ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); - spin_lock_irqsave(&zone->lock, flags); + local_spin_lock_irqsave(pa_lock, &zone->lock, flags); do { page = NULL; @@ -3041,14 +3126,14 @@ struct page *rmqueue(struct zone *preferred_zone, __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); out: VM_BUG_ON_PAGE(page && bad_range(zone, page), page); return page; failed: - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); return NULL; } @@ -7172,8 +7257,9 @@ void __init free_area_init(unsigned long *zones_size) static int page_alloc_cpu_dead(unsigned int cpu) { - + local_lock_irq_on(swapvec_lock, cpu); lru_add_drain_cpu(cpu); + local_unlock_irq_on(swapvec_lock, cpu); drain_pages(cpu); /* @@ -8084,7 +8170,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); @@ -8093,7 +8179,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/slab.c b/mm/slab.c index 46f21e73db2f8f1eec907b38f1d69065e375e70a..38f6609343b3750af1ae4273851b538d38785156 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) parent->shared = NULL; parent->alien = NULL; parent->colour_next = 0; - spin_lock_init(&parent->list_lock); + raw_spin_lock_init(&parent->list_lock); parent->free_objects = 0; parent->free_touched = 0; } @@ -600,9 +600,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, page_node = page_to_nid(page); n = get_node(cachep, page_node); - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); free_block(cachep, &objp, 1, page_node, &list); - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); } @@ -731,7 +731,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, struct kmem_cache_node 
*n = get_node(cachep, node); if (ac->avail) { - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); /* * Stuff objects into the remote nodes shared array first. * That way we could avoid the overhead of putting the objects @@ -742,7 +742,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, free_block(cachep, ac->entry, ac->avail, node, list); ac->avail = 0; - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); } } @@ -815,9 +815,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp, slabs_destroy(cachep, &list); } else { n = get_node(cachep, page_node); - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); free_block(cachep, &objp, 1, page_node, &list); - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); } return 1; @@ -858,10 +858,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) */ n = get_node(cachep, node); if (n) { - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); return 0; } @@ -940,7 +940,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, goto fail; n = get_node(cachep, node); - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); if (n->shared && force_change) { free_block(cachep, n->shared->entry, n->shared->avail, node, &list); @@ -958,7 +958,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, new_alien = NULL; } - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); /* @@ -997,7 +997,7 @@ static void cpuup_canceled(long cpu) if (!n) continue; - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); /* Free limit for this kmem_cache_node */ n->free_limit -= cachep->batchcount; @@ -1010,7 +1010,7 @@ static void cpuup_canceled(long cpu) } if (!cpumask_empty(mask)) { - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); goto free_slab; } @@ -1024,7 +1024,7 @@ static void cpuup_canceled(long cpu) alien = n->alien; n->alien = NULL; - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); kfree(shared); if (alien) { @@ -1208,7 +1208,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node * /* * Do not assume that spinlocks can be initialized via memcpy: */ - spin_lock_init(&ptr->list_lock); + raw_spin_lock_init(&ptr->list_lock); MAKE_ALL_LISTS(cachep, ptr, nodeid); cachep->node[nodeid] = ptr; @@ -1379,11 +1379,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) for_each_kmem_cache_node(cachep, node, n) { unsigned long total_slabs, free_slabs, free_objs; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); total_slabs = n->total_slabs; free_slabs = n->free_slabs; free_objs = n->free_objects; - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", node, total_slabs - free_slabs, total_slabs, @@ -2178,7 +2178,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep) { #ifdef CONFIG_SMP check_irq_off(); - assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); + assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); #endif } @@ -2186,7 +2186,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) { #ifdef CONFIG_SMP 
check_irq_off(); - assert_spin_locked(&get_node(cachep, node)->list_lock); + assert_raw_spin_locked(&get_node(cachep, node)->list_lock); #endif } @@ -2226,9 +2226,9 @@ static void do_drain(void *arg) check_irq_off(); ac = cpu_cache_get(cachep); n = get_node(cachep, node); - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); ac->avail = 0; } @@ -2246,9 +2246,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep) drain_alien_cache(cachep, n->alien); for_each_kmem_cache_node(cachep, node, n) { - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); drain_array_locked(cachep, n->shared, node, true, &list); - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); } @@ -2270,10 +2270,10 @@ static int drain_freelist(struct kmem_cache *cache, nr_freed = 0; while (nr_freed < tofree && !list_empty(&n->slabs_free)) { - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); p = n->slabs_free.prev; if (p == &n->slabs_free) { - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); goto out; } @@ -2286,7 +2286,7 @@ static int drain_freelist(struct kmem_cache *cache, * to the cache. */ n->free_objects -= cache->num; - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); slab_destroy(cache, page); nr_freed++; } @@ -2734,7 +2734,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) INIT_LIST_HEAD(&page->lru); n = get_node(cachep, page_to_nid(page)); - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); n->total_slabs++; if (!page->active) { list_add_tail(&page->lru, &(n->slabs_free)); @@ -2744,7 +2744,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) STATS_INC_GROWN(cachep); n->free_objects += cachep->num - page->active; - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); } @@ -2912,7 +2912,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) { struct page *page; - assert_spin_locked(&n->list_lock); + assert_raw_spin_locked(&n->list_lock); page = list_first_entry_or_null(&n->slabs_partial, struct page, lru); if (!page) { n->free_touched = 1; @@ -2938,10 +2938,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, if (!gfp_pfmemalloc_allowed(flags)) return NULL; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); page = get_first_slab(n, true); if (!page) { - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); return NULL; } @@ -2950,7 +2950,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, fixup_slab_list(cachep, n, page, &list); - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); return obj; @@ -3009,7 +3009,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) if (!n->free_objects && (!shared || !shared->avail)) goto direct_grow; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); shared = READ_ONCE(n->shared); /* See if we can refill from the shared array */ @@ -3033,7 +3033,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) must_grow: n->free_objects -= ac->avail; alloc_done: - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); direct_grow: @@ -3258,7 +3258,7 @@ static void 
*____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, BUG_ON(!n); check_irq_off(); - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); page = get_first_slab(n, false); if (!page) goto must_grow; @@ -3276,12 +3276,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, fixup_slab_list(cachep, n, page, &list); - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); return obj; must_grow: - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); if (page) { /* This slab isn't counted yet so don't update free_objects */ @@ -3457,7 +3457,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) check_irq_off(); n = get_node(cachep, node); - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); if (n->shared) { struct array_cache *shared_array = n->shared; int max = shared_array->limit - shared_array->avail; @@ -3486,7 +3486,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) STATS_SET_FREEABLE(cachep, i); } #endif - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); ac->avail -= batchcount; memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); @@ -3896,9 +3896,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, node = cpu_to_mem(cpu); n = get_node(cachep, node); - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); } free_percpu(prev); @@ -4023,9 +4023,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, return; } - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); drain_array_locked(cachep, ac, node, false, &list); - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); } @@ -4109,7 +4109,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) for_each_kmem_cache_node(cachep, node, n) { check_irq_on(); - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); total_slabs += n->total_slabs; free_slabs += n->free_slabs; @@ -4118,7 +4118,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) if (n->shared) shared_avail += n->shared->avail; - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); } num_objs = total_slabs * cachep->num; active_slabs = total_slabs - free_slabs; @@ -4338,13 +4338,13 @@ static int leaks_show(struct seq_file *m, void *p) for_each_kmem_cache_node(cachep, node, n) { check_irq_on(); - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); list_for_each_entry(page, &n->slabs_full, lru) handle_slab(x, cachep, page); list_for_each_entry(page, &n->slabs_partial, lru) handle_slab(x, cachep, page); - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); } } while (!is_store_user_clean(cachep)); diff --git a/mm/slab.h b/mm/slab.h index 0ed7a463f476ca2e53e06c37b1c854c550095b34..12b7da32bcd01308895de9bddb9fedb37adf1213 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -454,7 +454,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, * The slab lists for all objects. 
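The struct change that follows is the reason for all of the raw_spin_* conversions in this file: on PREEMPT_RT a plain spinlock_t becomes a sleeping rtmutex, while raw_spinlock_t remains a genuine spinning lock. list_lock is taken from contexts that must not sleep, so it has to stay raw. A minimal illustration of the distinction:

static DEFINE_SPINLOCK(may_sleep_lock);		/* RT: rtmutex, section preemptible */
static DEFINE_RAW_SPINLOCK(atomic_lock);	/* RT: still spins, preemption off */

static void lock_semantics_example(void)
{
	spin_lock(&may_sleep_lock);	/* may block on RT; process context only */
	spin_unlock(&may_sleep_lock);

	raw_spin_lock(&atomic_lock);	/* usable from truly atomic context on RT */
	raw_spin_unlock(&atomic_lock);
}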
*/ struct kmem_cache_node { - spinlock_t list_lock; + raw_spinlock_t list_lock; #ifdef CONFIG_SLAB struct list_head slabs_partial; /* partial list first, better asm code */ diff --git a/mm/slub.c b/mm/slub.c index 499fb073d1ffd49098322dc32601edf2a356a22c..6b9b894ba5bc4a1558b63e5237338cf986019fd0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1182,7 +1182,7 @@ static noinline int free_debug_processing( unsigned long uninitialized_var(flags); int ret = 0; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); slab_lock(page); if (s->flags & SLAB_CONSISTENCY_CHECKS) { @@ -1217,7 +1217,7 @@ static noinline int free_debug_processing( bulk_cnt, cnt); slab_unlock(page); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); if (!ret) slab_fix(s, "Object at 0x%p not freed", object); return ret; @@ -1350,6 +1350,12 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page, } #endif /* CONFIG_SLUB_DEBUG */ +struct slub_free_list { + raw_spinlock_t lock; + struct list_head list; +}; +static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); + /* * Hooks for other subsystems that check memory allocations. In a typical * production configuration these hooks all should produce no code at all. @@ -1591,10 +1597,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) void *start, *p; int idx, order; bool shuffle; + bool enableirqs = false; flags &= gfp_allowed_mask; if (gfpflags_allow_blocking(flags)) + enableirqs = true; +#ifdef CONFIG_PREEMPT_RT_FULL + if (system_state > SYSTEM_BOOTING) + enableirqs = true; +#endif + if (enableirqs) local_irq_enable(); flags |= s->allocflags; @@ -1653,7 +1666,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) page->frozen = 1; out: - if (gfpflags_allow_blocking(flags)) + if (enableirqs) local_irq_disable(); if (!page) return NULL; @@ -1711,6 +1724,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __free_pages(page, order); } +static void free_delayed(struct list_head *h) +{ + while (!list_empty(h)) { + struct page *page = list_first_entry(h, struct page, lru); + + list_del(&page->lru); + __free_slab(page->slab_cache, page); + } +} + static void rcu_free_slab(struct rcu_head *h) { struct page *page = container_of(h, struct page, rcu_head); @@ -1722,6 +1745,12 @@ static void free_slab(struct kmem_cache *s, struct page *page) { if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { call_rcu(&page->rcu_head, rcu_free_slab); + } else if (irqs_disabled()) { + struct slub_free_list *f = this_cpu_ptr(&slub_free_list); + + raw_spin_lock(&f->lock); + list_add(&page->lru, &f->list); + raw_spin_unlock(&f->lock); } else __free_slab(s, page); } @@ -1829,7 +1858,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, if (!n || !n->nr_partial) return NULL; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); list_for_each_entry_safe(page, page2, &n->partial, lru) { void *t; @@ -1854,7 +1883,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, break; } - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); return object; } @@ -2106,7 +2135,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, * that acquire_slab() will see a slab page that * is frozen */ - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } } else { m = M_FULL; @@ -2117,7 +2146,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page 
*page, * slabs from diagnostic functions will not see * any frozen slabs. */ - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } } @@ -2152,7 +2181,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, goto redo; if (lock) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); @@ -2187,10 +2216,10 @@ static void unfreeze_partials(struct kmem_cache *s, n2 = get_node(s, page_to_nid(page)); if (n != n2) { if (n) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); n = n2; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } do { @@ -2219,7 +2248,7 @@ static void unfreeze_partials(struct kmem_cache *s, } if (n) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); while (discard_page) { page = discard_page; @@ -2256,14 +2285,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) pobjects = oldpage->pobjects; pages = oldpage->pages; if (drain && pobjects > s->cpu_partial) { + struct slub_free_list *f; unsigned long flags; + LIST_HEAD(tofree); /* * partial array is full. Move the existing * set to the per node partial list. */ local_irq_save(flags); unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); + f = this_cpu_ptr(&slub_free_list); + raw_spin_lock(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock(&f->lock); local_irq_restore(flags); + free_delayed(&tofree); oldpage = NULL; pobjects = 0; pages = 0; @@ -2333,7 +2369,19 @@ static bool has_cpu_slab(int cpu, void *info) static void flush_all(struct kmem_cache *s) { + LIST_HEAD(tofree); + int cpu; + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); + for_each_online_cpu(cpu) { + struct slub_free_list *f; + + f = &per_cpu(slub_free_list, cpu); + raw_spin_lock_irq(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock_irq(&f->lock); + free_delayed(&tofree); + } } /* @@ -2388,10 +2436,10 @@ static unsigned long count_partial(struct kmem_cache_node *n, unsigned long x = 0; struct page *page; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) x += get_count(page); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return x; } #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ @@ -2531,8 +2579,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) * already disabled (which is the case for bulk allocation). */ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - unsigned long addr, struct kmem_cache_cpu *c) + unsigned long addr, struct kmem_cache_cpu *c, + struct list_head *to_free) { + struct slub_free_list *f; void *freelist; struct page *page; @@ -2598,6 +2648,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); + +out: + f = this_cpu_ptr(&slub_free_list); + raw_spin_lock(&f->lock); + list_splice_init(&f->list, to_free); + raw_spin_unlock(&f->lock); + return freelist; new_slab: @@ -2613,7 +2670,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, if (unlikely(!freelist)) { slab_out_of_memory(s, gfpflags, node); - return NULL; + goto out; } page = c->page; @@ -2626,7 +2683,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, goto new_slab; /* Slab failed checks. 
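The slub_free_list machinery used above implements deferred freeing: when a slab page becomes free while interrupts are disabled, __free_slab() cannot be called on RT (the page allocator takes sleeping locks there), so the page is parked on a per-CPU list and drained later from a sleepable context. A sketch of the pattern, reusing the names this patch introduces:

/* Called with IRQs off: just park the page, do not free it here. */
static void park_slab_page(struct page *page)
{
	struct slub_free_list *f = this_cpu_ptr(&slub_free_list);

	raw_spin_lock(&f->lock);
	list_add(&page->lru, &f->list);
	raw_spin_unlock(&f->lock);
}

/* Called with IRQs on (e.g. from flush_all()): return the parked pages. */
static void drain_parked_pages(int cpu)
{
	struct slub_free_list *f = &per_cpu(slub_free_list, cpu);
	LIST_HEAD(tofree);

	raw_spin_lock_irq(&f->lock);
	list_splice_init(&f->list, &tofree);
	raw_spin_unlock_irq(&f->lock);
	free_delayed(&tofree);	/* __free_slab() on each parked page */
}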
Next slab needed */ deactivate_slab(s, page, get_freepointer(s, freelist), c); - return freelist; + goto out; } /* @@ -2638,6 +2695,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, { void *p; unsigned long flags; + LIST_HEAD(tofree); local_irq_save(flags); #ifdef CONFIG_PREEMPT @@ -2649,8 +2707,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, c = this_cpu_ptr(s->cpu_slab); #endif - p = ___slab_alloc(s, gfpflags, node, addr, c); + p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); local_irq_restore(flags); + free_delayed(&tofree); return p; } @@ -2836,7 +2895,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, do { if (unlikely(n)) { - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); n = NULL; } prior = page->freelist; @@ -2868,7 +2927,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. */ - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); } } @@ -2910,7 +2969,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return; slab_empty: @@ -2925,7 +2984,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, remove_full(s, n, page); } - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); discard_slab(s, page); } @@ -3130,6 +3189,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { struct kmem_cache_cpu *c; + LIST_HEAD(to_free); int i; /* memcg and kmem_cache debug support */ @@ -3162,7 +3222,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, * of re-populating per CPU c->freelist */ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, - _RET_IP_, c); + _RET_IP_, c, &to_free); if (unlikely(!p[i])) goto error; @@ -3174,6 +3234,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, } c->tid = next_tid(c->tid); local_irq_enable(); + free_delayed(&to_free); /* Clear memory outside IRQ disabled fastpath loop */ if (unlikely(flags & __GFP_ZERO)) { @@ -3188,6 +3249,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, return i; error: local_irq_enable(); + free_delayed(&to_free); slab_post_alloc_hook(s, flags, i, p); __kmem_cache_free_bulk(s, i, p); return 0; @@ -3323,7 +3385,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; - spin_lock_init(&n->list_lock); + raw_spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); @@ -3676,6 +3738,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { #ifdef CONFIG_SLUB_DEBUG +#ifdef CONFIG_PREEMPT_RT_BASE + /* XXX move out of irq-off section */ + slab_err(s, page, text, s->name); +#else + void *addr = page_address(page); void *p; unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects), @@ -3697,6 +3764,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, slab_unlock(page); kfree(map); #endif +#endif } /* @@ -3710,7 +3778,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) struct page *page, *h; BUG_ON(irqs_disabled()); - 
spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); list_for_each_entry_safe(page, h, &n->partial, lru) { if (!page->inuse) { remove_partial(n, page); @@ -3720,7 +3788,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) "Objects remaining in %s on __kmem_cache_shutdown()"); } } - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); list_for_each_entry_safe(page, h, &discard, lru) discard_slab(s, page); @@ -3993,7 +4061,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) for (i = 0; i < SHRINK_PROMOTE_MAX; i++) INIT_LIST_HEAD(promote + i); - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); /* * Build lists of slabs to discard or promote. @@ -4024,7 +4092,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) list_splice(promote + i, &n->partial); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); /* Release empty slabs */ list_for_each_entry_safe(page, t, &discard, lru) @@ -4237,6 +4305,12 @@ void __init kmem_cache_init(void) { static __initdata struct kmem_cache boot_kmem_cache, boot_kmem_cache_node; + int cpu; + + for_each_possible_cpu(cpu) { + raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); + INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); + } if (debug_guardpage_minorder()) slub_max_order = 0; @@ -4438,7 +4512,7 @@ static int validate_slab_node(struct kmem_cache *s, struct page *page; unsigned long flags; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); @@ -4460,7 +4534,7 @@ static int validate_slab_node(struct kmem_cache *s, s->name, count, atomic_long_read(&n->nr_slabs)); out: - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return count; } @@ -4517,6 +4591,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) struct location *l; int order; + if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC) + return 0; + order = get_order(sizeof(struct location) * max); l = (void *)__get_free_pages(flags, order); @@ -4650,12 +4727,12 @@ static int list_locations(struct kmem_cache *s, char *buf, if (!atomic_long_read(&n->nr_slabs)) continue; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) process_slab(&t, s, page, alloc, map); list_for_each_entry(page, &n->full, lru) process_slab(&t, s, page, alloc, map); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); } for (i = 0; i < t.count; i++) { diff --git a/mm/sparse.c b/mm/sparse.c index ed60f0a375fec9f00d325344df173e67cceacd75..a0e950e1d584839f277b97739c466c9ee045e5ad 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -17,6 +17,8 @@ #include #include +#include + /* * Permanent SPARSEMEM data: * @@ -405,7 +407,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid) { WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? 
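One more RT rule shows up in the alloc_loc_track() change above: even GFP_ATOMIC cannot be honoured from truly atomic context on PREEMPT_RT, because the page allocator internally takes spinlock_t locks that sleep there. Debug and statistics paths therefore have to tolerate the allocation being refused. A hedged sketch of that convention (loc_buf_alloc is a hypothetical name):

static void *loc_buf_alloc(size_t size, gfp_t gfp)
{
	/* On RT, refuse opportunistic atomic allocations outright. */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && gfp == GFP_ATOMIC)
		return NULL;	/* caller must degrade gracefully */

	return (void *)__get_free_pages(gfp, get_order(size));
}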
*/ sparsemap_buf = - memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE, + memblock_alloc_exact_nid_raw(size, section_map_size(), __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); sparsemap_buf_end = sparsemap_buf + size; diff --git a/mm/swap.c b/mm/swap.c index 45fdbfb6b2a608857165d6efc625c01b4ab92438..3885645a45cea4a2100030b18d0d3b08a03bc01f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs); #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); #endif +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); /* * This path almost never happens for VM activity - pages are normally @@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page *page) unsigned long flags; get_page(page); - local_irq_save(flags); + local_lock_irqsave(rotate_lock, flags); pvec = this_cpu_ptr(&lru_rotate_pvecs); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_move_tail(pvec); - local_irq_restore(flags); + local_unlock_irqrestore(rotate_lock, flags); } } @@ -307,12 +310,13 @@ void activate_page(struct page *page) { page = compound_head(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + activate_page_pvecs); get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); - put_cpu_var(activate_page_pvecs); + put_locked_var(swapvec_lock, activate_page_pvecs); } } @@ -334,7 +338,7 @@ void activate_page(struct page *page) static void __lru_cache_activate_page(struct page *page) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); int i; /* @@ -356,7 +360,7 @@ static void __lru_cache_activate_page(struct page *page) } } - put_cpu_var(lru_add_pvec); + put_locked_var(swapvec_lock, lru_add_pvec); } /* @@ -398,12 +402,12 @@ EXPORT_SYMBOL(mark_page_accessed); static void __lru_cache_add(struct page *page) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) __pagevec_lru_add(pvec); - put_cpu_var(lru_add_pvec); + put_locked_var(swapvec_lock, lru_add_pvec); } /** @@ -581,9 +585,15 @@ void lru_add_drain_cpu(int cpu) unsigned long flags; /* No harm done if a racing interrupt already did this */ - local_irq_save(flags); +#ifdef CONFIG_PREEMPT_RT_BASE + local_lock_irqsave_on(rotate_lock, flags, cpu); pagevec_move_tail(pvec); - local_irq_restore(flags); + local_unlock_irqrestore_on(rotate_lock, flags, cpu); +#else + local_lock_irqsave(rotate_lock, flags); + pagevec_move_tail(pvec); + local_unlock_irqrestore(rotate_lock, flags); +#endif } pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); @@ -615,11 +625,12 @@ void deactivate_file_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + lru_deactivate_file_pvecs); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); - put_cpu_var(lru_deactivate_file_pvecs); + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); } } @@ -634,23 +645,34 @@ void mark_page_lazyfree(struct page *page) { if 
(PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + lru_lazyfree_pvecs); get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); - put_cpu_var(lru_lazyfree_pvecs); + put_locked_var(swapvec_lock, lru_lazyfree_pvecs); } } void lru_add_drain(void) { - lru_add_drain_cpu(get_cpu()); - put_cpu(); + lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); + local_unlock_cpu(swapvec_lock); } #ifdef CONFIG_SMP +#ifdef CONFIG_PREEMPT_RT_BASE +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) +{ + local_lock_on(swapvec_lock, cpu); + lru_add_drain_cpu(cpu); + local_unlock_on(swapvec_lock, cpu); +} + +#else + static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); static void lru_add_drain_per_cpu(struct work_struct *dummy) @@ -658,6 +680,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) lru_add_drain(); } +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) +{ + struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); + + INIT_WORK(work, lru_add_drain_per_cpu); + queue_work_on(cpu, mm_percpu_wq, work); + cpumask_set_cpu(cpu, has_work); +} +#endif + /* * Doesn't need any cpu hotplug locking because we do rely on per-cpu * kworkers being shut down before our page_alloc_cpu_dead callback is @@ -682,21 +714,19 @@ void lru_add_drain_all(void) cpumask_clear(&has_work); for_each_online_cpu(cpu) { - struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) || - need_activate_page_drain(cpu)) { - INIT_WORK(work, lru_add_drain_per_cpu); - queue_work_on(cpu, mm_percpu_wq, work); - cpumask_set_cpu(cpu, &has_work); - } + need_activate_page_drain(cpu)) + remote_lru_add_drain(cpu, &has_work); } +#ifndef CONFIG_PREEMPT_RT_BASE for_each_cpu(cpu, &has_work) flush_work(&per_cpu(lru_add_drain_work, cpu)); +#endif mutex_unlock(&lock); } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 1817871b02395ceccecdfa918dce4a97d5203786..aa06badc76f4d5b39cd406f81508dbc358eb4e46 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -853,7 +853,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; - int node, err; + int node, err, cpu; void *vaddr; node = numa_node_id(); @@ -896,11 +896,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) BUG_ON(err); radix_tree_preload_end(); - vbq = &get_cpu_var(vmap_block_queue); + cpu = get_cpu_light(); + vbq = this_cpu_ptr(&vmap_block_queue); spin_lock(&vbq->lock); list_add_tail_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); - put_cpu_var(vmap_block_queue); + put_cpu_light(); return vaddr; } @@ -969,6 +970,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; void *vaddr = NULL; unsigned int order; + int cpu; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); @@ -983,7 +985,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) order = get_order(size); rcu_read_lock(); - vbq = &get_cpu_var(vmap_block_queue); + cpu = get_cpu_light(); + vbq = this_cpu_ptr(&vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { 
unsigned long pages_off; @@ -1006,7 +1009,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) break; } - put_cpu_var(vmap_block_queue); + put_cpu_light(); rcu_read_unlock(); /* Allocate new block if nothing was found */ diff --git a/mm/vmstat.c b/mm/vmstat.c index 21e07e71ea2dfad8c7866225868c29af74131cbd..ff7c0d08f2b6611c3719bfc2d646b87987b6d477 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long x; long t; + preempt_disable_rt(); x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -329,6 +330,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, x = 0; } __this_cpu_write(*p, x); + preempt_enable_rt(); } EXPORT_SYMBOL(__mod_zone_page_state); @@ -340,6 +342,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, long x; long t; + preempt_disable_rt(); x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -349,6 +352,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, x = 0; } __this_cpu_write(*p, x); + preempt_enable_rt(); } EXPORT_SYMBOL(__mod_node_page_state); @@ -381,6 +385,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -389,6 +394,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v + overstep, zone, item); __this_cpu_write(*p, -overstep); } + preempt_enable_rt(); } void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) @@ -397,6 +403,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) s8 __percpu *p = pcp->vm_node_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -405,6 +412,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) node_page_state_add(v + overstep, pgdat, item); __this_cpu_write(*p, -overstep); } + preempt_enable_rt(); } void __inc_zone_page_state(struct page *page, enum zone_stat_item item) @@ -425,6 +433,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -433,6 +442,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v - overstep, zone, item); __this_cpu_write(*p, overstep); } + preempt_enable_rt(); } void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) @@ -441,6 +451,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) s8 __percpu *p = pcp->vm_node_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -449,6 +460,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) node_page_state_add(v - overstep, pgdat, item); __this_cpu_write(*p, overstep); } + preempt_enable_rt(); } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 4d71356ea66a36aa31e1ae3665e7001be011e837..50c5c3922b07c8fb0b1607958720c6b2cbc1a648 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -56,6 
+56,7 @@ #include #include #include +#include #define ZSPAGE_MAGIC 0x58 @@ -73,9 +74,22 @@ */ #define ZS_MAX_ZSPAGE_ORDER 2 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) - #define ZS_HANDLE_SIZE (sizeof(unsigned long)) +#ifdef CONFIG_PREEMPT_RT_FULL + +struct zsmalloc_handle { + unsigned long addr; + struct mutex lock; +}; + +#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle)) + +#else + +#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long)) +#endif + /* * Object location (, ) is encoded as * as single (unsigned long) handle value. @@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} static int create_cache(struct zs_pool *pool) { - pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, + pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE, 0, 0, NULL); if (!pool->handle_cachep) return 1; @@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool) static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) { - return (unsigned long)kmem_cache_alloc(pool->handle_cachep, - gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); + void *p; + + p = kmem_cache_alloc(pool->handle_cachep, + gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); +#ifdef CONFIG_PREEMPT_RT_FULL + if (p) { + struct zsmalloc_handle *zh = p; + + mutex_init(&zh->lock); + } +#endif + return (unsigned long)p; } +#ifdef CONFIG_PREEMPT_RT_FULL +static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle) +{ + return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1)); +} +#endif + static void cache_free_handle(struct zs_pool *pool, unsigned long handle) { kmem_cache_free(pool->handle_cachep, (void *)handle); @@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) static void record_obj(unsigned long handle, unsigned long obj) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + WRITE_ONCE(zh->addr, obj); +#else /* * lsb of @obj represents handle lock while other bits * represent object value the handle is pointing so * updating shouldn't do store tearing. 
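For zsmalloc, the patch replaces the bit-spinlock that normally lives inside the handle word with a mutex stored next to the object address, so pinning a handle on RT can sleep and benefit from priority inheritance. A simplified view (zsmalloc_handle_sketch mirrors the struct added above):

struct zsmalloc_handle_sketch {
	unsigned long addr;	/* encoded object location */
	struct mutex lock;	/* replaces spinning on HANDLE_PIN_BIT */
};

static void pin_handle(struct zsmalloc_handle_sketch *zh)
{
	mutex_lock(&zh->lock);	/* sleeping lock: legal on RT */
}

static void unpin_handle(struct zsmalloc_handle_sketch *zh)
{
	mutex_unlock(&zh->lock);
}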
*/ WRITE_ONCE(*(unsigned long *)handle, obj); +#endif } /* zpool driver */ @@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc"); /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ static DEFINE_PER_CPU(struct mapping_area, zs_map_area); +static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock); static bool is_zspage_isolated(struct zspage *zspage) { @@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) static unsigned long handle_to_obj(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return zh->addr; +#else return *(unsigned long *)handle; +#endif } static unsigned long obj_to_head(struct page *page, void *obj) @@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj) static inline int testpin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_is_locked(&zh->lock); +#else return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static inline int trypin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_trylock(&zh->lock); +#else return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static void pin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_lock(&zh->lock); +#else bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static void unpin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_unlock(&zh->lock); +#else bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static void reset_page(struct page *page) @@ -1342,7 +1410,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, class = pool->size_class[class_idx]; off = (class->size * obj_idx) & ~PAGE_MASK; - area = &get_cpu_var(zs_map_area); + area = &get_locked_var(zs_map_area_lock, zs_map_area); area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ @@ -1396,7 +1464,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) __zs_unmap_object(area, pages, off, class->size); } - put_cpu_var(zs_map_area); + put_locked_var(zs_map_area_lock, zs_map_area); migrate_read_unlock(zspage); unpin_tag(handle); diff --git a/mm/zswap.c b/mm/zswap.c index cd91fd9d96b814d145e378b573dd289fb501e64e..420225d3ff0bda178a182aae03911101e5e1134d 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -990,6 +991,8 @@ static void zswap_fill_page(void *ptr, unsigned long value) memset_l(page, value, PAGE_SIZE / sizeof(unsigned long)); } +/* protect zswap_dstmem from concurrency */ +static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock); /********************************* * frontswap hooks **********************************/ @@ -1066,12 +1069,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, } /* compress */ - dst = get_cpu_var(zswap_dstmem); - tfm = *get_cpu_ptr(entry->pool->tfm); + dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem); + tfm = *this_cpu_ptr(entry->pool->tfm); src = kmap_atomic(page); ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen); kunmap_atomic(src); - put_cpu_ptr(entry->pool->tfm); if (ret) { ret = -EINVAL; goto 
put_dstmem; @@ -1094,7 +1096,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, memcpy(buf, &zhdr, hlen); memcpy(buf + hlen, dst, dlen); zpool_unmap_handle(entry->pool->zpool, handle); - put_cpu_var(zswap_dstmem); + put_locked_var(zswap_dstmem_lock, zswap_dstmem); /* populate entry */ entry->offset = offset; @@ -1122,7 +1124,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, return 0; put_dstmem: - put_cpu_var(zswap_dstmem); + put_locked_var(zswap_dstmem_lock, zswap_dstmem); zswap_pool_put(entry->pool); freepage: zswap_entry_cache_free(entry); diff --git a/net/Kconfig b/net/Kconfig index 228dfa382eeca8afd12d32475978259738dceb53..bc8d01996f222476025269f5604569bdabbd9bac 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -275,7 +275,7 @@ config CGROUP_NET_CLASSID config NET_RX_BUSY_POLL bool - default y + default y if !PREEMPT_RT_FULL config BQL bool diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c044ff2f73e6c98db3f194edb91cb311ec607688..75bc8102cdd72911315f026becf6c39f9afad357 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -64,15 +64,13 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) { struct sock *sk = d->owner, *parent; - unsigned long flags; if (!sk) return; BT_DBG("dlc %p state %ld err %d", d, d->state, err); - local_irq_save(flags); - bh_lock_sock(sk); + spin_lock_bh(&sk->sk_lock.slock); if (err) sk->sk_err = err; @@ -93,8 +91,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) sk->sk_state_change(sk); } - bh_unlock_sock(sk); - local_irq_restore(flags); + spin_unlock_bh(&sk->sk_lock.slock); if (parent && sock_flag(sk, SOCK_ZAPPED)) { /* We have to drop DLC lock here, otherwise diff --git a/net/core/dev.c b/net/core/dev.c index 42f6ff8b9703cdec6430b8d8c475dd73cedee61f..3df1fe0528639feaff1b6298735d043828c4dbc7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -219,14 +219,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_lock(&sd->input_pkt_queue.lock); + raw_spin_lock(&sd->input_pkt_queue.raw_lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_unlock(&sd->input_pkt_queue.lock); + raw_spin_unlock(&sd->input_pkt_queue.raw_lock); #endif } @@ -2725,6 +2725,7 @@ static void __netif_reschedule(struct Qdisc *q) sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } void __netif_schedule(struct Qdisc *q) @@ -2787,6 +2788,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) __this_cpu_write(softnet_data.completion_queue, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(__dev_kfree_skb_irq); @@ -3470,7 +3472,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. 
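The hunk that follows disables the optimistic busylock skip on RT. The upstream fast path only takes q->busylock when qdisc_is_running() reports contention, which presumes the running owner dequeues without being preempted; a plausible reading of the RT change is that, since the owner can be preempted mid-run on PREEMPT_RT_FULL, every sender is conservatively treated as contended. Condensed view of the resulting logic:

#ifdef CONFIG_PREEMPT_RT_FULL
	/* The "running" owner may be preempted: always serialize. */
	contended = true;
#else
	contended = qdisc_is_running(q);
#endif
	if (unlikely(contended))
		spin_lock(&q->busylock);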
*/ +#ifdef CONFIG_PREEMPT_RT_FULL + contended = true; +#else contended = qdisc_is_running(q); +#endif if (unlikely(contended)) spin_lock(&q->busylock); @@ -3831,10 +3837,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ +#ifdef CONFIG_PREEMPT_RT_FULL + if (READ_ONCE(txq->xmit_lock_owner) != current) { +#else /* Other cpus might concurrently change txq->xmit_lock_owner * to -1 or to their cpu id, but not to our id. */ if (READ_ONCE(txq->xmit_lock_owner) != cpu) { +#endif if (dev_xmit_recursion()) goto recursion_alert; @@ -4269,6 +4279,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, rps_unlock(sd); local_irq_restore(flags); + preempt_check_resched_rt(); atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); @@ -4483,7 +4494,7 @@ static int netif_rx_internal(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; - preempt_disable(); + migrate_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); @@ -4493,14 +4504,14 @@ static int netif_rx_internal(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); - preempt_enable(); + migrate_enable(); } else #endif { unsigned int qtail; - ret = enqueue_to_backlog(skb, get_cpu(), &qtail); - put_cpu(); + ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); + put_cpu_light(); } return ret; } @@ -4534,11 +4545,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); - preempt_disable(); + local_bh_disable(); err = netif_rx_internal(skb); - if (local_softirq_pending()) - do_softirq(); - preempt_enable(); + local_bh_enable(); return err; } @@ -5820,12 +5829,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) sd->rps_ipi_list = NULL; local_irq_enable(); + preempt_check_resched_rt(); /* Send pending IPI's to kick RPS processing on remote cpus. 
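netif_rx_internal() above swaps preempt_disable()/get_cpu() for migrate_disable()/get_cpu_light(): on RT it is enough to pin the task to its current CPU while per-CPU data is touched, since that data is protected by locks rather than by preemption being off. Sketch of the calling convention (get_cpu_light() is the RT-tree helper, degrading to get_cpu() on !RT):

static int backlog_enqueue_sketch(struct sk_buff *skb)
{
	unsigned int qtail;
	int ret;

	/* Pin to this CPU, but stay preemptible on RT. */
	ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
	put_cpu_light();
	return ret;
}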
*/ net_rps_send_ipi(remsd); } else #endif local_irq_enable(); + preempt_check_resched_rt(); } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) @@ -5855,7 +5866,9 @@ static int process_backlog(struct napi_struct *napi, int quota) while (again) { struct sk_buff *skb; + local_irq_disable(); while ((skb = __skb_dequeue(&sd->process_queue))) { + local_irq_enable(); rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); @@ -5863,9 +5876,9 @@ static int process_backlog(struct napi_struct *napi, int quota) if (++work >= quota) return work; + local_irq_disable(); } - local_irq_disable(); rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* @@ -5903,6 +5916,7 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(__napi_schedule); @@ -5939,6 +5953,7 @@ bool napi_schedule_prep(struct napi_struct *n) } EXPORT_SYMBOL(napi_schedule_prep); +#ifndef CONFIG_PREEMPT_RT_FULL /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule @@ -5957,6 +5972,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) __napi_schedule(n); } EXPORT_SYMBOL(__napi_schedule_irqoff); +#endif bool napi_complete_done(struct napi_struct *n, int work_done) { @@ -6337,13 +6353,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) unsigned long time_limit = jiffies + usecs_to_jiffies(netdev_budget_usecs); int budget = netdev_budget; + struct sk_buff_head tofree_q; + struct sk_buff *skb; LIST_HEAD(list); LIST_HEAD(repoll); + __skb_queue_head_init(&tofree_q); + local_irq_disable(); + skb_queue_splice_init(&sd->tofree_queue, &tofree_q); list_splice_init(&sd->poll_list, &list); local_irq_enable(); + while ((skb = __skb_dequeue(&tofree_q))) + kfree_skb(skb); + for (;;) { struct napi_struct *n; @@ -6373,7 +6397,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) list_splice_tail(&repoll, &list); list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); net_rps_action_and_irq_enable(sd); out: @@ -8580,7 +8604,7 @@ static void netdev_init_one_queue(struct net_device *dev, /* Initialize queue lock */ spin_lock_init(&queue->_xmit_lock); netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); - queue->xmit_lock_owner = -1; + netdev_queue_clear_owner(queue); netdev_queue_numa_node_write(queue, NUMA_NO_NODE); queue->dev = dev; #ifdef CONFIG_BQL @@ -9527,6 +9551,7 @@ static int dev_cpu_dead(unsigned int oldcpu) raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); #ifdef CONFIG_RPS remsd = oldsd->rps_ipi_list; @@ -9540,10 +9565,13 @@ static int dev_cpu_dead(unsigned int oldcpu) netif_rx_ni(skb); input_queue_head_incr(oldsd); } - while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx_ni(skb); input_queue_head_incr(oldsd); } + while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { + kfree_skb(skb); + } return 0; } @@ -9854,8 +9882,9 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); - skb_queue_head_init(&sd->input_pkt_queue); - skb_queue_head_init(&sd->process_queue); + skb_queue_head_init_raw(&sd->input_pkt_queue); + skb_queue_head_init_raw(&sd->process_queue); + skb_queue_head_init_raw(&sd->tofree_queue); #ifdef CONFIG_XFRM_OFFLOAD skb_queue_head_init(&sd->xfrm_backlog); #endif diff --git 
a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 752744db11ffc310a9f4ca15fe1e98d104265f13..7112e28b4130c6d8095a97e2027c5c2ec889b8af 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -46,7 +46,7 @@ struct net_rate_estimator { struct gnet_stats_basic_packed *bstats; spinlock_t *stats_lock; - seqcount_t *running; + net_seqlock_t *running; struct gnet_stats_basic_cpu __percpu *cpu_bstats; u8 ewma_log; u8 intvl_log; /* period : (250ms << intvl_log) */ @@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, - seqcount_t *running, + net_seqlock_t *running, struct nlattr *opt) { struct gnet_estimator *parm = nla_data(opt); @@ -230,7 +230,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, spinlock_t *lock, - seqcount_t *running, struct nlattr *opt) + net_seqlock_t *running, struct nlattr *opt) { return gen_new_estimator(bstats, cpu_bstats, rate_est, lock, running, opt); diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index e2fd8baec65f3087a40e017edabd06f04459a130..8bab887386918a29b0e9cb720213d29c3ff66677 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -142,7 +142,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, } void -__gnet_stats_copy_basic(const seqcount_t *running, +__gnet_stats_copy_basic(net_seqlock_t *running, struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) @@ -155,10 +155,10 @@ __gnet_stats_copy_basic(const seqcount_t *running, } do { if (running) - seq = read_seqcount_begin(running); + seq = net_seq_begin(running); bstats->bytes = b->bytes; bstats->packets = b->packets; - } while (running && read_seqcount_retry(running, seq)); + } while (running && net_seq_retry(running, seq)); } EXPORT_SYMBOL(__gnet_stats_copy_basic); @@ -176,7 +176,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic); * if the room in the socket buffer was not sufficient. 
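The seqcount_t to net_seqlock_t switch in the estimator and statistics code exists because a pure seqcount has no owner: a high-priority RT reader spinning in the begin/retry loop could livelock against a preempted writer. On RT the type therefore becomes a full seqlock, whose write side is a real, PI-aware lock. As far as the RT tree's include/net/net_seq_lock.h goes, the abstraction assumed here is roughly:

#ifdef CONFIG_PREEMPT_RT_FULL
# define net_seqlock_t			seqlock_t	/* writer holds a real lock */
# define net_seq_begin(__r)		read_seqbegin(__r)
# define net_seq_retry(__r, __s)	read_seqretry(__r, __s)
#else
# define net_seqlock_t			seqcount_t	/* plain seqcount */
# define net_seq_begin(__r)		read_seqcount_begin(__r)
# define net_seq_retry(__r, __s)	read_seqcount_retry(__r, __s)
#endif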
*/ int -gnet_stats_copy_basic(const seqcount_t *running, +gnet_stats_copy_basic(net_seqlock_t *running, struct gnet_dump *d, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 3714cd9e3111f57aefbecb5d0829e4f77c72a41a..2cea192b1475e8e93f287bc405b590fad7145490 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) s64 remaining; struct hrtimer_sleeper t; - hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS, + current); hrtimer_set_expires(&t.timer, spin_until); remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); @@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) } while (ktime_compare(end_time, spin_until) < 0); } else { /* see do_nanosleep */ - hrtimer_init_sleeper(&t, current); do { set_current_state(TASK_INTERRUPTIBLE); hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c623c129d0ab6fce1f4685759baf02890b4cda82..58f78205585cacb3cf3e1b6de0c13002e4335610 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include @@ -330,6 +331,8 @@ struct napi_alloc_cache { static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); +static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); +static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) { @@ -337,10 +340,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) unsigned long flags; void *data; - local_irq_save(flags); + local_lock_irqsave(netdev_alloc_lock, flags); nc = this_cpu_ptr(&netdev_alloc_cache); data = page_frag_alloc(nc, fragsz, gfp_mask); - local_irq_restore(flags); + local_unlock_irqrestore(netdev_alloc_lock, flags); return data; } @@ -361,9 +364,13 @@ EXPORT_SYMBOL(netdev_alloc_frag); static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) { - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); + struct napi_alloc_cache *nc; + void *data; - return page_frag_alloc(&nc->page, fragsz, gfp_mask); + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + data = page_frag_alloc(&nc->page, fragsz, gfp_mask); + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + return data; } void *napi_alloc_frag(unsigned int fragsz) @@ -416,13 +423,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; - local_irq_save(flags); + local_lock_irqsave(netdev_alloc_lock, flags); nc = this_cpu_ptr(&netdev_alloc_cache); data = page_frag_alloc(nc, len, gfp_mask); pfmemalloc = nc->pfmemalloc; - local_irq_restore(flags); + local_unlock_irqrestore(netdev_alloc_lock, flags); if (unlikely(!data)) return NULL; @@ -466,6 +473,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, struct napi_alloc_cache *nc; struct sk_buff *skb; void *data; + bool pfmemalloc; len += NET_SKB_PAD + NET_IP_ALIGN; @@ -488,7 +496,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); data = page_frag_alloc(&nc->page, len, gfp_mask); + 
pfmemalloc = nc->page.pfmemalloc; + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); if (unlikely(!data)) return NULL; @@ -499,7 +510,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, } /* use OR instead of assignment to avoid clearing of bits in mask */ - if (nc->page.pfmemalloc) + if (pfmemalloc) skb->pfmemalloc = 1; skb->head_frag = 1; @@ -731,23 +742,26 @@ void __consume_stateless_skb(struct sk_buff *skb) void __kfree_skb_flush(void) { - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); + struct napi_alloc_cache *nc; + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); /* flush skb_cache if containing objects */ if (nc->skb_count) { kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count, nc->skb_cache); nc->skb_count = 0; } + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); } static inline void _kfree_skb_defer(struct sk_buff *skb) { - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); + struct napi_alloc_cache *nc; /* drop skb->head and call any destructors for packet */ skb_release_all(skb); + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); /* record skb to CPU local list */ nc->skb_cache[nc->skb_count++] = skb; @@ -762,6 +776,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb) nc->skb_cache); nc->skb_count = 0; } + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); } void __kfree_skb_defer(struct sk_buff *skb) { diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index fe10a565b7d8568413a7685ebac1eff7bf9b8b90..d35719e08aefb35eff6653461ef8bc2aed1a9369 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -77,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -204,6 +205,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; * * On SMP we have one ICMP socket per-cpu. */ +static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock); + static struct sock *icmp_sk(struct net *net) { return *this_cpu_ptr(net->ipv4.icmp_sk); @@ -214,12 +217,16 @@ static inline struct sock *icmp_xmit_lock(struct net *net) { struct sock *sk; + if (!local_trylock(icmp_sk_lock)) + return NULL; + sk = icmp_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path signals a * dst_link_failure() for an outgoing ICMP packet. */ + local_unlock(icmp_sk_lock); return NULL; } return sk; @@ -228,6 +235,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net) static inline void icmp_xmit_unlock(struct sock *sk) { spin_unlock(&sk->sk_lock.slock); + local_unlock(icmp_sk_lock); } int sysctl_icmp_msgs_per_sec __read_mostly = 1000; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2719c60f285b02047bd24bf3b7d1a383f325963b..8e2db5007f598a7a4d032d1e079d1b525d033eca 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include @@ -637,6 +638,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(tcp_v4_send_check); +static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock); /* * This routine will send an RST to the other tcp. * @@ -771,6 +773,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) arg.tos = ip_hdr(skb)->tos; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); + local_lock(tcp_sk_lock); ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); if (sk) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? 
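The skbuff.c, icmp.c and tcp_ipv4.c hunks above all apply the same PREEMPT_RT conversion: per-CPU state that used to be serialized implicitly by local_irq_save()/get_cpu() is put under an explicit, named local lock. A minimal sketch of the pattern, using the same <linux/locallock.h> API the hunks themselves use (demo_lock and demo_counter are illustrative names, not symbols from the patch):

	#include <linux/locallock.h>
	#include <linux/percpu.h>

	static DEFINE_LOCAL_IRQ_LOCK(demo_lock);
	static DEFINE_PER_CPU(unsigned long, demo_counter);

	static void demo_update(void)
	{
		unsigned long flags;

		/*
		 * On !PREEMPT_RT this compiles down to local_irq_save(),
		 * i.e. the old behaviour. On PREEMPT_RT it takes a per-CPU
		 * sleeping lock instead, so the section stays preemptible
		 * and spinlock_t-protected code may be called inside it.
		 */
		local_lock_irqsave(demo_lock, flags);
		__this_cpu_inc(demo_counter);
		local_unlock_irqrestore(demo_lock, flags);
	}

The named lock also makes the critical section visible to lockdep, which a bare local_irq_save() never was, and it supports trylock semantics, which icmp_xmit_lock() above relies on to back off instead of deadlocking.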
@@ -783,6 +786,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) ctl_sk->sk_mark = 0; __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); + local_unlock(tcp_sk_lock); local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG @@ -863,6 +867,7 @@ static void tcp_v4_send_ack(const struct sock *sk, arg.tos = tos; arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); local_bh_disable(); + local_lock(tcp_sk_lock); ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); if (sk) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? @@ -874,6 +879,7 @@ static void tcp_v4_send_ack(const struct sock *sk, ctl_sk->sk_mark = 0; __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + local_unlock(tcp_sk_lock); local_bh_enable(); } diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 722d1b057f616111bd756ab4b5901ff85e7b0913..5b8ebd8abcb0dc3ac0a4be3a93cf7b36994b67cb 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,11 @@ #include "nf_internals.h" +#ifdef CONFIG_PREEMPT_RT_BASE +DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); +EXPORT_PER_CPU_SYMBOL(xt_write_lock); +#endif + const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; EXPORT_SYMBOL_GPL(nf_ipv6_ops); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f654f79e3310ce6e9dc9a64b917690ba7233b8d5..31ce7d1fbee16041dbd49c647cd7511b3f1081a8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include @@ -668,7 +669,7 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t) if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ - cpu_relax(); + cpu_chill(); } } @@ -930,7 +931,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... 
*/ - cpu_relax(); + cpu_chill(); } } prb_close_block(pkc, pbd, po, status); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 0b347f46b2f41561d51668c97f479733f6388163..f395f06031bc971dea5e5120534f8c8839430c5c 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "rds_single_path.h" #include "ib_mr.h" @@ -222,7 +223,7 @@ static inline void wait_clean_list_grace(void) for_each_online_cpu(cpu) { flag = &per_cpu(clean_list_grace, cpu); while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) - cpu_relax(); + cpu_chill(); } } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 424e70907b967ceb468fcc8a04123017e82f5747..6b4ee7c98d691395243d983eedbe637a64cd28cd 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1191,7 +1191,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { - seqcount_t *running; + net_seqlock_t *running; err = -EOPNOTSUPP; if (sch->flags & TCQ_F_MQROOT) { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 7c1b1eff84f4c745f55e333ed4cd5519425bf48e..cc4c4309a2b8a1c9dfcb957a3e53e98c413830e3 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -576,7 +576,11 @@ struct Qdisc noop_qdisc = { .ops = &noop_qdisc_ops, .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, +#ifdef CONFIG_PREEMPT_RT_BASE + .running = __SEQLOCK_UNLOCKED(noop_qdisc.running), +#else .running = SEQCNT_ZERO(noop_qdisc.running), +#endif .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), .gso_skb = { .next = (struct sk_buff *)&noop_qdisc.gso_skb, @@ -877,9 +881,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, lockdep_set_class(&sch->busylock, dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); +#ifdef CONFIG_PREEMPT_RT_BASE + seqlock_init(&sch->running); + lockdep_set_class(&sch->running.seqcount, + dev->qdisc_running_key ?: &qdisc_running_key); + lockdep_set_class(&sch->running.lock, + dev->qdisc_running_key ?: &qdisc_running_key); +#else seqcount_init(&sch->running); lockdep_set_class(&sch->running, dev->qdisc_running_key ?: &qdisc_running_key); +#endif sch->ops = ops; sch->flags = ops->static_flags; @@ -1253,7 +1265,7 @@ void dev_deactivate_many(struct list_head *head) /* Wait for outstanding qdisc_run calls. 
*/ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) - yield(); + msleep(1); /* The new qdisc is assigned at this point so we can safely * unwind stale skb lists and qdisc statistics */ diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 4b56b949a4637d0c2f2c1a4d38b31a2af3f62b3b..83a4a0a13a1c8a2bcf2e775fd5876f5169fc563d 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -402,7 +402,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) return; - cpu = get_cpu(); + cpu = get_cpu_light(); pool = svc_pool_for_cpu(xprt->xpt_server, cpu); atomic_long_inc(&pool->sp_stats.packets); @@ -426,7 +426,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) rqstp = NULL; out_unlock: rcu_read_unlock(); - put_cpu(); + put_cpu_light(); trace_svc_xprt_do_enqueue(xprt, rqstp); } EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index a00ec715aa4681a89e348a058450320bbafe6cfb..a9799738542379c8e5090d8112ea1242bba3af18 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -36,6 +37,7 @@ struct ipcomp_tfms { static DEFINE_MUTEX(ipcomp_resource_mutex); static void * __percpu *ipcomp_scratches; +static DEFINE_LOCAL_IRQ_LOCK(ipcomp_scratches_lock); static int ipcomp_scratch_users; static LIST_HEAD(ipcomp_tfms_list); @@ -45,12 +47,15 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) const int plen = skb->len; int dlen = IPCOMP_SCRATCH_SIZE; const u8 *start = skb->data; - const int cpu = get_cpu(); - u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); - struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); - int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); - int len; + u8 *scratch; + struct crypto_comp *tfm; + int err, len; + + local_lock(ipcomp_scratches_lock); + scratch = *this_cpu_ptr(ipcomp_scratches); + tfm = *this_cpu_ptr(ipcd->tfms); + err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); if (err) goto out; @@ -103,7 +108,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) err = 0; out: - put_cpu(); + local_unlock(ipcomp_scratches_lock); return err; } @@ -146,6 +151,8 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) int err; local_bh_disable(); + local_lock(ipcomp_scratches_lock); + scratch = *this_cpu_ptr(ipcomp_scratches); tfm = *this_cpu_ptr(ipcd->tfms); err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); @@ -158,12 +165,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) } memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); + local_unlock(ipcomp_scratches_lock); local_bh_enable(); pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); return 0; out: + local_unlock(ipcomp_scratches_lock); local_bh_enable(); return err; } diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index 5522692100ba0d01d3cd669b7ea1bbdf84b0b72e..8b4be8e1802a58002e4f1680047de0326b2493b8 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c @@ -33,7 +33,7 @@ static void simple_thread_func(int cnt) /* Silly tracepoints */ trace_foo_bar("hello", cnt, array, random_strings[len], - &current->cpus_allowed); + current->cpus_ptr); trace_foo_with_template_simple("HELLO", cnt); diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c index 
a2cc1036c9155e86d65d9a8b8de41b00fb061c13..a771a645f449a22f68598e5a3350e12ed1644ce5 100644 --- a/scripts/dtc/checks.c +++ b/scripts/dtc/checks.c @@ -792,7 +792,7 @@ static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node * if (!strprefixeq(node->name, node->basenamelen, "pci") && !strprefixeq(node->name, node->basenamelen, "pcie")) - FAIL(c, dti, node, "node name is not \"pci\" or \"pcie\""); + ;//FAIL(c, dti, node, "node name is not \"pci\" or \"pcie\""); prop = get_property(node, "ranges"); if (!prop) diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 662fe19da990bb852400f993d16d58ecf230cd89..089200debc380c0b32a97f87a326ac067c71e568 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -5,7 +5,8 @@ TARGET=$1 ARCH=$2 SMP=$3 PREEMPT=$4 -CC=$5 +RT=$5 +CC=$6 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } @@ -53,6 +54,7 @@ UTS_VERSION="#$VERSION" CONFIG_FLAGS="" if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi +if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h index b6380c5f00972702807407cbc1f853192a308333..12abfddb19c9c6c73ee89b735c02330eeed9d62c 100644 --- a/security/apparmor/include/path.h +++ b/security/apparmor/include/path.h @@ -40,8 +40,10 @@ struct aa_buffers { #include #include +#include DECLARE_PER_CPU(struct aa_buffers, aa_buffers); +DECLARE_LOCAL_IRQ_LOCK(aa_buffers_lock); #define ASSIGN(FN, A, X, N) ((X) = FN(A, N)) #define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/ @@ -51,7 +53,17 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers); #define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++) -#ifdef CONFIG_DEBUG_PREEMPT +#ifdef CONFIG_PREEMPT_RT_BASE +static inline void AA_BUG_PREEMPT_ENABLED(const char *s) +{ + struct local_irq_lock *lv; + + lv = this_cpu_ptr(&aa_buffers_lock); + WARN_ONCE(lv->owner != current, + "__get_buffer without aa_buffers_lock\n"); +} + +#elif defined(CONFIG_DEBUG_PREEMPT) #define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X) #else #define AA_BUG_PREEMPT_ENABLED(X) /* nop */ @@ -67,14 +79,15 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers); #define get_buffers(X...) \ do { \ - struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \ + struct aa_buffers *__cpu_var; \ + __cpu_var = get_locked_ptr(aa_buffers_lock, &aa_buffers); \ __get_buffers(__cpu_var, X); \ } while (0) #define put_buffers(X, Y...) 
\ do { \ __put_buffers(X, Y); \ - put_cpu_ptr(&aa_buffers); \ + put_locked_ptr(aa_buffers_lock, &aa_buffers); \ } while (0) #endif /* __AA_PATH_H */ diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 898752b818dc53011921dc7893c97d0f74de40e6..690c8fccd0a05c00391c5e1dd9f252425c41cc02 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -45,7 +45,7 @@ int apparmor_initialized; DEFINE_PER_CPU(struct aa_buffers, aa_buffers); - +DEFINE_LOCAL_IRQ_LOCK(aa_buffers_lock); /* * LSM hook functions diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index a65e8c0c630d286e1e8ca6cdb639a52131163685..1cea7ba309544c36328223a92374401d4387bbca 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -139,6 +139,9 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val) { unsigned int addr = azx_command_addr(val); unsigned int wp, rp; + unsigned long timeout; + unsigned int rirb_wp; + int i = 0; spin_lock_irq(&bus->reg_lock); @@ -165,6 +168,41 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val) bus->corb.buf[wp] = cpu_to_le32(val); snd_hdac_chip_writew(bus, CORBWP, wp); + if (bus->cmd_resend) { + timeout = jiffies + msecs_to_jiffies(1000); + udelay(80); + rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); + while (rirb_wp == bus->rirb.wp) { + udelay(80); + rirb_wp = snd_hdac_chip_readw(bus, RIRBWP); + if (rirb_wp != bus->rirb.wp) + break; + if (i > 5) + break; + if (time_after(jiffies, timeout)) + break; + + /* add command to corb */ + wp = snd_hdac_chip_readw(bus, CORBWP); + if (wp == 0xffff) { + /* something wrong, controller likely turned to D3 */ + spin_unlock_irq(&bus->reg_lock); + return -EIO; + } + wp++; + wp %= AZX_MAX_CORB_ENTRIES; + + rp = snd_hdac_chip_readw(bus, CORBRP); + if (wp == rp) { + /* oops, it's full */ + spin_unlock_irq(&bus->reg_lock); + return -EAGAIN; + } + bus->corb.buf[wp] = cpu_to_le32(val); + snd_hdac_chip_writew(bus, CORBWP, wp); + i++; + } + } spin_unlock_irq(&bus->reg_lock); return 0; diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index eee422390d8e260fae1e784262598133254f023c..38586457ee09ba6c0b78c05ac68af1f78f050dde 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -51,7 +51,11 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start) trace_snd_hdac_stream_start(bus, azx_dev); +#ifdef CONFIG_SND_HDA_PHYTIUM + azx_dev->start_wallclk = snd_hdac_chip_readl(bus, WALLCLK) / 15; +#else azx_dev->start_wallclk = snd_hdac_chip_readl(bus, WALLCLK); +#endif if (!fresh_start) azx_dev->start_wallclk -= azx_dev->period_wallclk; @@ -469,7 +473,11 @@ static u64 azx_cc_read(const struct cyclecounter *cc) { struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc); +#ifdef CONFIG_SND_HDA_PHYTIUM + return snd_hdac_chip_readl(azx_dev->bus, WALLCLK) / 25; +#else return snd_hdac_chip_readl(azx_dev->bus, WALLCLK); +#endif } static void azx_timecounter_init(struct hdac_stream *azx_dev, diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index 4235907b785891326a479023f3433d2afd9b231e..df95dad1233fa0a4a42febd7d8d035b12abfc4f4 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -21,6 +21,21 @@ config SND_HDA_INTEL To compile this driver as a module, choose M here: the module will be called snd-hda-intel. 
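The snd_hdac_bus_send_cmd() hunk above adds a Phytium-specific retry path guarded by the new bus->cmd_resend flag: after a verb is queued in the CORB, the RIRB write pointer is polled, and if the codec does not answer the same verb is queued again. Stripped of the register access detail, the control flow amounts to the following sketch (corb_queue_verb() and rirb_advanced() are hypothetical helpers standing in for the CORBWP/CORBRP/RIRBWP register accesses, not HDA core API):

	/* Sketch only: at most 6 resends, roughly 80us apart, under a 1s deadline. */
	static void ft_send_verb_with_resend(struct hdac_bus *bus, u32 val)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(1000);
		int resends = 0;

		corb_queue_verb(bus, val);		/* first attempt */
		while (!rirb_advanced(bus)) {		/* no response seen yet */
			udelay(80);
			if (rirb_advanced(bus))
				break;
			if (resends > 5 || time_after(jiffies, deadline))
				break;
			corb_queue_verb(bus, val);	/* queue the same verb again */
			resends++;
		}
	}

The real hunk additionally re-checks the CORB-full and invalid-write-pointer conditions on every resend and bails out with -EAGAIN/-EIO; note also that it polls under bus->reg_lock with interrupts disabled, so the one-second worst case is spent spinning irqs-off.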
+config SND_HDA_PHYTIUM + tristate "PHYTIUM HD Audio" + depends on SOUND + select SND_HDA + help + Say Y here to support the HDA controller present in PHYTIUM + SoCs. + + This option enables support for the HD Audio controller + present in some PHYTIUM SoCs, used to communicate audio + to the "High Definition Audio" codec. + + To compile this driver as a module, choose M here: the module + will be called snd-hda-phytium. + config SND_HDA_TEGRA tristate "NVIDIA Tegra HD Audio" depends on ARCH_TEGRA diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile index b57432f000568eb1b40f5131464218ba1f07bcff..90e32c8ce07be3cc287fa6ea306a9eaeed8e615c 100644 --- a/sound/pci/hda/Makefile +++ b/sound/pci/hda/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 snd-hda-intel-objs := hda_intel.o +snd-hda-phytium-objs := hda_phytium.o snd-hda-tegra-objs := hda_tegra.o snd-hda-codec-y := hda_bind.o hda_codec.o hda_jack.o hda_auto_parser.o hda_sysfs.o @@ -48,3 +49,4 @@ obj-$(CONFIG_SND_HDA_CODEC_HDMI) += snd-hda-codec-hdmi.o # when built in kernel obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-intel.o obj-$(CONFIG_SND_HDA_TEGRA) += snd-hda-tegra.o +obj-$(CONFIG_SND_HDA_PHYTIUM) += snd-hda-phytium.o diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 0c5d41e5d14684e4c8da10f078a5e04a3a895297..03ab50fd3482470b0eadd5c1e5be583e288c4abe 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -28,6 +28,8 @@ #include #include +#include "hda_phytium.h" + #ifdef CONFIG_X86 /* for art-tsc conversion */ #include @@ -171,6 +173,10 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); unsigned short ctls = spdif ? spdif->ctls : 0; + struct hda_ft *hda; + hda = container_of(chip, struct hda_ft, chip); + hda->substream = substream; + trace_azx_pcm_prepare(chip, azx_dev); dsp_lock(azx_dev); if (dsp_is_locked(azx_dev)) { diff --git a/sound/pci/hda/hda_phytium.c b/sound/pci/hda/hda_phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..5d3fa387b1f5946e7ddc18a520b0f5d090ff15fb --- /dev/null +++ b/sound/pci/hda/hda_phytium.c @@ -0,0 +1,1200 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Implementation of primary ALSA driver code base for Phytium HD Audio. + * + * Copyright (c) 2018-2023 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hda_codec.h" + +#include "hda_controller.h" +#include "hda_phytium.h" + +#include "hda_intel_trace.h" + +/* position fix mode */ +enum { + POS_FIX_AUTO, + POS_FIX_LPIB, + POS_FIX_POSBUF, + POS_FIX_VIACOMBO, + POS_FIX_COMBO, +}; + +/* Define IN stream 0 FIFO size offset in VIA controller */ +#define VIA_IN_STREAM0_FIFO_SIZE_OFFSET 0x90 + +/* FT has 4 playback and 4 capture streams */ +#define FT4C_NUM_CAPTURE 4 +#define FT4C_NUM_PLAYBACK 4 + +#define DWORD_BYTE_WIDTH 4 +#define BYTE_BIT_WIDTH 8 + +static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; +static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; +static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; +static char *model[SNDRV_CARDS]; +static int position_fix[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1}; +static int bdl_pos_adj[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1}; +static int probe_mask[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = -1}; +static int probe_only[SNDRV_CARDS]; +static int jackpoll_ms[SNDRV_CARDS]; +static int single_cmd = -1; +static int enable_msi = -1; +#ifdef CONFIG_SND_HDA_INPUT_BEEP +static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = + CONFIG_SND_HDA_INPUT_BEEP_MODE}; +#endif + +module_param_array(index, int, NULL, 0444); +MODULE_PARM_DESC(index, "Index value for Phytium HD audio interface."); +module_param_array(id, charp, NULL, 0444); +MODULE_PARM_DESC(id, "ID string for Phytium HD audio interface."); +module_param_array(enable, bool, NULL, 0444); +MODULE_PARM_DESC(enable, "Enable Phytium HD audio interface."); +module_param_array(model, charp, NULL, 0444); +MODULE_PARM_DESC(model, "Use the given board model."); +module_param_array(position_fix, int, NULL, 0444); +MODULE_PARM_DESC(position_fix, "DMA pointer read method. " + "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO)."); +module_param_array(bdl_pos_adj, int, NULL, 0644); +MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset."); +module_param_array(probe_mask, int, NULL, 0444); +MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1)."); +module_param_array(probe_only, int, NULL, 0444); +MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization."); +module_param_array(jackpoll_ms, int, NULL, 0444); +MODULE_PARM_DESC(jackpoll_ms, "Ms between polling for jack events (default = 0, using unsol events only)"); +module_param(single_cmd, bint, 0444); +MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs " + "(for debugging only)."); +module_param(enable_msi, bint, 0444); +MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)"); +#ifdef CONFIG_SND_HDA_INPUT_BEEP +module_param_array(beep_mode, bool, NULL, 0444); +MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode " + "(0=off, 1=on) (default=1)."); +#endif + +#define power_save 0 + +static int align_buffer_size = -1; +module_param(align_buffer_size, bint, 0644); +MODULE_PARM_DESC(align_buffer_size, + "Force buffer and period sizes to be multiple of 128 bytes."); + +/* driver types */ +enum { + AZX_DRIVER_ICH, + AZX_DRIVER_PCH, + AZX_DRIVER_SCH, + AZX_DRIVER_HDMI, + AZX_DRIVER_ATI, + AZX_DRIVER_ATIHDMI, + AZX_DRIVER_ATIHDMI_NS, + AZX_DRIVER_VIA, + AZX_DRIVER_SIS, + AZX_DRIVER_ULI, + AZX_DRIVER_NVIDIA, + AZX_DRIVER_TERA, + AZX_DRIVER_CTX, + AZX_DRIVER_CTHDA, + AZX_DRIVER_CMEDIA, + AZX_DRIVER_GENERIC, + AZX_DRIVER_FT, + AZX_NUM_DRIVERS, /* keep this as last entry */ +}; + +/* NOP for other archs */ +static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf, + bool on) +{ +} + +static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, + struct snd_pcm_substream *substream, bool on) +{ +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect); + +/* calculate runtime delay from LPIB */ +static int azx_get_delay_from_lpib(struct azx *chip, struct azx_dev *azx_dev, + unsigned int pos) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + int stream = substream->stream; + unsigned int lpib_pos = azx_get_pos_lpib(chip, azx_dev); + int delay; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) + delay = pos - lpib_pos; + else + delay = lpib_pos - pos; + if (delay < 0) { + if (delay >= azx_dev->core.delay_negative_threshold) + delay = 0; + else + delay += azx_dev->core.bufsize; + } + + if (delay >= azx_dev->core.period_bytes) { + dev_info(chip->card->dev, + "Unstable LPIB (%d >= %d); disabling LPIB delay 
counting\n", + delay, azx_dev->core.period_bytes); + delay = 0; + chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY; + chip->get_delay[stream] = NULL; + } + + return bytes_to_frames(substream->runtime, delay); +} + +static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev); + +/* called from IRQ */ +static int azx_position_check(struct azx *chip, struct azx_dev *azx_dev) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + int ok; + + ok = azx_position_ok(chip, azx_dev); + if (ok == 1) { + azx_dev->irq_pending = 0; + return ok; + } else if (ok == 0) { + /* bogus IRQ, process it later */ + azx_dev->irq_pending = 1; + schedule_work(&hda->irq_pending_work); + } + return 0; +} + +static int azx_ft_link_power(struct azx *chip, bool enable) +{ + return 0; +} + +/* + * Check whether the current DMA position is acceptable for updating + * periods. Returns non-zero if it's OK. + * + * Many HD-audio controllers appear pretty inaccurate about + * the update-IRQ timing. The IRQ is issued before actually the + * data is processed. So, we need to process it afterwords in a + * workqueue. + */ +static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) +{ + struct snd_pcm_substream *substream = azx_dev->core.substream; + int stream = substream->stream; + u32 wallclk; + unsigned int pos; + + wallclk = (azx_readl(chip, WALLCLK) - azx_dev->core.start_wallclk); + + if (wallclk < (azx_dev->core.period_wallclk * 2) / 3) + return -1; /* bogus (too early) interrupt */ + + if (chip->get_position[stream]) + pos = chip->get_position[stream](chip, azx_dev); + else { /* use the position buffer as default */ + pos = azx_get_pos_posbuf(chip, azx_dev); + if (!pos || pos == (u32)-1) { + dev_info(chip->card->dev, + "Invalid position buffer, using LPIB read method instead.\n"); + chip->get_position[stream] = azx_get_pos_lpib; + if (chip->get_position[0] == azx_get_pos_lpib && + chip->get_position[1] == azx_get_pos_lpib) + azx_bus(chip)->use_posbuf = false; + pos = azx_get_pos_lpib(chip, azx_dev); + chip->get_delay[stream] = NULL; + } else { + chip->get_position[stream] = azx_get_pos_posbuf; + if (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY) + chip->get_delay[stream] = azx_get_delay_from_lpib; + } + } + + if (pos >= azx_dev->core.bufsize) + pos = 0; + + if (WARN_ONCE(!azx_dev->core.period_bytes, + "hda-ft: zero azx_dev->period_bytes")) + return -1; /* this shouldn't happen! */ + if (wallclk < (azx_dev->core.period_wallclk * 5) / 4 && + pos % azx_dev->core.period_bytes > azx_dev->core.period_bytes / 2) + /* NG - it's below the first next period boundary */ + return chip->bdl_pos_adj ? 0 : -1; + + azx_dev->core.start_wallclk += wallclk; + + return 1; /* OK, it's fine */ +} + +/* The work for pending PCM period updates. */ +static void azx_irq_pending_work(struct work_struct *work) +{ + struct hda_ft *hda = container_of(work, struct hda_ft, irq_pending_work); + struct azx *chip = &hda->chip; + struct hdac_bus *bus = azx_bus(chip); + struct hdac_stream *s; + int pending, ok; + + if (!hda->irq_pending_warned) { + dev_info(chip->card->dev, + "IRQ timing workaround is activated for card #%d. 
Suggest a bigger bdl_pos_adj.\n", + chip->card->number); + hda->irq_pending_warned = 1; + } + + for (;;) { + pending = 0; + spin_lock_irq(&bus->reg_lock); + list_for_each_entry(s, &bus->stream_list, list) { + struct azx_dev *azx_dev = stream_to_azx_dev(s); + if (!azx_dev->irq_pending || + !s->substream || + !s->running) + continue; + ok = azx_position_ok(chip, azx_dev); + if (ok > 0) { + azx_dev->irq_pending = 0; + spin_unlock(&bus->reg_lock); + snd_pcm_period_elapsed(s->substream); + spin_lock(&bus->reg_lock); + } else if (ok < 0) { + pending = 0; /* too early */ + } else + pending++; + } + spin_unlock_irq(&bus->reg_lock); + if (!pending) + return; + msleep(1); + } +} + +/* clear irq_pending flags and assure no on-going workq */ +static void azx_clear_irq_pending(struct azx *chip) +{ + struct hdac_bus *bus = azx_bus(chip); + struct hdac_stream *s; + + spin_lock_irq(&bus->reg_lock); + list_for_each_entry(s, &bus->stream_list, list) { + struct azx_dev *azx_dev = stream_to_azx_dev(s); + azx_dev->irq_pending = 0; + } + spin_unlock_irq(&bus->reg_lock); +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect) +{ + struct hdac_bus *bus = azx_bus(chip); + + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct platform_device *pdev = to_platform_device(hda->dev); + int irq_id = platform_get_irq(pdev, 0); + int err; + + err = request_irq(irq_id, azx_interrupt, + IRQF_SHARED, KBUILD_MODNAME, chip); + if (err) { + dev_err(chip->card->dev, + "unable to request IRQ %d, disabling device\n", + irq_id); + if (do_disconnect) + snd_card_disconnect(chip->card); + return err; + } + bus->irq = irq_id; + + return 0; +} + +/* get the current DMA position with correction on VIA chips */ +static unsigned int azx_via_get_position(struct azx *chip, + struct azx_dev *azx_dev) +{ + unsigned int link_pos, mini_pos, bound_pos; + unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos; + unsigned int fifo_size; + + link_pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev)); + if (azx_dev->core.substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + /* Playback, no problem using link position */ + return link_pos; + } + + /* Capture */ + /* For new chipset, + * use mod to get the DMA position just like old chipset + */ + mod_dma_pos = le32_to_cpu(*azx_dev->core.posbuf); + mod_dma_pos %= azx_dev->core.period_bytes; + + /* azx_dev->fifo_size can't get FIFO size of in stream. + * Get from base address + offset. 
+ */ + fifo_size = readw(azx_bus(chip)->remap_addr + + VIA_IN_STREAM0_FIFO_SIZE_OFFSET); + + if (azx_dev->insufficient) { + /* Link position is never greater than FIFO size */ + if (link_pos <= fifo_size) + return 0; + + azx_dev->insufficient = 0; + } + + if (link_pos <= fifo_size) + mini_pos = azx_dev->core.bufsize + link_pos - fifo_size; + else + mini_pos = link_pos - fifo_size; + + /* Find nearest previous boundary */ + mod_mini_pos = mini_pos % azx_dev->core.period_bytes; + mod_link_pos = link_pos % azx_dev->core.period_bytes; + if (mod_link_pos >= fifo_size) + bound_pos = link_pos - mod_link_pos; + else if (mod_dma_pos >= mod_mini_pos) + bound_pos = mini_pos - mod_mini_pos; + else { + bound_pos = mini_pos - mod_mini_pos + azx_dev->core.period_bytes; + if (bound_pos >= azx_dev->core.bufsize) + bound_pos = 0; + } + + /* Calculate real DMA position we want */ + return bound_pos + mod_dma_pos; +} + +#ifdef CONFIG_PM +static DEFINE_MUTEX(card_list_lock); +static LIST_HEAD(card_list); + +static void azx_add_card_list(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + mutex_lock(&card_list_lock); + list_add(&hda->list, &card_list); + mutex_unlock(&card_list_lock); +} + +static void azx_del_card_list(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + mutex_lock(&card_list_lock); + list_del_init(&hda->list); + mutex_unlock(&card_list_lock); +} + +#else +#define azx_add_card_list(chip) /* NOP */ +#define azx_del_card_list(chip) /* NOP */ +#endif /* CONFIG_PM */ + +#if defined(CONFIG_PM_SLEEP) +/* power management */ +static int azx_suspend(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled || !chip->running) + return 0; + + bus = azx_bus(chip); + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); + azx_clear_irq_pending(chip); + azx_stop_chip(chip); + if (bus->irq >= 0) { + free_irq(bus->irq, (void *)chip); + bus->irq = -1; + } + + return 0; +} + +static int azx_resume(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + int index; + struct snd_pcm_substream *substream; + struct azx_dev *azx_dev; + int err; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + bus = azx_bus(chip); + if (chip->disabled || !chip->running) + return 0; + + if (azx_acquire_irq(chip, 1) < 0) + return -EIO; + + index = chip->dev_index; + + snd_hdac_bus_exit_link_reset(bus); + usleep_range(1000, 1200); + + azx_init_chip(chip, 0); + + snd_power_change_state(card, SNDRV_CTL_POWER_D0); + + if (hda->substream && hda->substream->runtime) { + substream = hda->substream; + + if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { + substream->runtime->status->state = substream->runtime->status->suspended_state; + err = substream->ops->prepare(substream); + if (err < 0) + return err; + } + + azx_dev = get_azx_dev(substream); + hda->substream = NULL; + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int azx_runtime_suspend(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled) + return 0; 
+ + if (!azx_has_pm_runtime(chip)) + return 0; + + azx_stop_chip(chip); + azx_enter_link_reset(chip); + azx_clear_irq_pending(chip); + + return 0; +} + +static int azx_runtime_resume(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + struct hdac_bus *bus; + struct hda_codec *codec; + int status; + int index; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + bus = azx_bus(chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip)) + return 0; + + /* Read STATESTS before controller reset */ + status = azx_readw(chip, STATESTS); + + index = chip->dev_index; + + snd_hdac_bus_exit_link_reset(bus); + usleep_range(1000, 1200); + + azx_init_chip(chip, 0); + + if (status) { + list_for_each_codec(codec, &chip->bus) + if (status & (1 << codec->addr)) + schedule_delayed_work(&codec->jackpoll_work, + codec->jackpoll_interval); + } + + return 0; +} + +static int azx_runtime_idle(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + struct hda_ft *hda; + + if (!card) + return 0; + + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + if (chip->disabled) + return 0; + + if (!azx_has_pm_runtime(chip) || + azx_bus(chip)->codec_powered || !chip->running) + return -EBUSY; + + return 0; +} + +static const struct dev_pm_ops azx_pm = { + SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) + SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) +}; + +#define hda_ft_pm &azx_pm +#else +#define hda_ft_pm NULL +#endif /* CONFIG_PM */ + +static int azx_probe_continue(struct azx *chip); + +/* + * destructor + */ +static int azx_free(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct hdac_bus *bus = azx_bus(chip); + struct platform_device *pdev = to_platform_device(hda->dev); + struct device *hddev = hda->dev; + struct resource *res; + resource_size_t size; + + if (azx_has_pm_runtime(chip) && chip->running) + pm_runtime_get_noresume(&pdev->dev); + + azx_del_card_list(chip); + + complete_all(&hda->probe_wait); + + if (bus->chip_init) { + azx_clear_irq_pending(chip); + azx_stop_all_streams(chip); + azx_stop_chip(chip); + } + + if (bus->irq >= 0) { + free_irq(bus->irq, (void*)chip); + bus->irq = -1; + } + + devm_iounmap(hddev, bus->remap_addr); + + azx_free_stream_pages(chip); + azx_free_streams(chip); + snd_hdac_bus_exit(bus); + + if (chip->region_requested){ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + size = resource_size(res); + devm_release_mem_region(hddev, res->start, size); + } + + return 0; +} + +static int azx_dev_disconnect(struct snd_device *device) +{ + struct azx *chip = device->device_data; + + chip->bus.shutdown = 1; + return 0; +} + +static int azx_dev_free(struct snd_device *device) +{ + return azx_free(device->device_data); +} + +static int check_position_fix(struct azx *chip, int fix) +{ + switch (fix) { + case POS_FIX_AUTO: + case POS_FIX_LPIB: + case POS_FIX_POSBUF: + case POS_FIX_VIACOMBO: + case POS_FIX_COMBO: + return fix; + } + + if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { + dev_dbg(chip->card->dev, "Using LPIB position fix\n"); + return POS_FIX_LPIB; + } + return POS_FIX_AUTO; +} + +static void assign_position_fix(struct azx *chip, int fix) +{ + static azx_get_pos_callback_t callbacks[] = { + [POS_FIX_AUTO] = NULL, + [POS_FIX_LPIB] = azx_get_pos_lpib, + [POS_FIX_POSBUF] = azx_get_pos_posbuf, + [POS_FIX_VIACOMBO] = 
azx_via_get_position, + [POS_FIX_COMBO] = azx_get_pos_lpib, + }; + + chip->get_position[0] = chip->get_position[1] = callbacks[fix]; + + /* combo mode uses LPIB only for playback */ + if (fix == POS_FIX_COMBO) + chip->get_position[1] = NULL; + + if (fix == POS_FIX_POSBUF && + (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) { + chip->get_delay[0] = chip->get_delay[1] = + azx_get_delay_from_lpib; + } + +} + +#define AZX_FORCE_CODEC_MASK 0x100 + +static void check_probe_mask(struct azx *chip, int dev) +{ + chip->codec_probe_mask = probe_mask[dev]; + + /* check forced option */ + if (chip->codec_probe_mask != -1 && + (chip->codec_probe_mask & AZX_FORCE_CODEC_MASK)) { + azx_bus(chip)->codec_mask = chip->codec_probe_mask & 0xff; + dev_info(chip->card->dev, "codec_mask forced to 0x%x\n", + (int)azx_bus(chip)->codec_mask); + } +} + +static void azx_probe_work(struct work_struct *work) +{ + struct hda_ft *hda = container_of(work, struct hda_ft, probe_work); + azx_probe_continue(&hda->chip); +} + +/* + * constructor + */ +static const struct hdac_io_ops axi_hda_io_ops; +static const struct hda_controller_ops axi_hda_ops; + +static int hda_ft_create(struct snd_card *card, struct platform_device *pdev, + int dev, unsigned int driver_caps, + struct azx **rchip) +{ + static struct snd_device_ops ops = { + .dev_disconnect = azx_dev_disconnect, + .dev_free = azx_dev_free, + }; + struct hda_ft *hda; + struct azx *chip; + int err; + + *rchip = NULL; + + hda = devm_kzalloc(&pdev->dev, sizeof(*hda), GFP_KERNEL); + if (!hda) + return -ENOMEM; + hda->dev = &pdev->dev; + chip = &hda->chip; + mutex_init(&chip->open_mutex); + chip->card = card; + chip->ops = &axi_hda_ops; + chip->driver_caps = driver_caps; + chip->driver_type = driver_caps & 0xff; + chip->dev_index = dev; + chip->jackpoll_ms = jackpoll_ms; + INIT_LIST_HEAD(&chip->pcm_list); + INIT_WORK(&hda->irq_pending_work, azx_irq_pending_work); + INIT_LIST_HEAD(&hda->list); + + init_completion(&hda->probe_wait); + assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); + check_probe_mask(chip, dev); + + if (single_cmd < 0) /* allow fallback to single_cmd at errors */ + chip->fallback_to_single_cmd = 0; + else /* explicitly set to single_cmd or not */ + chip->single_cmd = single_cmd; + + if (bdl_pos_adj[dev] < 0) { + switch (chip->driver_type) { + case AZX_DRIVER_FT: + bdl_pos_adj[dev] = 32; + break; + default: + bdl_pos_adj[dev] = 32; + break; + } + } + chip->bdl_pos_adj = bdl_pos_adj[dev]; + + err = azx_bus_init(chip, model[dev], &axi_hda_io_ops); + if (err < 0) { + return err; + } + + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); + if (err < 0) { + dev_err(card->dev, "Error creating device [card]!\n"); + azx_free(chip); + return err; + } + + /* continue probing in work context as may trigger request module */ + INIT_WORK(&hda->probe_work, azx_probe_work); + + *rchip = chip; + + return 0; +} + +static int azx_first_init(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct platform_device *pdev = to_platform_device(hda->dev); + struct device *hddev = hda->dev; + + int dev = chip->dev_index; + bool full_reset; + + struct snd_card *card = chip->card; + struct hdac_bus *bus = azx_bus(chip); + int err; + unsigned short gcap; + unsigned int dma_bits = 64; + + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hda->regs = devm_ioremap_resource(hddev, res); + if (IS_ERR(hda->regs)) + return PTR_ERR(hda->regs); + chip->region_requested = 1; + + bus->addr = 
res->start; + bus->remap_addr = hda->regs; + if (bus->remap_addr == NULL) { + dev_err(card->dev, "ioremap error\n"); + return -ENXIO; + } + + bus->cmd_resend = 1; + + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + + synchronize_irq(bus->irq); + + gcap = azx_readw(chip, GCAP); + dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); + + /* disable 64bit DMA address on some devices */ + if (chip->driver_caps & AZX_DCAPS_NO_64BIT) { + dev_dbg(card->dev, "Disabling 64bit DMA\n"); + gcap &= ~AZX_GCAP_64OK; + } + + /* disable buffer size rounding to 128-byte multiples if supported */ + if (align_buffer_size >= 0) + chip->align_buffer_size = !!align_buffer_size; + else { + if (chip->driver_caps & AZX_DCAPS_NO_ALIGN_BUFSIZE) + chip->align_buffer_size = 0; + else + chip->align_buffer_size = 1; + } + + /* allow 64bit DMA address if supported by H/W */ + if (!(gcap & AZX_GCAP_64OK)) + dma_bits = 32; + if (!dma_set_mask(hddev, DMA_BIT_MASK(dma_bits))) { + dma_set_coherent_mask(hddev, DMA_BIT_MASK(dma_bits)); + } else { + dma_set_mask(hddev, DMA_BIT_MASK(32)); + dma_set_coherent_mask(hddev, DMA_BIT_MASK(32)); + } + + /* read number of streams from GCAP register instead of using + * hardcoded value + */ + chip->capture_streams = (gcap >> 8) & 0x0f; + chip->playback_streams = (gcap >> 12) & 0x0f; + if (!chip->playback_streams && !chip->capture_streams) { + /* gcap didn't give any info, switching to old method */ + chip->playback_streams = FT4C_NUM_PLAYBACK; + chip->capture_streams = FT4C_NUM_CAPTURE; + } + chip->capture_index_offset = 0; + chip->playback_index_offset = chip->capture_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + /* initialize streams */ + err = azx_init_streams(chip); + if (err < 0) + return err; + + err = azx_alloc_stream_pages(chip); + if (err < 0) + return err; + + full_reset = (probe_only[dev] & 2) ? false : true; + azx_init_chip(chip, full_reset); + + /* codec detection */ + if (!azx_bus(chip)->codec_mask) { + dev_err(card->dev, "no codecs found!\n"); + return -ENODEV; + } + + strcpy(card->driver, "ft-hda"); + strcpy(card->shortname, "ft-hda"); + snprintf(card->longname, sizeof(card->longname), + "%s at 0x%lx irq %i", + card->shortname, bus->addr, bus->irq); + + return 0; +} + +/* + * HDA controller ops. + */ + +/* APB register access. */ +static void axi_azx_writel(u32 value, u32 __iomem *addr) +{ + writel(value, addr); +} + +static u32 axi_azx_readl(u32 __iomem *addr) +{ + return readl(addr); +} + +static void axi_azx_writew(u16 value, u16 __iomem *addr) +{ + u32 data; + u32 offset; + + offset = (u64)addr & 0x03; + addr = (u16 __iomem *)((u64)addr & 0xFFFFFFFFFFFFFFFC); + data = readl(addr); + data &= ~(0xFFFF << offset * BYTE_BIT_WIDTH); + data |= (value << offset * BYTE_BIT_WIDTH); + writel(data, addr); +} + +static u16 axi_azx_readw(u16 __iomem *addr) +{ + return readw(addr); +} + +static void axi_azx_writeb(u8 value, u8 __iomem *addr) +{ + u32 data; + u32 offset; + + offset = (u64)addr & 0x03; + addr = (u8 __iomem *)((u64)addr & 0xFFFFFFFFFFFFFFFC); + data = readl(addr); + data &= ~(0xFF << offset * BYTE_BIT_WIDTH); + data |= (value << offset * BYTE_BIT_WIDTH); + writel(data, addr); +} + +static u8 axi_azx_readb(u8 __iomem *addr) +{ + return readb(addr); +} + +/* DMA page allocation helpers. 
*/ +static int dma_alloc_pages(struct hdac_bus *bus, + int type, + size_t size, + struct snd_dma_buffer *buf) +{ + struct azx *chip = bus_to_azx(bus); + int err; + + err = snd_dma_alloc_pages(type, + bus->dev, + size, buf); + if (err < 0) + return err; + mark_pages_wc(chip, buf, true); + return 0; +} + +static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf) +{ + struct azx *chip = bus_to_azx(bus); + + mark_pages_wc(chip, buf, false); + snd_dma_free_pages(buf); +} + +static int substream_alloc_pages(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size) +{ + struct azx_dev *azx_dev = get_azx_dev(substream); + int ret; + + mark_runtime_wc(chip, azx_dev, substream, false); + ret = snd_pcm_lib_malloc_pages(substream, size); + if (ret < 0) + return ret; + + mark_runtime_wc(chip, azx_dev, substream, true); + return 0; +} + +static int substream_free_pages(struct azx *chip, + struct snd_pcm_substream *substream) +{ + struct azx_dev *azx_dev = get_azx_dev(substream); + mark_runtime_wc(chip, azx_dev, substream, false); + return snd_pcm_lib_free_pages(substream); +} + +static void pcm_mmap_prepare(struct snd_pcm_substream *substream, + struct vm_area_struct *area) +{ + +} + +static const struct hdac_io_ops axi_hda_io_ops = { + .reg_writel = axi_azx_writel, + .reg_readl = axi_azx_readl, + .reg_writew = axi_azx_writew, + .reg_readw = axi_azx_readw, + .reg_writeb = axi_azx_writeb, + .reg_readb = axi_azx_readb, + .dma_alloc_pages = dma_alloc_pages, + .dma_free_pages = dma_free_pages, +}; + +static const struct hda_controller_ops axi_hda_ops = { + .substream_alloc_pages = substream_alloc_pages, + .substream_free_pages = substream_free_pages, + .pcm_mmap_prepare = pcm_mmap_prepare, + .position_check = azx_position_check, + .link_power = azx_ft_link_power, +}; + +static DECLARE_BITMAP(probed_devs, SNDRV_CARDS); + +static int hda_ft_probe(struct platform_device *pdev) +{ + const unsigned int driver_flags = AZX_DCAPS_SYNC_WRITE | AZX_DRIVER_FT; + struct snd_card *card; + struct hda_ft *hda; + struct azx *chip; + bool schedule_probe; + int err; + int dev; + + dev = find_first_zero_bit(probed_devs, SNDRV_CARDS); + + if (dev >= SNDRV_CARDS) + return -ENODEV; + if (!enable[dev]) { + set_bit(dev, probed_devs); + return -ENOENT; + } + + err = snd_card_new(&pdev->dev, index[dev], id[dev], THIS_MODULE, + 0, &card); + if (err < 0) { + dev_err(&pdev->dev, "Error creating card!\n"); + return err; + } + + err = hda_ft_create(card, pdev,dev, driver_flags, &chip); + if (err < 0) + goto out_free; + card->private_data = chip; + hda = container_of(chip, struct hda_ft, chip); + + dev_set_drvdata(&pdev->dev, card); + + schedule_probe = !chip->disabled; + + if (schedule_probe) + schedule_work(&hda->probe_work); + + set_bit(dev, probed_devs); + if (chip->disabled) + complete_all(&hda->probe_wait); + return 0; + +out_free: + snd_card_free(card); + return err; +} + +/* number of codec slots for each chipset: 0 = default slots (i.e. 
4) */ +static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { + [AZX_DRIVER_FT] = 4, +}; + +static int azx_probe_continue(struct azx *chip) +{ + struct hda_ft *hda = container_of(chip, struct hda_ft, chip); + struct device *hddev = hda->dev; + int dev = chip->dev_index; + int err; + struct hdac_bus *bus = azx_bus(chip); + + hda->probe_continued = 1; + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + +#ifdef CONFIG_SND_HDA_INPUT_BEEP + chip->beep_mode = beep_mode[dev]; +#endif + + /* create codec instances */ + err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); + if (err < 0) + goto out_free; + + if ((probe_only[dev] & 1) == 0) { + err = azx_codec_configure(chip); + if (err < 0) + goto out_free; + } + + err = snd_card_register(chip->card); + if (err < 0) + goto out_free; + + chip->running = 1; + azx_add_card_list(chip); + snd_hda_set_power_save(&chip->bus, power_save * 1000); + + if (azx_has_pm_runtime(chip)) + pm_runtime_put_noidle(hddev); + return err; + +out_free: + if (bus->irq >= 0) { + free_irq(bus->irq, (void *)chip); + bus->irq = -1; + } + return err; +} + +static int hda_ft_remove(struct platform_device *pdev) +{ + struct snd_card *card = dev_get_drvdata(&pdev->dev); + struct azx *chip; + struct hda_ft *hda; + + if (card) { + /* cancel the pending probing work */ + chip = card->private_data; + hda = container_of(chip, struct hda_ft, chip); + cancel_work_sync(&hda->probe_work); + clear_bit(chip->dev_index, probed_devs); + + snd_card_free(card); + return 0; + } + return 0; +} + +static void hda_ft_shutdown(struct platform_device *pdev) +{ + struct snd_card *card = dev_get_drvdata(&pdev->dev); + struct azx *chip; + + if (!card) + return; + chip = card->private_data; + if (chip && chip->running) + azx_stop_chip(chip); +} + +static const struct of_device_id hda_ft_of_match[] = { + { .compatible = "phytium,hda" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hda_ft_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id hda_ft_acpi_match[] = { + { .id = "PHYT0006" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hda_ft_acpi_match); +#else +#define hda_ft_acpi_match NULL +#endif + +static struct platform_driver ft_platform_hda = { + .driver = { + .name = "ft-hda", + .pm = hda_ft_pm, + .of_match_table = hda_ft_of_match, + .acpi_match_table = hda_ft_acpi_match, + }, + .probe = hda_ft_probe, + .remove = hda_ft_remove, + .shutdown = hda_ft_shutdown, +}; + +module_platform_driver(ft_platform_hda); + +MODULE_DESCRIPTION("FT HDA bus driver"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/pci/hda/hda_phytium.h b/sound/pci/hda/hda_phytium.h new file mode 100644 index 0000000000000000000000000000000000000000..edca12ec6fa7e36baed78f455367e73d1bfaa497 --- /dev/null +++ b/sound/pci/hda/hda_phytium.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Implementation of primary ALSA driver code base for Phytium HD Audio. + * + * Copyright (c) 2018-2023 Phytium Technology Co., Ltd. 
+ */ +#ifndef __SOUND_HDA_PHYTIUM_H__ +#define __SOUND_HDA_PHYTIUM_H__ + +#include "hda_controller.h" + +struct hda_ft { + struct azx chip; + struct snd_pcm_substream *substream; + struct device *dev; + void __iomem *regs; + + /* for pending irqs */ + struct work_struct irq_pending_work; + + /* sync probing */ + struct completion probe_wait; + struct work_struct probe_work; + + /* card list (for power_save trigger) */ + struct list_head list; + + /* extra flags */ + unsigned int irq_pending_warned:1; + unsigned int probe_continued:1; + +}; + +#endif diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 1cf11cf51e1dd6912c3bdce14c9e651b4fd955c2..32296faf39c5eedb75977c49aab5f823e7af1205 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -59,6 +59,7 @@ source "sound/soc/intel/Kconfig" source "sound/soc/mediatek/Kconfig" source "sound/soc/meson/Kconfig" source "sound/soc/mxs/Kconfig" +source "sound/soc/phytium/Kconfig" source "sound/soc/pxa/Kconfig" source "sound/soc/qcom/Kconfig" source "sound/soc/rockchip/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 62a5f87c3cfc435b4ff8b1984f77ec65b3e53578..2cdfbe1db8c292bbc805f82e5bed4bc18827a115 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_SND_SOC) += mxs/ obj-$(CONFIG_SND_SOC) += nuc900/ obj-$(CONFIG_SND_SOC) += omap/ obj-$(CONFIG_SND_SOC) += kirkwood/ +obj-$(CONFIG_SND_SOC) += phytium/ obj-$(CONFIG_SND_SOC) += pxa/ obj-$(CONFIG_SND_SOC) += qcom/ obj-$(CONFIG_SND_SOC) += rockchip/ diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index efb095dbcd7145a5267bb7bda73552be43324095..75e8f6389967c93fc1e1790d6373ac7e08f86e5b 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -606,6 +606,15 @@ config SND_SOC_ES8328_SPI depends on SPI_MASTER select SND_SOC_ES8328 +config SND_SOC_ES8336 + tristate "Everest Semi ES8336 CODEC" + depends on I2C + select GPIO_PHYTIUM_PCI + +config SND_SOC_ES8388 + tristate "Everest Semi ES8388 CODEC" + depends on I2C + config SND_SOC_GTM601 tristate 'GTM601 UMTS modem audio codec' diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 7ae7c85e8219f5ff035dacaea3abc4b831296056..1ba362d2dd65119e274d1eae048f4a2aa7187bc1 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -76,6 +76,8 @@ snd-soc-es8316-objs := es8316.o snd-soc-es8328-objs := es8328.o snd-soc-es8328-i2c-objs := es8328-i2c.o snd-soc-es8328-spi-objs := es8328-spi.o +snd-soc-es8336-objs := es8336.o +snd-soc-es8388-objs := es8388.o snd-soc-gtm601-objs := gtm601.o snd-soc-hdac-hdmi-objs := hdac_hdmi.o snd-soc-ics43432-objs := ics43432.o @@ -336,6 +338,8 @@ obj-$(CONFIG_SND_SOC_ES8316) += snd-soc-es8316.o obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o +obj-$(CONFIG_SND_SOC_ES8336)+= snd-soc-es8336.o +obj-$(CONFIG_SND_SOC_ES8388) += snd-soc-es8388.o obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o obj-$(CONFIG_SND_SOC_HDAC_HDMI) += snd-soc-hdac-hdmi.o obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o diff --git a/sound/soc/codecs/es8336.c b/sound/soc/codecs/es8336.c new file mode 100644 index 0000000000000000000000000000000000000000..ebb395600f5b2053dff384fa12cea9b52d4a9028 --- /dev/null +++ b/sound/soc/codecs/es8336.c @@ -0,0 +1,1072 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * es8336 ALSA SoC audio driver + * + * Copyright (C) Everest Semiconductor Co.,Ltd + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. 
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/workqueue.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/initval.h>
+#include "es8336.h"
+
+static struct snd_soc_component *es8336_component;
+
+static const struct reg_default es8336_reg_defaults[] = {
+	{0x00, 0x03}, {0x01, 0x03}, {0x02, 0x00}, {0x03, 0x20},
+	{0x04, 0x11}, {0x05, 0x00}, {0x06, 0x11}, {0x07, 0x00},
+	{0x08, 0x00}, {0x09, 0x01}, {0x0a, 0x00}, {0x0b, 0x00},
+	{0x0c, 0xf8}, {0x0d, 0x3f}, {0x0e, 0x00}, {0x0f, 0x00},
+	{0x10, 0x01}, {0x11, 0xfc}, {0x12, 0x28}, {0x13, 0x00},
+	{0x14, 0x00}, {0x15, 0x33}, {0x16, 0x00}, {0x17, 0x00},
+	{0x18, 0x88}, {0x19, 0x06}, {0x1a, 0x22}, {0x1b, 0x03},
+	{0x1c, 0x0f}, {0x1d, 0x00}, {0x1e, 0x80}, {0x1f, 0x80},
+	{0x20, 0x00}, {0x21, 0x00}, {0x22, 0xc0}, {0x23, 0x00},
+	{0x24, 0x01}, {0x25, 0x08}, {0x26, 0x10}, {0x27, 0xc0},
+	{0x28, 0x00}, {0x29, 0x1c}, {0x2a, 0x00}, {0x2b, 0xb0},
+	{0x2c, 0x32}, {0x2d, 0x03}, {0x2e, 0x00}, {0x2f, 0x11},
+	{0x30, 0x10}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0xc0},
+	{0x34, 0xc0}, {0x35, 0x1f}, {0x36, 0xf7}, {0x37, 0xfd},
+	{0x38, 0xff}, {0x39, 0x1f}, {0x3a, 0xf7}, {0x3b, 0xfd},
+	{0x3c, 0xff}, {0x3d, 0x1f}, {0x3e, 0xf7}, {0x3f, 0xfd},
+	{0x40, 0xff}, {0x41, 0x1f}, {0x42, 0xf7}, {0x43, 0xfd},
+	{0x44, 0xff}, {0x45, 0x1f}, {0x46, 0xf7}, {0x47, 0xfd},
+	{0x48, 0xff}, {0x49, 0x1f}, {0x4a, 0xf7}, {0x4b, 0xfd},
+	{0x4c, 0xff}, {0x4d, 0x00}, {0x4e, 0x00}, {0x4f, 0xff},
+	{0x50, 0x00}, {0x51, 0x00}, {0x52, 0x00}, {0x53, 0x00},
+};
+
+/* codec private data */
+struct es8336_priv {
+	struct regmap *regmap;
+	unsigned int dmic_amic;
+	unsigned int sysclk;
+	struct snd_pcm_hw_constraint_list *sysclk_constraints;
+	struct clk *mclk;
+	int debounce_time;
+	struct delayed_work work;
+
+	struct gpio_desc *spk_ctl_gpio;
+	struct gpio_desc *hp_det_gpio;
+	bool muted;
+	bool hp_inserted;
+
+	u8 mic_src;
+	int pwr_count;
+};
+
+/*
+ * es8336_reset
+ * write 0x3F to reg 0x00 to put the chip into reset mode,
+ * then write 0x03 to reg 0x00 to release it from reset
+ */
+static int es8336_reset(struct snd_soc_component *component)
+{
+	snd_soc_component_write(component, ES8336_RESET_REG00, 0x3F);
+	usleep_range(5000, 5500);
+	return snd_soc_component_write(component, ES8336_RESET_REG00, 0x03);
+}
+
+static void es8336_enable_spk(struct es8336_priv *es8336, bool enable)
+{
+	gpiod_set_value(es8336->spk_ctl_gpio, enable);
+}
+
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -9600, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
+static const DECLARE_TLV_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
+static const DECLARE_TLV_DB_SCALE(mic_bst_tlv, 0, 1200, 0);
+
+static unsigned int linin_pga_tlv[] = {
+	TLV_DB_RANGE_HEAD(9),
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(300, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(600, 0, 0),
+	3, 3, TLV_DB_SCALE_ITEM(900, 0, 0),
+	4, 4, TLV_DB_SCALE_ITEM(1200, 0, 0),
+	5, 5, TLV_DB_SCALE_ITEM(1500, 0, 0),
+	6, 6, TLV_DB_SCALE_ITEM(1800, 0, 0),
+	7, 7, TLV_DB_SCALE_ITEM(2100, 0, 0),
+	8, 8, TLV_DB_SCALE_ITEM(2400, 0, 0),
+};
+
+static unsigned int hpout_vol_tlv[] = {
+	TLV_DB_RANGE_HEAD(1),
+	0, 3, TLV_DB_SCALE_ITEM(-4800, 1200, 0),
+};
+
+static const char *const alc_func_txt[] = { "Off", "On" };
+
+static const struct soc_enum alc_func =
+	SOC_ENUM_SINGLE(ES8336_ADC_ALC1_REG29, 6, 2, alc_func_txt);
+
+static const char *const ng_type_txt[] = {
+	"Constant PGA Gain", "Mute ADC Output" };
+
+static const
struct soc_enum ng_type = + SOC_ENUM_SINGLE(ES8336_ADC_ALC6_REG2E, 6, 2, ng_type_txt); + +static const char *const adcpol_txt[] = { "Normal", "Invert" }; + +static const struct soc_enum adcpol = + SOC_ENUM_SINGLE(ES8336_ADC_MUTE_REG26, 1, 2, adcpol_txt); + +static const char *const dacpol_txt[] = { + "Normal", "R Invert", "L Invert", "L + R Invert" }; + +static const struct soc_enum dacpol = + SOC_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 0, 4, dacpol_txt); + +static const struct snd_kcontrol_new es8336_snd_controls[] = { + /* HP OUT VOLUME */ + SOC_DOUBLE_TLV("HP Playback Volume", ES8336_CPHP_ICAL_VOL_REG18, + 4, 0, 4, 1, hpout_vol_tlv), + /* HPMIXER VOLUME Control */ + SOC_DOUBLE_TLV("HPMixer Gain", ES8336_HPMIX_VOL_REG16, + 0, 4, 7, 0, hpmixer_gain_tlv), + + /* DAC Digital controls */ + SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8336_DAC_VOLL_REG33, + ES8336_DAC_VOLR_REG34, 0, 0xC0, 1, dac_vol_tlv), + + SOC_SINGLE("Enable DAC Soft Ramp", ES8336_DAC_SET1_REG30, 4, 1, 1), + SOC_SINGLE("DAC Soft Ramp Rate", ES8336_DAC_SET1_REG30, 2, 4, 0), + + SOC_ENUM("Playback Polarity", dacpol), + SOC_SINGLE("DAC Notch Filter", ES8336_DAC_SET2_REG31, 6, 1, 0), + SOC_SINGLE("DAC Double Fs Mode", ES8336_DAC_SET2_REG31, 7, 1, 0), + SOC_SINGLE("DAC Volume Control-LeR", ES8336_DAC_SET2_REG31, 2, 1, 0), + SOC_SINGLE("DAC Stereo Enhancement", ES8336_DAC_SET3_REG32, 0, 7, 0), + + /* +20dB D2SE PGA Control */ + SOC_SINGLE_TLV("MIC Boost", ES8336_ADC_D2SEPGA_REG24, + 0, 1, 0, mic_bst_tlv), + /* 0-+24dB Lineinput PGA Control */ + SOC_SINGLE_TLV("Input PGA", ES8336_ADC_PGAGAIN_REG23, + 4, 8, 0, linin_pga_tlv), +}; + +/* Analog Input MUX */ +static const char * const es8336_analog_in_txt[] = { + "lin1-rin1", + "lin2-rin2", + "lin1-rin1 with 15db Boost", + "lin2-rin2 with 15db Boost" +}; + +static const unsigned int es8336_analog_in_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_analog_input_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_ADC_PDN_LINSEL_REG22, 4, 3, + ARRAY_SIZE(es8336_analog_in_txt), + es8336_analog_in_txt, + es8336_analog_in_values); + +static const struct snd_kcontrol_new es8336_analog_in_mux_controls = + SOC_DAPM_ENUM("Route", es8336_analog_input_enum); + +/* Dmic MUX */ +static const char * const es8336_dmic_txt[] = { + "dmic disable", + "dmic data at high level", + "dmic data at low level", +}; + +static const unsigned int es8336_dmic_values[] = { 0, 2, 3 }; + +static const struct soc_enum es8336_dmic_src_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_ADC_DMIC_REG25, 0, 3, + ARRAY_SIZE(es8336_dmic_txt), + es8336_dmic_txt, + es8336_dmic_values); + +static const struct snd_kcontrol_new es8336_dmic_src_controls = + SOC_DAPM_ENUM("Route", es8336_dmic_src_enum); + +/* hp mixer mux */ +static const char *const es8336_hpmux_texts[] = { + "lin1-rin1", + "lin2-rin2", + "lin-rin with Boost", + "lin-rin with Boost and PGA" +}; + +static const unsigned int es8336_hpmux_values[] = { 0, 1, 2, 3 }; + +static const struct soc_enum es8336_left_hpmux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 4, 7, + ARRAY_SIZE(es8336_hpmux_texts), + es8336_hpmux_texts, + es8336_hpmux_values); + +static const struct snd_kcontrol_new es8336_left_hpmux_controls = + SOC_DAPM_ENUM("Route", es8336_left_hpmux_enum); + +static const struct soc_enum es8336_right_hpmux_enum = + SOC_VALUE_ENUM_SINGLE(ES8336_HPMIX_SEL_REG13, 0, 7, + ARRAY_SIZE(es8336_hpmux_texts), + es8336_hpmux_texts, + es8336_hpmux_values); + +static const struct snd_kcontrol_new es8336_right_hpmux_controls = + SOC_DAPM_ENUM("Route", es8336_right_hpmux_enum); + +/* 
headphone Output Mixer */
+static const struct snd_kcontrol_new es8336_out_left_mix[] = {
+	SOC_DAPM_SINGLE("LLIN Switch", ES8336_HPMIX_SWITCH_REG14,
+			6, 1, 0),
+	SOC_DAPM_SINGLE("Left DAC Switch", ES8336_HPMIX_SWITCH_REG14,
+			7, 1, 0),
+};
+
+static const struct snd_kcontrol_new es8336_out_right_mix[] = {
+	SOC_DAPM_SINGLE("RLIN Switch", ES8336_HPMIX_SWITCH_REG14,
+			2, 1, 0),
+	SOC_DAPM_SINGLE("Right DAC Switch", ES8336_HPMIX_SWITCH_REG14,
+			3, 1, 0),
+};
+
+/* DAC data source mux */
+static const char * const es8336_dacsrc_texts[] = {
+	"LDATA TO LDAC, RDATA TO RDAC",
+	"LDATA TO LDAC, LDATA TO RDAC",
+	"RDATA TO LDAC, RDATA TO RDAC",
+	"RDATA TO LDAC, LDATA TO RDAC",
+};
+
+static const unsigned int es8336_dacsrc_values[] = { 0, 1, 2, 3 };
+
+static const struct soc_enum es8336_dacsrc_mux_enum =
+	SOC_VALUE_ENUM_SINGLE(ES8336_DAC_SET1_REG30, 6, 4,
+			      ARRAY_SIZE(es8336_dacsrc_texts),
+			      es8336_dacsrc_texts,
+			      es8336_dacsrc_values);
+static const struct snd_kcontrol_new es8336_dacsrc_mux_controls =
+	SOC_DAPM_ENUM("Route", es8336_dacsrc_mux_enum);
+
+static const struct snd_soc_dapm_widget es8336_dapm_widgets[] = {
+	/* Input Lines */
+	SND_SOC_DAPM_INPUT("DMIC"),
+	SND_SOC_DAPM_INPUT("MIC1"),
+	SND_SOC_DAPM_INPUT("MIC2"),
+
+	SND_SOC_DAPM_MICBIAS("micbias", SND_SOC_NOPM,
+			     0, 0),
+	/* Input MUX */
+	SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
+			 &es8336_analog_in_mux_controls),
+
+	SND_SOC_DAPM_PGA("Line input PGA", ES8336_ADC_PDN_LINSEL_REG22,
+			 7, 1, NULL, 0),
+
+	/* ADCs */
+	SND_SOC_DAPM_ADC("Mono ADC", NULL, ES8336_ADC_PDN_LINSEL_REG22, 6, 1),
+
+	/* Dmic MUX */
+	SND_SOC_DAPM_MUX("Digital Mic Mux", SND_SOC_NOPM, 0, 0,
+			 &es8336_dmic_src_controls),
+
+	/* Digital Interface */
+	SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 1,
+			     SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0,
+			    SND_SOC_NOPM, 0, 0),
+
+	/* DACs DATA SRC MUX */
+	SND_SOC_DAPM_MUX("DAC SRC Mux", SND_SOC_NOPM, 0, 0,
+			 &es8336_dacsrc_mux_controls),
+	/* DACs */
+	SND_SOC_DAPM_DAC("Right DAC", NULL, ES8336_DAC_PDN_REG2F, 0, 1),
+	SND_SOC_DAPM_DAC("Left DAC", NULL, ES8336_DAC_PDN_REG2F, 4, 1),
+
+	/* Headphone Output Side */
+	/* hpmux for hp mixer */
+	SND_SOC_DAPM_MUX("Left Hp mux", SND_SOC_NOPM, 0, 0,
+			 &es8336_left_hpmux_controls),
+	SND_SOC_DAPM_MUX("Right Hp mux", SND_SOC_NOPM, 0, 0,
+			 &es8336_right_hpmux_controls),
+	/* Output mixer */
+	SND_SOC_DAPM_MIXER("Left Hp mixer", ES8336_HPMIX_PDN_REG15,
+			   4, 1, &es8336_out_left_mix[0],
+			   ARRAY_SIZE(es8336_out_left_mix)),
+	SND_SOC_DAPM_MIXER("Right Hp mixer", ES8336_HPMIX_PDN_REG15,
+			   0, 1, &es8336_out_right_mix[0],
+			   ARRAY_SIZE(es8336_out_right_mix)),
+
+	/* Output charge pump */
+	SND_SOC_DAPM_PGA("HPCP L", ES8336_CPHP_OUTEN_REG17,
+			 6, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("HPCP R", ES8336_CPHP_OUTEN_REG17,
+			 2, 0, NULL, 0),
+
+	/* Output Driver */
+	SND_SOC_DAPM_PGA("HPVOL L", ES8336_CPHP_OUTEN_REG17,
+			 5, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("HPVOL R", ES8336_CPHP_OUTEN_REG17,
+			 1, 0, NULL, 0),
+	/* Output Lines */
+	SND_SOC_DAPM_OUTPUT("HPOL"),
+	SND_SOC_DAPM_OUTPUT("HPOR"),
+};
+
+static const struct snd_soc_dapm_route es8336_dapm_routes[] = {
+	/*
+	 * record route map
+	 */
+	{"MIC1", NULL, "micbias"},
+	{"MIC2", NULL, "micbias"},
+	{"DMIC", NULL, "micbias"},
+
+	{"Differential Mux", "lin1-rin1", "MIC1"},
"MIC1"}, + {"Differential Mux", "lin2-rin2", "MIC2"}, + {"Differential Mux", "lin1-rin1 with 15db Boost", "MIC1"}, + {"Differential Mux", "lin2-rin2 with 15db Boost", "MIC2"}, + {"Line input PGA", NULL, "Differential Mux"}, + + {"Mono ADC", NULL, "Line input PGA"}, + + {"Digital Mic Mux", "dmic disable", "Mono ADC"}, + {"Digital Mic Mux", "dmic data at high level", "DMIC"}, + {"Digital Mic Mux", "dmic data at low level", "DMIC"}, + + {"I2S OUT", NULL, "Digital Mic Mux"}, + /* + * playback route map + */ + {"DAC SRC Mux", "LDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "LDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "RDATA TO LDAC, RDATA TO RDAC", "I2S IN"}, + {"DAC SRC Mux", "RDATA TO LDAC, LDATA TO RDAC", "I2S IN"}, + + {"Left DAC", NULL, "DAC SRC Mux"}, + {"Right DAC", NULL, "DAC SRC Mux"}, + + {"Left Hp mux", "lin1-rin1", "MIC1"}, + {"Left Hp mux", "lin2-rin2", "MIC2"}, + {"Left Hp mux", "lin-rin with Boost", "Differential Mux"}, + {"Left Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, + + {"Right Hp mux", "lin1-rin1", "MIC1"}, + {"Right Hp mux", "lin2-rin2", "MIC2"}, + {"Right Hp mux", "lin-rin with Boost", "Differential Mux"}, + {"Right Hp mux", "lin-rin with Boost and PGA", "Line input PGA"}, + + {"Left Hp mixer", "LLIN Switch", "Left Hp mux"}, + {"Left Hp mixer", "Left DAC Switch", "Left DAC"}, + + {"Right Hp mixer", "RLIN Switch", "Right Hp mux"}, + {"Right Hp mixer", "Right DAC Switch", "Right DAC"}, + + {"HPCP L", NULL, "Left Hp mixer"}, + {"HPCP R", NULL, "Right Hp mixer"}, + + {"HPVOL L", NULL, "HPCP L"}, + {"HPVOL R", NULL, "HPCP R"}, + + {"HPOL", NULL, "HPVOL L"}, + {"HPOR", NULL, "HPVOL R"}, +}; + + +/* The set of rates we can generate from the above for each SYSCLK */ + +static unsigned int rates_12288[] = { + 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000, +}; + +static struct snd_pcm_hw_constraint_list constraints_12288 = { + .count = ARRAY_SIZE(rates_12288), + .list = rates_12288, +}; + +static unsigned int rates_112896[] = { + 8000, 11025, 22050, 44100, +}; + +static struct snd_pcm_hw_constraint_list constraints_112896 = { + .count = ARRAY_SIZE(rates_112896), + .list = rates_112896, +}; + +static unsigned int rates_12[] = { + 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, + 48000, 88235, 96000, +}; + +static struct snd_pcm_hw_constraint_list constraints_12 = { + .count = ARRAY_SIZE(rates_12), + .list = rates_12, +}; + +/* + * Note that this should be called from init rather than from hw_params. 
+ */ +static int es8336_set_dai_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + switch (freq) { + case 11289600: + case 18432000: + case 22579200: + case 36864000: + es8336->sysclk_constraints = &constraints_112896; + es8336->sysclk = freq; + return 0; + case 12288000: + case 19200000: + case 16934400: + case 24576000: + case 33868800: + es8336->sysclk_constraints = &constraints_12288; + es8336->sysclk = freq; + return 0; + case 12000000: + case 24000000: + es8336->sysclk_constraints = &constraints_12; + es8336->sysclk = freq; + return 0; + } + + return 0; +} + +static int es8336_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_component *component = codec_dai->component; + u8 iface = 0; + u8 adciface = 0; + u8 daciface = 0; + + iface = snd_soc_component_read32(component, ES8336_IFACE); + adciface = snd_soc_component_read32(component, ES8336_ADC_IFACE); + daciface = snd_soc_component_read32(component, ES8336_DAC_IFACE); + + /* set master/slave audio interface */ + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + iface |= 0x80; + break; + case SND_SOC_DAIFMT_CBS_CFS: + iface &= 0x7F; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + adciface &= 0xFC; + daciface &= 0xFC; + break; + case SND_SOC_DAIFMT_RIGHT_J: + return -EINVAL; + case SND_SOC_DAIFMT_LEFT_J: + adciface &= 0xFC; + daciface &= 0xFC; + adciface |= 0x01; + daciface |= 0x01; + break; + case SND_SOC_DAIFMT_DSP_A: + adciface &= 0xDC; + daciface &= 0xDC; + adciface |= 0x03; + daciface |= 0x03; + break; + case SND_SOC_DAIFMT_DSP_B: + adciface &= 0xDC; + daciface &= 0xDC; + adciface |= 0x23; + daciface |= 0x23; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + iface &= 0xDF; + adciface &= 0xDF; + daciface &= 0xDF; + break; + case SND_SOC_DAIFMT_IB_IF: + iface |= 0x20; + adciface |= 0x20; + daciface |= 0x20; + break; + case SND_SOC_DAIFMT_IB_NF: + iface |= 0x20; + adciface &= 0xDF; + daciface &= 0xDF; + break; + case SND_SOC_DAIFMT_NB_IF: + iface &= 0xDF; + adciface |= 0x20; + daciface |= 0x20; + break; + default: + return -EINVAL; + } + snd_soc_component_write(component, ES8336_IFACE, iface); + snd_soc_component_write(component, ES8336_ADC_IFACE, adciface); + snd_soc_component_write(component, ES8336_DAC_IFACE, daciface); + return 0; +} + +static int es8336_pcm_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + + snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + /* es8336: both playback and capture need dac mclk */ + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_MCLK_DIV_MASK | + ES8336_CLKMGR_DAC_MCLK_MASK, + ES8336_CLKMGR_MCLK_DIV_NML | + ES8336_CLKMGR_DAC_MCLK_EN); + es8336->pwr_count++; + + if (playback) { + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); + snd_soc_component_write(component, 
ES8336_HPMIX_SWITCH_REG14, 0x88); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x00); + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x66); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_DAC_MCLK_MASK | + ES8336_CLKMGR_DAC_ANALOG_MASK, + ES8336_CLKMGR_DAC_MCLK_EN | + ES8336_CLKMGR_DAC_ANALOG_EN); + msleep(50); + } else { + snd_soc_component_update_bits(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0x00); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_ADC_MCLK_MASK | + ES8336_CLKMGR_ADC_ANALOG_MASK, + ES8336_CLKMGR_ADC_MCLK_EN | + ES8336_CLKMGR_ADC_ANALOG_EN); + } + + return 0; +} + +static void es8336_pcm_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + bool playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + + if (playback) { + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_DAC_ANALOG_MASK, + ES8336_CLKMGR_DAC_ANALOG_DIS); + } else { + snd_soc_component_update_bits(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0xc0); + snd_soc_component_update_bits(component, ES8336_CLKMGR_CLKSW_REG01, + ES8336_CLKMGR_ADC_MCLK_MASK | + ES8336_CLKMGR_ADC_ANALOG_MASK, + ES8336_CLKMGR_ADC_MCLK_DIS | + ES8336_CLKMGR_ADC_ANALOG_DIS); + } + + if (--es8336->pwr_count == 0) { + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + } +} + + +static int es8336_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + int val = 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + val = ES8336_DACWL_16; + break; + case SNDRV_PCM_FORMAT_S20_3LE: + val = ES8336_DACWL_20; + break; + case SNDRV_PCM_FORMAT_S24_LE: + val = ES8336_DACWL_24; + break; + case SNDRV_PCM_FORMAT_S32_LE: + val = ES8336_DACWL_32; + break; + default: + val = ES8336_DACWL_16; + break; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_soc_component_update_bits(component, ES8336_SDP_DACFMT_REG0B, + ES8336_DACWL_MASK, val); + else + snd_soc_component_update_bits(component, ES8336_SDP_ADCFMT_REG0A, + ES8336_ADCWL_MASK, 
val); + + return 0; +} + +static int es8336_mute(struct snd_soc_dai *dai, int mute) +{ + struct snd_soc_component *component = dai->component; + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + es8336->muted = mute; + if (mute) { + es8336_enable_spk(es8336, false); + msleep(100); + snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x20); + } else if (dai->playback_active) { + snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x00); + msleep(130); + if (!es8336->hp_inserted) + es8336_enable_spk(es8336, true); + } + return 0; +} + +static int es8336_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + break; + + case SND_SOC_BIAS_STANDBY: + break; + + case SND_SOC_BIAS_OFF: + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + snd_soc_component_update_bits(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0xC0); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x1F); + snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); + break; + } + + return 0; +} + +#define es8336_RATES SNDRV_PCM_RATE_8000_96000 + +#define es8336_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ + SNDRV_PCM_FMTBIT_S24_LE) + +static const struct snd_soc_dai_ops es8336_ops = { + .startup = es8336_pcm_startup, + .hw_params = es8336_pcm_hw_params, + .set_fmt = es8336_set_dai_fmt, + .set_sysclk = es8336_set_dai_sysclk, + .digital_mute = es8336_mute, + .shutdown = es8336_pcm_shutdown, +}; + +static struct snd_soc_dai_driver es8336_dai = { + .name = "es8336-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 1, + .channels_max = 2, + .rates = es8336_RATES, + .formats = es8336_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 1, + .channels_max = 2, + .rates = es8336_RATES, + .formats = es8336_FORMATS, + }, + .ops = &es8336_ops, + .symmetric_rates = 1, +}; + +static int es8336_init_regs(struct snd_soc_component *component) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + snd_soc_component_write(component, ES8336_RESET_REG00, 0x3f); + usleep_range(5000, 5500); + snd_soc_component_write(component, ES8336_RESET_REG00, 0x00); + snd_soc_component_write(component, ES8336_SYS_VMIDSEL_REG0C, 0xFF); + msleep(30); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSEL_REG02, 0x08); + snd_soc_component_write(component, ES8336_CLKMGR_ADCOSR_REG03, 0x20); + snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV1_REG04, 0x11); + snd_soc_component_write(component, ES8336_CLKMGR_ADCDIV2_REG05, 0x00); + snd_soc_component_write(component, ES8336_CLKMGR_DACDIV1_REG06, 0x11); + snd_soc_component_write(component, ES8336_CLKMGR_DACDIV2_REG07, 0x00); + 
snd_soc_component_write(component, ES8336_CLKMGR_CPDIV_REG08, 0x00); + snd_soc_component_write(component, ES8336_SDP_MS_BCKDIV_REG09, 0x04); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0x7F); + snd_soc_component_write(component, ES8336_CAL_TYPE_REG1C, 0x0F); + snd_soc_component_write(component, ES8336_CAL_HPLIV_REG1E, 0x90); + snd_soc_component_write(component, ES8336_CAL_HPRIV_REG1F, 0x90); + snd_soc_component_write(component, ES8336_ADC_VOLUME_REG27, 0x00); + snd_soc_component_write(component, ES8336_ADC_PDN_LINSEL_REG22, es8336->mic_src); + snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x00); + snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08); + snd_soc_component_write(component, ES8336_DAC_SET2_REG31, 0x20); + snd_soc_component_write(component, ES8336_DAC_SET3_REG32, 0x00); + snd_soc_component_write(component, ES8336_DAC_VOLL_REG33, 0x00); + snd_soc_component_write(component, ES8336_DAC_VOLR_REG34, 0x00); + snd_soc_component_write(component, ES8336_SDP_ADCFMT_REG0A, 0x00); + snd_soc_component_write(component, ES8336_SDP_DACFMT_REG0B, 0x00); + snd_soc_component_write(component, ES8336_SYS_VMIDLOW_REG10, 0x11); + snd_soc_component_write(component, ES8336_SYS_VSEL_REG11, 0xFC); + snd_soc_component_write(component, ES8336_SYS_REF_REG12, 0x28); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0x04); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0x0C); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_HPMIX_SEL_REG13, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x88); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0xBB); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x10); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x30); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x02); + snd_soc_component_write(component, ES8336_CPHP_ICAL_VOL_REG18, 0x00); + snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); + snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0x02); + snd_soc_component_write(component, ES8336_TESTMODE_REG50, 0xA0); + snd_soc_component_write(component, ES8336_TEST1_REG51, 0x00); + snd_soc_component_write(component, ES8336_TEST2_REG52, 0x00); + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x00); + snd_soc_component_write(component, ES8336_RESET_REG00, 0xC0); + msleep(50); + snd_soc_component_write(component, ES8336_ADC_PGAGAIN_REG23, 0x60); + snd_soc_component_write(component, ES8336_ADC_D2SEPGA_REG24, 0x01); + /* adc ds mode, HPF enable */ + snd_soc_component_write(component, ES8336_ADC_DMIC_REG25, 0x08); + snd_soc_component_write(component, ES8336_ADC_ALC1_REG29, 0xcd); + snd_soc_component_write(component, ES8336_ADC_ALC2_REG2A, 0x08); + snd_soc_component_write(component, ES8336_ADC_ALC3_REG2B, 0xa0); + snd_soc_component_write(component, ES8336_ADC_ALC4_REG2C, 0x05); + snd_soc_component_write(component, ES8336_ADC_ALC5_REG2D, 0x06); + snd_soc_component_write(component, ES8336_ADC_ALC6_REG2E, 0x61); + return 0; +} + +static int es8336_suspend(struct snd_soc_component *component) +{ + return 0; +} + +static int es8336_resume(struct snd_soc_component *component) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + int ret; + + es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */ + ret = snd_soc_component_read32(component, 
ES8336_CLKMGR_ADCDIV2_REG05); + if (!ret) { + es8336_init_regs(component); + snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); + /* max debance time, enable interrupt, low active */ + snd_soc_component_write(component, ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3); + /* es8336_set_bias_level(component, SND_SOC_BIAS_OFF); */ + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + snd_soc_component_update_bits(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0xC0); + } + return 0; +} + +static irqreturn_t es8336_irq_handler(int irq, void *data) +{ + struct es8336_priv *es8336 = data; + + queue_delayed_work(system_power_efficient_wq, &es8336->work, + msecs_to_jiffies(es8336->debounce_time)); + + return IRQ_HANDLED; +} + +static void hp_work(struct work_struct *work) +{ + struct es8336_priv *es8336; + + es8336 = container_of(work, struct es8336_priv, work.work); + + es8336->hp_inserted = gpiod_get_value(es8336->hp_det_gpio); + if (!es8336->muted) { + if (es8336->hp_inserted) + es8336_enable_spk(es8336, false); + else + es8336_enable_spk(es8336, true); + } +} + +static int es8336_probe(struct snd_soc_component *component) +{ + struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); + int ret = 0; + + es8336_component = component; + ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); + if (!ret) { + es8336_reset(component); /* UPDATED BY DAVID,15-3-5 */ + ret = snd_soc_component_read32(component, ES8336_CLKMGR_ADCDIV2_REG05); + if (!ret) { + es8336_init_regs(component); + snd_soc_component_write(component, ES8336_GPIO_SEL_REG4D, 0x02); + /* max debance time, enable interrupt, low active */ + snd_soc_component_write(component, + ES8336_GPIO_DEBUNCE_INT_REG4E, 0xf3); + + /* es8336_set_bias_level(codec, SND_SOC_BIAS_OFF); */ + snd_soc_component_write(component, ES8336_CPHP_OUTEN_REG17, 0x00); + snd_soc_component_write(component, ES8336_DAC_PDN_REG2F, 0x11); + snd_soc_component_write(component, ES8336_CPHP_LDOCTL_REG1B, 0x03); + snd_soc_component_write(component, ES8336_CPHP_PDN2_REG1A, 0x22); + snd_soc_component_write(component, ES8336_CPHP_PDN1_REG19, 0x06); + snd_soc_component_write(component, ES8336_HPMIX_SWITCH_REG14, 0x00); + snd_soc_component_write(component, ES8336_HPMIX_PDN_REG15, 0x33); + snd_soc_component_write(component, ES8336_HPMIX_VOL_REG16, 0x00); + if (!es8336->hp_inserted) + snd_soc_component_write(component, ES8336_SYS_PDN_REG0D, + 0x3F); + snd_soc_component_write(component, ES8336_SYS_LP1_REG0E, 0xFF); + snd_soc_component_write(component, ES8336_SYS_LP2_REG0F, 0xFF); + snd_soc_component_write(component, ES8336_CLKMGR_CLKSW_REG01, 0xF3); + snd_soc_component_update_bits(component, ES8336_ADC_PDN_LINSEL_REG22, 0xC0, 0xC0); + } + } + + return ret; +} + +static void 
es8336_remove(struct snd_soc_component *component)
+{
+	es8336_set_bias_level(component, SND_SOC_BIAS_OFF);
+}
+
+const struct regmap_config es8336_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = ES8336_TEST3_REG53,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = es8336_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(es8336_reg_defaults),
+};
+
+static const struct snd_soc_component_driver soc_component_dev_es8336 = {
+	.probe = es8336_probe,
+	.remove = es8336_remove,
+	.suspend = es8336_suspend,
+	.resume = es8336_resume,
+	.set_bias_level = es8336_set_bias_level,
+
+	.controls = es8336_snd_controls,
+	.num_controls = ARRAY_SIZE(es8336_snd_controls),
+	.dapm_widgets = es8336_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(es8336_dapm_widgets),
+	.dapm_routes = es8336_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(es8336_dapm_routes),
+};
+
+static int es8336_i2c_probe(struct i2c_client *i2c,
+			    const struct i2c_device_id *id)
+{
+	struct es8336_priv *es8336;
+	int ret = -1;
+	int hp_irq;
+
+	es8336 = devm_kzalloc(&i2c->dev, sizeof(*es8336), GFP_KERNEL);
+	if (!es8336)
+		return -ENOMEM;
+
+	es8336->debounce_time = 200;
+	es8336->pwr_count = 0;
+	es8336->hp_inserted = false;
+	es8336->muted = true;
+
+	es8336->regmap = devm_regmap_init_i2c(i2c, &es8336_regmap_config);
+	if (IS_ERR(es8336->regmap)) {
+		ret = PTR_ERR(es8336->regmap);
+		dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
+		return ret;
+	}
+
+	i2c_set_clientdata(i2c, es8336);
+
+	es8336->spk_ctl_gpio = devm_gpiod_get_index_optional(&i2c->dev, "sel", 0,
+							     GPIOD_OUT_HIGH);
+	ret = of_property_read_u8(i2c->dev.of_node, "mic-src", &es8336->mic_src);
+	if (ret != 0) {
+		dev_dbg(&i2c->dev, "mic-src return %d", ret);
+		es8336->mic_src = 0x20;
+	}
+	dev_dbg(&i2c->dev, "mic-src %x", es8336->mic_src);
+
+	if (!es8336->spk_ctl_gpio)
+		dev_info(&i2c->dev, "Cannot get spk_ctl_gpio\n");
+	else
+		es8336_enable_spk(es8336, false);
+
+	es8336->hp_det_gpio = devm_gpiod_get_index_optional(&i2c->dev, "det", 0,
+							    GPIOD_IN);
+
+	if (!es8336->hp_det_gpio) {
+		dev_info(&i2c->dev, "Cannot get hp_det_gpio\n");
+	} else {
+		INIT_DELAYED_WORK(&es8336->work, hp_work);
+		hp_irq = gpiod_to_irq(es8336->hp_det_gpio);
+		ret = devm_request_threaded_irq(&i2c->dev, hp_irq, NULL,
+						es8336_irq_handler,
+						IRQF_TRIGGER_FALLING |
+						IRQF_TRIGGER_RISING |
+						IRQF_ONESHOT,
+						"es8336_interrupt", es8336);
+		if (ret < 0) {
+			dev_err(&i2c->dev, "request_irq failed: %d\n", ret);
+			return ret;
+		}
+
+		schedule_delayed_work(&es8336->work,
+				      msecs_to_jiffies(es8336->debounce_time));
+	}
+
+	ret = snd_soc_register_component(&i2c->dev,
+					 &soc_component_dev_es8336,
+					 &es8336_dai, 1);
+
+	return ret;
+}
+
+static int es8336_i2c_remove(struct i2c_client *client)
+{
+	/* private data is devm-allocated, so only the component
+	 * registration needs undoing here
+	 */
+	snd_soc_unregister_component(&client->dev);
+	return 0;
+}
+
+static void es8336_i2c_shutdown(struct i2c_client *client)
+{
+	struct es8336_priv *es8336 = i2c_get_clientdata(client);
+
+	if (es8336_component != NULL) {
+		es8336_enable_spk(es8336, false);
+		msleep(20);
+		es8336_set_bias_level(es8336_component, SND_SOC_BIAS_OFF);
+	}
+}
+
+static const struct i2c_device_id es8336_i2c_id[] = {
+	{"es8336", 0},
+	{"10ES8336:00", 0},
+	{"10ES8336", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, es8336_i2c_id);
+
+static const struct of_device_id es8336_of_match[] = {
+	{ .compatible = "everest,es8336", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, es8336_of_match);
+
+static const struct acpi_device_id es8336_acpi_match[] = {
+	{ "ESSX8336", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, es8336_acpi_match);
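The patch itself does not show how a board consumes this codec. Purely as a hedged editor's sketch (the codec_name and link names below are placeholders, not from the patch), a machine driver of this kernel generation could reference the DAI registered above like this:

/* Editor's sketch only -- not part of the patch. "es8336-hifi" is the
 * dai name from es8336_dai above; "es8336.0-0010" assumes an I2C device
 * on bus 0 at address 0x10, and the CPU/platform fields are omitted.
 */
static struct snd_soc_dai_link board_es8336_link = {
	.name		= "ES8336 HiFi",
	.stream_name	= "ES8336 PCM",
	.codec_dai_name	= "es8336-hifi",
	.codec_name	= "es8336.0-0010",
	.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			  SND_SOC_DAIFMT_CBS_CFS,
};

The "sel" and "det" GPIOs and the optional mic-src byte read in es8336_i2c_probe() above come from firmware, i.e. DT properties sel-gpios, det-gpios and mic-src (or their ACPI equivalents).

+
+static struct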
i2c_driver es8336_i2c_driver = { + .driver = { + .name = "es8336", + .of_match_table = es8336_of_match, + .acpi_match_table = es8336_acpi_match, + }, + .probe = es8336_i2c_probe, + .remove = es8336_i2c_remove, + .shutdown = es8336_i2c_shutdown, + .id_table = es8336_i2c_id, +}; + +module_i2c_driver(es8336_i2c_driver); +MODULE_DESCRIPTION("ASoC es8336 driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/es8336.h b/sound/soc/codecs/es8336.h new file mode 100644 index 0000000000000000000000000000000000000000..d9eda8edaf1193ff1d849894907563e832db7620 --- /dev/null +++ b/sound/soc/codecs/es8336.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Everest Semiconductor Co.,Ltd + * Copyright (c) 2022-2023 Phytium Technology Co., Ltd. + */ + +#ifndef _ES8336_H +#define _ES8336_H + +/* ES8336 register space */ +/* + * RESET Control + */ +#define ES8336_RESET_REG00 0x00 +/* + * Clock Managerment + */ +#define ES8336_CLKMGR_CLKSW_REG01 0x01 +#define ES8336_CLKMGR_CLKSEL_REG02 0x02 +#define ES8336_CLKMGR_ADCOSR_REG03 0x03 +#define ES8336_CLKMGR_ADCDIV1_REG04 0x04 +#define ES8336_CLKMGR_ADCDIV2_REG05 0x05 +#define ES8336_CLKMGR_DACDIV1_REG06 0x06 +#define ES8336_CLKMGR_DACDIV2_REG07 0x07 +#define ES8336_CLKMGR_CPDIV_REG08 0x08 +/* + * SDP Control + */ +#define ES8336_SDP_MS_BCKDIV_REG09 0x09 +#define ES8336_SDP_ADCFMT_REG0A 0x0a +#define ES8336_SDP_DACFMT_REG0B 0x0b +/* + * System Control + */ +#define ES8336_SYS_VMIDSEL_REG0C 0x0c +#define ES8336_SYS_PDN_REG0D 0x0d +#define ES8336_SYS_LP1_REG0E 0x0e +#define ES8336_SYS_LP2_REG0F 0x0f +#define ES8336_SYS_VMIDLOW_REG10 0x10 +#define ES8336_SYS_VSEL_REG11 0x11 +#define ES8336_SYS_REF_REG12 0x12 +/* + * HP Mixer + */ +#define ES8336_HPMIX_SEL_REG13 0x13 +#define ES8336_HPMIX_SWITCH_REG14 0x14 +#define ES8336_HPMIX_PDN_REG15 0x15 +#define ES8336_HPMIX_VOL_REG16 0x16 +/* + * Charge Pump Headphone driver + */ +#define ES8336_CPHP_OUTEN_REG17 0x17 +#define ES8336_CPHP_ICAL_VOL_REG18 0x18 +#define ES8336_CPHP_PDN1_REG19 0x19 +#define ES8336_CPHP_PDN2_REG1A 0x1a +#define ES8336_CPHP_LDOCTL_REG1B 0x1b +/* + * Calibration + */ +#define ES8336_CAL_TYPE_REG1C 0x1c +#define ES8336_CAL_SET_REG1D 0x1d +#define ES8336_CAL_HPLIV_REG1E 0x1e +#define ES8336_CAL_HPRIV_REG1F 0x1f +#define ES8336_CAL_HPLMV_REG20 0x20 +#define ES8336_CAL_HPRMV_REG21 0x21 +/* + * ADC Control + */ +#define ES8336_ADC_PDN_LINSEL_REG22 0x22 +#define ES8336_ADC_PGAGAIN_REG23 0x23 +#define ES8336_ADC_D2SEPGA_REG24 0x24 +#define ES8336_ADC_DMIC_REG25 0x25 +#define ES8336_ADC_MUTE_REG26 0x26 +#define ES8336_ADC_VOLUME_REG27 0x27 +#define ES8336_ADC_ALC1_REG29 0x29 +#define ES8336_ADC_ALC2_REG2A 0x2a +#define ES8336_ADC_ALC3_REG2B 0x2b +#define ES8336_ADC_ALC4_REG2C 0x2c +#define ES8336_ADC_ALC5_REG2D 0x2d +#define ES8336_ADC_ALC6_REG2E 0x2e +/* + * DAC Control + */ +#define ES8336_DAC_PDN_REG2F 0x2f +#define ES8336_DAC_SET1_REG30 0x30 +#define ES8336_DAC_SET2_REG31 0x31 +#define ES8336_DAC_SET3_REG32 0x32 +#define ES8336_DAC_VOLL_REG33 0x33 +#define ES8336_DAC_VOLR_REG34 0x34 +/* + * GPIO + */ +#define ES8336_GPIO_SEL_REG4D 0x4D +#define ES8336_GPIO_DEBUNCE_INT_REG4E 0x4E +#define ES8336_GPIO_FLAG 0x4F +/* + * TEST MODE + */ +#define ES8336_TESTMODE_REG50 0x50 +#define ES8336_TEST1_REG51 0x51 +#define ES8336_TEST2_REG52 0x52 +#define ES8336_TEST3_REG53 0x53 + +#define ES8336_IFACE ES8336_SDP_MS_BCKDIV_REG09 +#define ES8336_ADC_IFACE ES8336_SDP_ADCFMT_REG0A +#define ES8336_DAC_IFACE ES8336_SDP_DACFMT_REG0B + +#define ES8336_REGNUM 84 + +/* REGISTER 0X01 CLOCK 
MANAGER */ +#define ES8336_CLKMGR_MCLK_DIV_MASK (0X1<<7) +#define ES8336_CLKMGR_MCLK_DIV_NML (0X0<<7) +#define ES8336_CLKMGR_MCLK_DIV_1 (0X1<<7) +#define ES8336_CLKMGR_ADC_MCLK_MASK (0X1<<3) +#define ES8336_CLKMGR_ADC_MCLK_EN (0X1<<3) +#define ES8336_CLKMGR_ADC_MCLK_DIS (0X0<<3) +#define ES8336_CLKMGR_DAC_MCLK_MASK (0X1<<2) +#define ES8336_CLKMGR_DAC_MCLK_EN (0X1<<2) +#define ES8336_CLKMGR_DAC_MCLK_DIS (0X0<<2) +#define ES8336_CLKMGR_ADC_ANALOG_MASK (0X1<<1) +#define ES8336_CLKMGR_ADC_ANALOG_EN (0X1<<1) +#define ES8336_CLKMGR_ADC_ANALOG_DIS (0X0<<1) +#define ES8336_CLKMGR_DAC_ANALOG_MASK (0X1<<0) +#define ES8336_CLKMGR_DAC_ANALOG_EN (0X1<<0) +#define ES8336_CLKMGR_DAC_ANALOG_DIS (0X0<<0) + +/* REGISTER 0X0A */ +#define ES8336_ADCWL_MASK (0x7 << 2) +#define ES8336_ADCWL_32 (0x4 << 2) +#define ES8336_ADCWL_24 (0x0 << 2) +#define ES8336_ADCWL_20 (0x1 << 2) +#define ES8336_ADCWL_18 (0x2 << 2) +#define ES8336_ADCWL_16 (0x3 << 2) +#define ES8336_ADCFMT_MASK (0x3 << 0) +#define ES8336_ADCFMT_I2S (0x0 << 0) +#define ES8336_ADCWL_LEFT (0x1 << 0) +#define ES8336_ADCWL_RIGHT (0x2 << 0) +#define ES8336_ADCWL_PCM (0x3 << 0) + +/* REGISTER 0X0B */ +#define ES8336_DACWL_MASK (0x7 << 2) +#define ES8336_DACWL_32 (0x4 << 2) +#define ES8336_DACWL_24 (0x0 << 2) +#define ES8336_DACWL_20 (0x1 << 2) +#define ES8336_DACWL_18 (0x2 << 2) +#define ES8336_DACWL_16 (0x3 << 2) +#define ES8336_DACFMT_MASK (0x3 << 0) +#define ES8336_DACFMT_I2S (0x0 << 0) +#define ES8336_DACWL_LEFT (0x1 << 0) +#define ES8336_DACWL_RIGHT (0x2 << 0) +#define ES8336_DACWL_PCM (0x3 << 0) + +#endif diff --git a/sound/soc/codecs/es8388.c b/sound/soc/codecs/es8388.c new file mode 100644 index 0000000000000000000000000000000000000000..86d1120d3bb85f1b6f4d02e88bc5bd844afc5bd4 --- /dev/null +++ b/sound/soc/codecs/es8388.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * es8388.c -- ES8388 ALSA SoC Audio driver + * + * Copyright (c) 2021-2023 Phytium Technology Co., Ltd. + * Author: Yiqun Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include "es8388.h"
+#include <sound/tlv.h>
+#include <linux/acpi.h>
+
+static const unsigned int rates_12288[] = {
+	8000, 12000, 16000, 24000, 32000, 48000, 96000,
+};
+
+static const int ratios_12288[] = {
+	10, 7, 6, 4, 3, 2, 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_12288 = {
+	.count = ARRAY_SIZE(rates_12288),
+	.list = rates_12288,
+};
+
+static const unsigned int rates_11289[] = {
+	8018, 11025, 22050, 44100, 88200,
+};
+
+static const int ratios_11289[] = {
+	9, 7, 4, 2, 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_11289 = {
+	.count = ARRAY_SIZE(rates_11289),
+	.list = rates_11289,
+};
+
+#define ES8388_RATES (SNDRV_PCM_RATE_192000 | \
+		SNDRV_PCM_RATE_96000 | \
+		SNDRV_PCM_RATE_88200 | \
+		SNDRV_PCM_RATE_8000_48000)
+#define ES8388_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+		SNDRV_PCM_FMTBIT_S18_3LE | \
+		SNDRV_PCM_FMTBIT_S20_3LE | \
+		SNDRV_PCM_FMTBIT_S24_LE | \
+		SNDRV_PCM_FMTBIT_S32_LE)
+
+struct es8388_priv {
+	struct regmap *regmap;
+	struct clk *clk;
+	int playback_fs;
+	bool deemph;
+	int mclkdiv2;
+	const struct snd_pcm_hw_constraint_list *sysclk_constraints;
+	const int *mclk_ratios;
+	bool master;
+};
+
+/*
+ * ES8388 Controls
+ */
+static const char * const adcpol_txt[] = {"Normal", "L Invert", "R Invert",
+	"L + R Invert"};
+static SOC_ENUM_SINGLE_DECL(adcpol,
+			    ES8388_ADCCONTROL6, 6, adcpol_txt);
+
+static const DECLARE_TLV_DB_SCALE(play_tlv, -3000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(dac_adc_tlv, -9600, 50, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
+static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
+
+static const struct {
+	int rate;
+	unsigned int val;
+} deemph_settings[] = {
+	{ 0, ES8388_DACCONTROL6_DEEMPH_OFF },
+	{ 32000, ES8388_DACCONTROL6_DEEMPH_32k },
+	{ 44100, ES8388_DACCONTROL6_DEEMPH_44_1k },
+	{ 48000, ES8388_DACCONTROL6_DEEMPH_48k },
+};
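The constraint tables above only take effect once the machine driver declares which MCLK the codec receives; es8388_set_sysclk() further below accepts 11.2896, 12.288, 22.5792 or 24.576 MHz. A hedged sketch of that call (the init hook and board wiring are assumptions, not part of the patch):

/* Editor's sketch only: a machine-driver init hook selecting the
 * 12.288 MHz family so the rates in constraints_12288 become available.
 * clk_id (0) is ignored by this codec's set_sysclk implementation, and
 * SND_SOC_CLOCK_IN marks the MCLK as supplied to the codec.
 */
static int board_es8388_init(struct snd_soc_pcm_runtime *rtd)
{
	return snd_soc_dai_set_sysclk(rtd->codec_dai, 0, 12288000,
				      SND_SOC_CLOCK_IN);
}

+
+static int es8388_set_deemph(struct snd_soc_component *component)
+{
+	struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component);
+	int val, i, best;
+
+	/*
+	 * If we're using deemphasis select the nearest available sample
+	 * rate.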
+	 */
+	if (es8388->deemph) {
+		best = 0;
+		for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) {
+			if (abs(deemph_settings[i].rate - es8388->playback_fs) <
+			    abs(deemph_settings[best].rate - es8388->playback_fs))
+				best = i;
+		}
+
+		val = deemph_settings[best].val;
+	} else {
+		val = ES8388_DACCONTROL6_DEEMPH_OFF;
+	}
+
+	dev_dbg(component->dev, "Set deemphasis %d\n", val);
+
+	return snd_soc_component_update_bits(component, ES8388_DACCONTROL6,
+					     ES8388_DACCONTROL6_DEEMPH_MASK, val);
+}
+
+static int es8388_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+	struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component);
+
+	ucontrol->value.integer.value[0] = es8388->deemph;
+	return 0;
+}
+
+static int es8388_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+	struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component);
+	unsigned int deemph = ucontrol->value.integer.value[0];
+	int ret;
+
+	if (deemph > 1)
+		return -EINVAL;
+
+	/* update the cached value first so es8388_set_deemph() sees it */
+	es8388->deemph = deemph;
+
+	ret = es8388_set_deemph(component);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new es8388_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("Capture Digital Volume",
+			 ES8388_ADCCONTROL8, ES8388_ADCCONTROL9,
+			 0, 0xc0, 1, dac_adc_tlv),
+
+	SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+			    es8388_get_deemph, es8388_put_deemph),
+
+	SOC_ENUM("Capture Polarity", adcpol),
+
+	SOC_SINGLE_TLV("Left Mixer Left Bypass Volume",
+		       ES8388_DACCONTROL17, 3, 7, 1, bypass_tlv),
+	SOC_SINGLE_TLV("Left Mixer Right Bypass Volume",
+		       ES8388_DACCONTROL19, 3, 7, 1, bypass_tlv),
+	SOC_SINGLE_TLV("Right Mixer Left Bypass Volume",
+		       ES8388_DACCONTROL18, 3, 7, 1, bypass_tlv),
+	SOC_SINGLE_TLV("Right Mixer Right Bypass Volume",
+		       ES8388_DACCONTROL20, 3, 7, 1, bypass_tlv),
+
+	SOC_DOUBLE_R_TLV("PCM Volume",
+			 ES8388_LDACVOL, ES8388_RDACVOL,
+			 0, ES8388_DACVOL_MAX, 1, dac_adc_tlv),
+
+	SOC_DOUBLE_R_TLV("Output 1 Playback Volume",
+			 ES8388_LOUT1VOL, ES8388_ROUT1VOL,
+			 0, ES8388_OUT1VOL_MAX, 0, play_tlv),
+
+	SOC_DOUBLE_R_TLV("Output 2 Playback Volume",
+			 ES8388_LOUT2VOL, ES8388_ROUT2VOL,
+			 0, ES8388_OUT2VOL_MAX, 0, play_tlv),
+
+	SOC_DOUBLE_TLV("Mic PGA Volume", ES8388_ADCCONTROL1,
+		       4, 0, 8, 0, mic_tlv),
+};
+
+/*
+ * DAPM Controls
+ */
+static const char * const es8388_line_texts[] = {
+	"Line 1", "Line 2", "PGA", "Differential"};
+
+static const struct soc_enum es8388_lline_enum =
+	SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 3,
+			ARRAY_SIZE(es8388_line_texts),
+			es8388_line_texts);
+static const struct snd_kcontrol_new es8388_left_line_controls =
+	SOC_DAPM_ENUM("Route", es8388_lline_enum);
+
+static const struct soc_enum es8388_rline_enum =
+	SOC_ENUM_SINGLE(ES8388_DACCONTROL16, 0,
+			ARRAY_SIZE(es8388_line_texts),
+			es8388_line_texts);
+static const struct snd_kcontrol_new es8388_right_line_controls =
+	SOC_DAPM_ENUM("Route", es8388_rline_enum);
+
+/* Left Mixer */
+static const struct snd_kcontrol_new es8388_left_mixer_controls[] = {
+	SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL17, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL17, 6, 1, 0),
+	SOC_DAPM_SINGLE("Right Playback Switch", ES8388_DACCONTROL18, 7, 1, 0),
+	SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL18, 6, 1, 0),
+};
+
+/* Right Mixer */
+static const struct snd_kcontrol_new es8388_right_mixer_controls[]
= { + SOC_DAPM_SINGLE("Left Playback Switch", ES8388_DACCONTROL19, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass Switch", ES8388_DACCONTROL19, 6, 1, 0), + SOC_DAPM_SINGLE("Playback Switch", ES8388_DACCONTROL20, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8388_DACCONTROL20, 6, 1, 0), +}; + +static const char * const es8388_pga_sel[] = { + "Line 1", "Line 2", "Line 3", "Differential"}; + +/* Left PGA Mux */ +static const struct soc_enum es8388_lpga_enum = + SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 6, + ARRAY_SIZE(es8388_pga_sel), + es8388_pga_sel); +static const struct snd_kcontrol_new es8388_left_pga_controls = + SOC_DAPM_ENUM("Route", es8388_lpga_enum); + +/* Right PGA Mux */ +static const struct soc_enum es8388_rpga_enum = + SOC_ENUM_SINGLE(ES8388_ADCCONTROL2, 4, + ARRAY_SIZE(es8388_pga_sel), + es8388_pga_sel); +static const struct snd_kcontrol_new es8388_right_pga_controls = + SOC_DAPM_ENUM("Route", es8388_rpga_enum); + +/* Differential Mux */ +static const char * const es8388_diff_sel[] = {"Line 1", "Line 2"}; +static SOC_ENUM_SINGLE_DECL(diffmux, + ES8388_ADCCONTROL3, 7, es8388_diff_sel); +static const struct snd_kcontrol_new es8388_diffmux_controls = + SOC_DAPM_ENUM("Route", diffmux); + +/* Mono ADC Mux */ +static const char * const es8388_mono_mux[] = {"Stereo", "Mono (Left)", + "Mono (Right)", "Digital Mono"}; +static SOC_ENUM_SINGLE_DECL(monomux, + ES8388_ADCCONTROL3, 3, es8388_mono_mux); +static const struct snd_kcontrol_new es8388_monomux_controls = + SOC_DAPM_ENUM("Route", monomux); + +static const struct snd_soc_dapm_widget es8388_dapm_widgets[] = { + SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, + &es8388_diffmux_controls), + SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, + &es8388_monomux_controls), + SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, + &es8388_monomux_controls), + + SND_SOC_DAPM_MUX("Left PGA Mux", ES8388_ADCPOWER, + ES8388_ADCPOWER_AINL_OFF, 1, + &es8388_left_pga_controls), + SND_SOC_DAPM_MUX("Right PGA Mux", ES8388_ADCPOWER, + ES8388_ADCPOWER_AINR_OFF, 1, + &es8388_right_pga_controls), + + SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, + &es8388_left_line_controls), + SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, + &es8388_right_line_controls), + + SND_SOC_DAPM_ADC("Right ADC", "Right Capture", ES8388_ADCPOWER, + ES8388_ADCPOWER_ADCR_OFF, 1), + SND_SOC_DAPM_ADC("Left ADC", "Left Capture", ES8388_ADCPOWER, + ES8388_ADCPOWER_ADCL_OFF, 1), + + SND_SOC_DAPM_SUPPLY("DAC STM", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACSTM_RESET, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC STM", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCSTM_RESET, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC DIG", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACDIG_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC DIG", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCDIG_OFF, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC DLL", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACDLL_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("ADC DLL", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCDLL_OFF, 1, NULL, 0), + + SND_SOC_DAPM_SUPPLY("ADC Vref", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_ADCVREF_OFF, 1, NULL, 0), + SND_SOC_DAPM_SUPPLY("DAC Vref", ES8388_CHIPPOWER, + ES8388_CHIPPOWER_DACVREF_OFF, 1, NULL, 0), + + SND_SOC_DAPM_DAC("Right DAC", "Right Playback", ES8388_DACPOWER, + ES8388_DACPOWER_RDAC_OFF, 1), + SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8388_DACPOWER, + ES8388_DACPOWER_LDAC_OFF, 1), + + SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + &es8388_left_mixer_controls[0], + 
ARRAY_SIZE(es8388_left_mixer_controls)), + SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + &es8388_right_mixer_controls[0], + ARRAY_SIZE(es8388_right_mixer_controls)), + + SND_SOC_DAPM_PGA("Right Out 2", ES8388_DACPOWER, + ES8388_DACPOWER_ROUT2_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Left Out 2", ES8388_DACPOWER, + ES8388_DACPOWER_LOUT2_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Right Out 1", ES8388_DACPOWER, + ES8388_DACPOWER_ROUT1_ON, 0, NULL, 0), + SND_SOC_DAPM_PGA("Left Out 1", ES8388_DACPOWER, + ES8388_DACPOWER_LOUT1_ON, 0, NULL, 0), + + SND_SOC_DAPM_OUTPUT("LOUT1"), + SND_SOC_DAPM_OUTPUT("ROUT1"), + SND_SOC_DAPM_OUTPUT("LOUT2"), + SND_SOC_DAPM_OUTPUT("ROUT2"), + + SND_SOC_DAPM_INPUT("LINPUT1"), + SND_SOC_DAPM_INPUT("LINPUT2"), + SND_SOC_DAPM_INPUT("RINPUT1"), + SND_SOC_DAPM_INPUT("RINPUT2"), +}; + +static const struct snd_soc_dapm_route es8388_dapm_routes[] = { + { "Left Line Mux", "Line 1", "LINPUT1" }, + { "Left Line Mux", "Line 2", "LINPUT2" }, + { "Left Line Mux", "PGA", "Left PGA Mux" }, + { "Left Line Mux", "Differential", "Differential Mux" }, + + { "Right Line Mux", "Line 1", "RINPUT1" }, + { "Right Line Mux", "Line 2", "RINPUT2" }, + { "Right Line Mux", "PGA", "Right PGA Mux" }, + { "Right Line Mux", "Differential", "Differential Mux" }, + + { "Left PGA Mux", "Line 1", "LINPUT1" }, + { "Left PGA Mux", "Line 2", "LINPUT2" }, + { "Left PGA Mux", "Differential", "Differential Mux" }, + + { "Right PGA Mux", "Line 1", "RINPUT1" }, + { "Right PGA Mux", "Line 2", "RINPUT2" }, + { "Right PGA Mux", "Differential", "Differential Mux" }, + + { "Differential Mux", "Line 1", "LINPUT1" }, + { "Differential Mux", "Line 1", "RINPUT1" }, + { "Differential Mux", "Line 2", "LINPUT2" }, + { "Differential Mux", "Line 2", "RINPUT2" }, + + { "Left ADC Mux", "Stereo", "Left PGA Mux" }, + { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" }, + { "Left ADC Mux", "Digital Mono", "Left PGA Mux" }, + + { "Right ADC Mux", "Stereo", "Right PGA Mux" }, + { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" }, + { "Right ADC Mux", "Digital Mono", "Right PGA Mux" }, + + { "Left ADC", NULL, "Left ADC Mux" }, + { "Right ADC", NULL, "Right ADC Mux" }, + + { "ADC DIG", NULL, "ADC STM" }, + { "ADC DIG", NULL, "ADC Vref" }, + { "ADC DIG", NULL, "ADC DLL" }, + + { "Left ADC", NULL, "ADC DIG" }, + { "Right ADC", NULL, "ADC DIG" }, + + { "Left Line Mux", "Line 1", "LINPUT1" }, + { "Left Line Mux", "Line 2", "LINPUT2" }, + { "Left Line Mux", "PGA", "Left PGA Mux" }, + { "Left Line Mux", "Differential", "Differential Mux" }, + + { "Right Line Mux", "Line 1", "RINPUT1" }, + { "Right Line Mux", "Line 2", "RINPUT2" }, + { "Right Line Mux", "PGA", "Right PGA Mux" }, + { "Right Line Mux", "Differential", "Differential Mux" }, + + { "Left Out 1", NULL, "Left DAC" }, + { "Right Out 1", NULL, "Right DAC" }, + { "Left Out 2", NULL, "Left DAC" }, + { "Right Out 2", NULL, "Right DAC" }, + + { "Left Mixer", "Playback Switch", "Left DAC" }, + { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, + { "Left Mixer", "Right Playback Switch", "Right DAC" }, + { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, + + { "Right Mixer", "Left Playback Switch", "Left DAC" }, + { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, + { "Right Mixer", "Playback Switch", "Right DAC" }, + { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, + + { "DAC DIG", NULL, "DAC STM" }, + { "DAC DIG", NULL, "DAC Vref" }, + { "DAC DIG", NULL, "DAC DLL" }, + + { "Left DAC", NULL, "DAC DIG" }, + { "Right DAC", NULL, "DAC DIG" }, + + { "Left Out 
1", NULL, "Left Mixer" }, + { "LOUT1", NULL, "Left Out 1" }, + { "Right Out 1", NULL, "Right Mixer" }, + { "ROUT1", NULL, "Right Out 1" }, + + { "Left Out 2", NULL, "Left Mixer" }, + { "LOUT2", NULL, "Left Out 2" }, + { "Right Out 2", NULL, "Right Mixer" }, + { "ROUT2", NULL, "Right Out 2" }, +}; + +static int es8388_mute(struct snd_soc_dai *dai, int mute) +{ + return snd_soc_component_update_bits(dai->component, ES8388_DACCONTROL3, + ES8388_DACCONTROL3_DACMUTE, + mute ? ES8388_DACCONTROL3_DACMUTE : 0); +} + +static int es8388_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + + if (es8388->master && es8388->sysclk_constraints) + snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + es8388->sysclk_constraints); + + return 0; +} + +static int es8388_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_component *component = dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int i; + int reg; + int wl; + int ratio; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + reg = ES8388_DACCONTROL2; + else + reg = ES8388_ADCCONTROL5; + + if (es8388->master) { + if (!es8388->sysclk_constraints) { + dev_err(component->dev, "No MCLK configured\n"); + return -EINVAL; + } + + for (i = 0; i < es8388->sysclk_constraints->count; i++) + if (es8388->sysclk_constraints->list[i] == + params_rate(params)) + break; + + if (i == es8388->sysclk_constraints->count) { + dev_err(component->dev, + "LRCLK %d unsupported with current clock\n", + params_rate(params)); + return -EINVAL; + } + ratio = es8388->mclk_ratios[i]; + } else { + ratio = 0; + es8388->mclkdiv2 = 0; + } + + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MCLKDIV2, + es8388->mclkdiv2 ? 
ES8388_MASTERMODE_MCLKDIV2 : 0); + + switch (params_width(params)) { + case 16: + wl = 3; + break; + case 18: + wl = 2; + break; + case 20: + wl = 1; + break; + case 24: + wl = 0; + break; + case 32: + wl = 4; + break; + default: + return -EINVAL; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + snd_soc_component_update_bits(component, ES8388_DACCONTROL1, + ES8388_DACCONTROL1_DACWL_MASK, + wl << ES8388_DACCONTROL1_DACWL_SHIFT); + + es8388->playback_fs = params_rate(params); + es8388_set_deemph(component); + } else + snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, + ES8388_ADCCONTROL4_ADCWL_MASK, + wl << ES8388_ADCCONTROL4_ADCWL_SHIFT); + + return snd_soc_component_update_bits(component, reg, ES8388_RATEMASK, ratio); +} + +static int es8388_set_sysclk(struct snd_soc_dai *codec_dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + int mclkdiv2 = 0; + + switch (freq) { + case 0: + es8388->sysclk_constraints = NULL; + es8388->mclk_ratios = NULL; + break; + case 22579200: + mclkdiv2 = 1; + /* fallthru */ + case 11289600: + es8388->sysclk_constraints = &constraints_11289; + es8388->mclk_ratios = ratios_11289; + break; + case 24576000: + mclkdiv2 = 1; + /* fallthru */ + case 12288000: + es8388->sysclk_constraints = &constraints_12288; + es8388->mclk_ratios = ratios_12288; + break; + default: + return -EINVAL; + } + + es8388->mclkdiv2 = mclkdiv2; + return 0; +} + +static int es8388_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + struct snd_soc_component *component = codec_dai->component; + struct es8388_priv *es8388 = snd_soc_component_get_drvdata(component); + u8 dac_mode = 0; + u8 adc_mode = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + /* Master serial port mode, with BCLK generated automatically */ + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MSC, + ES8388_MASTERMODE_MSC); + es8388->master = true; + break; + case SND_SOC_DAIFMT_CBS_CFS: + /* Slave serial port mode */ + snd_soc_component_update_bits(component, ES8388_MASTERMODE, + ES8388_MASTERMODE_MSC, 0); + es8388->master = false; + break; + default: + return -EINVAL; + } + + /* interface format */ + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_I2S; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_I2S; + break; + case SND_SOC_DAIFMT_RIGHT_J: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_RJUST; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_RJUST; + break; + case SND_SOC_DAIFMT_LEFT_J: + dac_mode |= ES8388_DACCONTROL1_DACFORMAT_LJUST; + adc_mode |= ES8388_ADCCONTROL4_ADCFORMAT_LJUST; + break; + default: + return -EINVAL; + } + + /* clock inversion */ + if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) + return -EINVAL; + + snd_soc_component_update_bits(component, ES8388_DACCONTROL1, + ES8388_DACCONTROL1_DACFORMAT_MASK, dac_mode); + snd_soc_component_update_bits(component, ES8388_ADCCONTROL4, + ES8388_ADCCONTROL4_ADCFORMAT_MASK, adc_mode); + + return 0; +} + +static int es8388_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + /* VREF, VMID=2x50k, digital enabled */ + snd_soc_component_write(component, ES8388_CHIPPOWER, 0); + snd_soc_component_update_bits(component, ES8388_CONTROL1, + 
ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_50k | + ES8388_CONTROL1_ENREF); + break; + + case SND_SOC_BIAS_STANDBY: + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_5k | + ES8388_CONTROL1_ENREF); + + /* Charge caps */ + msleep(100); + } + + snd_soc_component_write(component, ES8388_CONTROL2, + ES8388_CONTROL2_OVERCURRENT_ON | + ES8388_CONTROL2_THERMAL_SHUTDOWN_ON); + + /* VREF, VMID=2*500k, digital stopped */ + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + ES8388_CONTROL1_VMIDSEL_500k | + ES8388_CONTROL1_ENREF); + break; + + case SND_SOC_BIAS_OFF: + snd_soc_component_update_bits(component, ES8388_CONTROL1, + ES8388_CONTROL1_VMIDSEL_MASK | + ES8388_CONTROL1_ENREF, + 0); + break; + } + return 0; +} + +static const struct snd_soc_dai_ops es8388_dai_ops = { + .startup = es8388_startup, + .hw_params = es8388_hw_params, + .digital_mute = es8388_mute, + .set_sysclk = es8388_set_sysclk, + .set_fmt = es8388_set_dai_fmt, +}; + +static struct snd_soc_dai_driver es8388_dai = { + .name = "es8388-hifi", + .playback = { + .stream_name = "Playback", + .channels_min = 2, + .channels_max = 2, + .rates = ES8388_RATES, + .formats = ES8388_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 2, + .rates = ES8388_RATES, + .formats = ES8388_FORMATS, + }, + .ops = &es8388_dai_ops, + .symmetric_rates = 1, +}; + +static int es8388_suspend(struct snd_soc_component *component) +{ + return 0; +} + +static int es8388_resume(struct snd_soc_component *component) +{ + struct regmap *regmap = dev_get_regmap(component->dev, NULL); + struct es8388_priv *es8388; + int ret; + + es8388 = snd_soc_component_get_drvdata(component); + + regcache_mark_dirty(regmap); + ret = regcache_sync(regmap); + if (ret) { + dev_err(component->dev, "unable to sync regcache\n"); + return ret; + } + + return 0; +} + +static int es8388_component_probe(struct snd_soc_component *component) +{ + snd_soc_component_write(component, ES8388_ADCPOWER, 0xf0); + snd_soc_component_write(component, ES8388_CONTROL1, 0x30); + snd_soc_component_write(component, ES8388_DACCONTROL21, 0x80); + snd_soc_component_write(component, ES8388_ADCCONTROL10, 0xda); + + return 0; +} + +static void es8388_remove(struct snd_soc_component *component) +{ +} + +const struct regmap_config es8388_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ES8388_REG_MAX, + .cache_type = REGCACHE_RBTREE, + .use_single_rw = true, +}; +EXPORT_SYMBOL_GPL(es8388_regmap_config); + +static const struct snd_soc_component_driver es8388_component_driver = { + .probe = es8388_component_probe, + .remove = es8388_remove, + .suspend = es8388_suspend, + .resume = es8388_resume, + .set_bias_level = es8388_set_bias_level, + .controls = es8388_snd_controls, + .num_controls = ARRAY_SIZE(es8388_snd_controls), + .dapm_widgets = es8388_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(es8388_dapm_widgets), + .dapm_routes = es8388_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(es8388_dapm_routes), + .suspend_bias_off = 1, + .idle_bias_on = 1, + .use_pmdown_time = 1, + .endianness = 1, + .non_legacy_dai_naming = 1, +}; + +int es8388_probe(struct device *dev, struct regmap *regmap) +{ + struct es8388_priv *es8388; + + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + es8388 = 
devm_kzalloc(dev, sizeof(*es8388), GFP_KERNEL); + if (es8388 == NULL) + return -ENOMEM; + + es8388->regmap = regmap; + + dev_set_drvdata(dev, es8388); + + return devm_snd_soc_register_component(dev, + &es8388_component_driver, &es8388_dai, 1); +} +EXPORT_SYMBOL_GPL(es8388_probe); + +static const struct i2c_device_id es8388_id[] = { + { "es8388", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, es8388_id); + +static const struct of_device_id es8388_of_match[] = { + { .compatible = "everest,es8388", }, + { } +}; +MODULE_DEVICE_TABLE(of, es8388_of_match); + +static struct acpi_device_id es8388_acpi_match[] = { + {"ESSX8388", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, es8388_acpi_match); + +static int es8388_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + return es8388_probe(&i2c->dev, + devm_regmap_init_i2c(i2c, &es8388_regmap_config)); +} + +static struct i2c_driver es8388_i2c_driver = { + .driver = { + .name = "es8388", + .of_match_table = es8388_of_match, + .acpi_match_table = es8388_acpi_match, + }, + .probe = es8388_i2c_probe, + .id_table = es8388_id, +}; + +module_i2c_driver(es8388_i2c_driver); + +MODULE_DESCRIPTION("ASoC ES8388 driver"); +MODULE_AUTHOR("Yiqun Zhang "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/es8388.h b/sound/soc/codecs/es8388.h new file mode 100644 index 0000000000000000000000000000000000000000..5858a71261fba6d8258eac45edc2d0cfcbda39ef --- /dev/null +++ b/sound/soc/codecs/es8388.h @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * es8388.h -- ES8388 ALSA SoC Audio driver + */ + +#ifndef _ES8388_H +#define _ES8388_H + +#include + +struct device; + +extern const struct regmap_config es8388_regmap_config; +int es8388_probe(struct device *dev, struct regmap *regmap); + +#define ES8388_DACLVOL 46 +#define ES8388_DACRVOL 47 +#define ES8388_DACCTL 28 +#define ES8388_RATEMASK (0x1f << 0) + +#define ES8388_CONTROL1 0x00 +#define ES8388_CONTROL1_VMIDSEL_OFF (0 << 0) +#define ES8388_CONTROL1_VMIDSEL_50k (1 << 0) +#define ES8388_CONTROL1_VMIDSEL_500k (2 << 0) +#define ES8388_CONTROL1_VMIDSEL_5k (3 << 0) +#define ES8388_CONTROL1_VMIDSEL_MASK (3 << 0) +#define ES8388_CONTROL1_ENREF (1 << 2) +#define ES8388_CONTROL1_SEQEN (1 << 3) +#define ES8388_CONTROL1_SAMEFS (1 << 4) +#define ES8388_CONTROL1_DACMCLK_ADC (0 << 5) +#define ES8388_CONTROL1_DACMCLK_DAC (1 << 5) +#define ES8388_CONTROL1_LRCM (1 << 6) +#define ES8388_CONTROL1_SCP_RESET (1 << 7) + +#define ES8388_CONTROL2 0x01 +#define ES8388_CONTROL2_VREF_BUF_OFF (1 << 0) +#define ES8388_CONTROL2_VREF_LOWPOWER (1 << 1) +#define ES8388_CONTROL2_IBIASGEN_OFF (1 << 2) +#define ES8388_CONTROL2_ANALOG_OFF (1 << 3) +#define ES8388_CONTROL2_VREF_BUF_LOWPOWER (1 << 4) +#define ES8388_CONTROL2_VCM_MOD_LOWPOWER (1 << 5) +#define ES8388_CONTROL2_OVERCURRENT_ON (1 << 6) +#define ES8388_CONTROL2_THERMAL_SHUTDOWN_ON (1 << 7) + +#define ES8388_CHIPPOWER 0x02 +#define ES8388_CHIPPOWER_DACVREF_OFF 0 +#define ES8388_CHIPPOWER_ADCVREF_OFF 1 +#define ES8388_CHIPPOWER_DACDLL_OFF 2 +#define ES8388_CHIPPOWER_ADCDLL_OFF 3 +#define ES8388_CHIPPOWER_DACSTM_RESET 4 +#define ES8388_CHIPPOWER_ADCSTM_RESET 5 +#define ES8388_CHIPPOWER_DACDIG_OFF 6 +#define ES8388_CHIPPOWER_ADCDIG_OFF 7 + +#define ES8388_ADCPOWER 0x03 +#define ES8388_ADCPOWER_INT1_LOWPOWER 0 +#define ES8388_ADCPOWER_FLASH_ADC_LOWPOWER 1 +#define ES8388_ADCPOWER_ADC_BIAS_GEN_OFF 2 +#define ES8388_ADCPOWER_MIC_BIAS_OFF 3 +#define ES8388_ADCPOWER_ADCR_OFF 4 +#define ES8388_ADCPOWER_ADCL_OFF 5 +#define ES8388_ADCPOWER_AINR_OFF 6 +#define 
ES8388_ADCPOWER_AINL_OFF 7 + +#define ES8388_DACPOWER 0x04 +#define ES8388_DACPOWER_OUT3_ON 0 +#define ES8388_DACPOWER_MONO_ON 1 +#define ES8388_DACPOWER_ROUT2_ON 2 +#define ES8388_DACPOWER_LOUT2_ON 3 +#define ES8388_DACPOWER_ROUT1_ON 4 +#define ES8388_DACPOWER_LOUT1_ON 5 +#define ES8388_DACPOWER_RDAC_OFF 6 +#define ES8388_DACPOWER_LDAC_OFF 7 + +#define ES8388_CHIPLOPOW1 0x05 +#define ES8388_CHIPLOPOW2 0x06 +#define ES8388_ANAVOLMANAG 0x07 + +#define ES8388_MASTERMODE 0x08 +#define ES8388_MASTERMODE_BCLKDIV (0 << 0) +#define ES8388_MASTERMODE_BCLK_INV (1 << 5) +#define ES8388_MASTERMODE_MCLKDIV2 (1 << 6) +#define ES8388_MASTERMODE_MSC (1 << 7) + +#define ES8388_ADCCONTROL1 0x09 +#define ES8388_ADCCONTROL2 0x0a +#define ES8388_ADCCONTROL3 0x0b + +#define ES8388_ADCCONTROL4 0x0c +#define ES8388_ADCCONTROL4_ADCFORMAT_MASK (3 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_I2S (0 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_LJUST (1 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_RJUST (2 << 0) +#define ES8388_ADCCONTROL4_ADCFORMAT_PCM (3 << 0) +#define ES8388_ADCCONTROL4_ADCWL_SHIFT 2 +#define ES8388_ADCCONTROL4_ADCWL_MASK (7 << 2) +#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_NORMAL (0 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_I2S_POL_INV (1 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK2 (0 << 5) +#define ES8388_ADCCONTROL4_ADCLRP_PCM_MSB_CLK1 (1 << 5) + +#define ES8388_ADCCONTROL5 0x0d +#define ES8388_ADCCONTROL5_RATEMASK (0x1f << 0) + +#define ES8388_ADCCONTROL6 0x0e + +#define ES8388_ADCCONTROL7 0x0f +#define ES8388_ADCCONTROL7_ADC_MUTE (1 << 2) +#define ES8388_ADCCONTROL7_ADC_LER (1 << 3) +#define ES8388_ADCCONTROL7_ADC_ZERO_CROSS (1 << 4) +#define ES8388_ADCCONTROL7_ADC_SOFT_RAMP (1 << 5) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_4 (0 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_8 (1 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_16 (2 << 6) +#define ES8388_ADCCONTROL7_ADC_RAMP_RATE_32 (3 << 6) + +#define ES8388_ADCCONTROL8 0x10 +#define ES8388_ADCCONTROL9 0x11 +#define ES8388_ADCCONTROL10 0x12 +#define ES8388_ADCCONTROL11 0x13 +#define ES8388_ADCCONTROL12 0x14 +#define ES8388_ADCCONTROL13 0x15 +#define ES8388_ADCCONTROL14 0x16 + +#define ES8388_DACCONTROL1 0x17 +#define ES8388_DACCONTROL1_DACFORMAT_MASK (3 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_I2S (0 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_LJUST (1 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_RJUST (2 << 1) +#define ES8388_DACCONTROL1_DACFORMAT_PCM (3 << 1) +#define ES8388_DACCONTROL1_DACWL_SHIFT 3 +#define ES8388_DACCONTROL1_DACWL_MASK (7 << 3) +#define ES8388_DACCONTROL1_DACLRP_I2S_POL_NORMAL (0 << 6) +#define ES8388_DACCONTROL1_DACLRP_I2S_POL_INV (1 << 6) +#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK2 (0 << 6) +#define ES8388_DACCONTROL1_DACLRP_PCM_MSB_CLK1 (1 << 6) +#define ES8388_DACCONTROL1_LRSWAP (1 << 7) + +#define ES8388_DACCONTROL2 0x18 +#define ES8388_DACCONTROL2_RATEMASK (0x1f << 0) +#define ES8388_DACCONTROL2_DOUBLESPEED (1 << 5) + +#define ES8388_DACCONTROL3 0x19 +#define ES8388_DACCONTROL3_AUTOMUTE (1 << 2) +#define ES8388_DACCONTROL3_DACMUTE (1 << 2) +#define ES8388_DACCONTROL3_LEFTGAINVOL (1 << 3) +#define ES8388_DACCONTROL3_DACZEROCROSS (1 << 4) +#define ES8388_DACCONTROL3_DACSOFTRAMP (1 << 5) +#define ES8388_DACCONTROL3_DACRAMPRATE (3 << 6) + +#define ES8388_LDACVOL 0x1a +#define ES8388_LDACVOL_MASK (0 << 0) +#define ES8388_LDACVOL_MAX (0xc0) + +#define ES8388_RDACVOL 0x1b +#define ES8388_RDACVOL_MASK (0 << 0) +#define ES8388_RDACVOL_MAX (0xc0) + +#define ES8388_DACVOL_MAX (0xc0) + +#define ES8388_DACCONTROL4 
0x1a +#define ES8388_DACCONTROL5 0x1b + +#define ES8388_DACCONTROL6 0x1c +#define ES8388_DACCONTROL6_CLICKFREE (1 << 3) +#define ES8388_DACCONTROL6_DAC_INVR (1 << 4) +#define ES8388_DACCONTROL6_DAC_INVL (1 << 5) +#define ES8388_DACCONTROL6_DEEMPH_MASK (3 << 6) +#define ES8388_DACCONTROL6_DEEMPH_OFF (0 << 6) +#define ES8388_DACCONTROL6_DEEMPH_32k (1 << 6) +#define ES8388_DACCONTROL6_DEEMPH_44_1k (2 << 6) +#define ES8388_DACCONTROL6_DEEMPH_48k (3 << 6) + +#define ES8388_DACCONTROL7 0x1d +#define ES8388_DACCONTROL7_VPP_SCALE_3p5 (0 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_4p0 (1 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_3p0 (2 << 0) +#define ES8388_DACCONTROL7_VPP_SCALE_2p5 (3 << 0) +#define ES8388_DACCONTROL7_SHELVING_STRENGTH (1 << 2) /* In eights */ +#define ES8388_DACCONTROL7_MONO (1 << 5) +#define ES8388_DACCONTROL7_ZEROR (1 << 6) +#define ES8388_DACCONTROL7_ZEROL (1 << 7) + +/* Shelving filter */ +#define ES8388_DACCONTROL8 0x1e +#define ES8388_DACCONTROL9 0x1f +#define ES8388_DACCONTROL10 0x20 +#define ES8388_DACCONTROL11 0x21 +#define ES8388_DACCONTROL12 0x22 +#define ES8388_DACCONTROL13 0x23 +#define ES8388_DACCONTROL14 0x24 +#define ES8388_DACCONTROL15 0x25 + +#define ES8388_DACCONTROL16 0x26 +#define ES8388_DACCONTROL16_RMIXSEL_RIN1 (0 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RIN2 (1 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RIN3 (2 << 0) +#define ES8388_DACCONTROL16_RMIXSEL_RADC (3 << 0) +#define ES8388_DACCONTROL16_LMIXSEL_LIN1 (0 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LIN2 (1 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LIN3 (2 << 3) +#define ES8388_DACCONTROL16_LMIXSEL_LADC (3 << 3) + +#define ES8388_DACCONTROL17 0x27 +#define ES8388_DACCONTROL17_LI2LOVOL (7 << 3) +#define ES8388_DACCONTROL17_LI2LO (1 << 6) +#define ES8388_DACCONTROL17_LD2LO (1 << 7) + +#define ES8388_DACCONTROL18 0x28 +#define ES8388_DACCONTROL18_RI2LOVOL (7 << 3) +#define ES8388_DACCONTROL18_RI2LO (1 << 6) +#define ES8388_DACCONTROL18_RD2LO (1 << 7) + +#define ES8388_DACCONTROL19 0x29 +#define ES8388_DACCONTROL19_LI2ROVOL (7 << 3) +#define ES8388_DACCONTROL19_LI2RO (1 << 6) +#define ES8388_DACCONTROL19_LD2RO (1 << 7) + +#define ES8388_DACCONTROL20 0x2a +#define ES8388_DACCONTROL20_RI2ROVOL (7 << 3) +#define ES8388_DACCONTROL20_RI2RO (1 << 6) +#define ES8388_DACCONTROL20_RD2RO (1 << 7) + +#define ES8388_DACCONTROL21 0x2b +#define ES8388_DACCONTROL21_LI2MOVOL (7 << 3) +#define ES8388_DACCONTROL21_LI2MO (1 << 6) +#define ES8388_DACCONTROL21_LD2MO (1 << 7) + +#define ES8388_DACCONTROL22 0x2c +#define ES8388_DACCONTROL22_RI2MOVOL (7 << 3) +#define ES8388_DACCONTROL22_RI2MO (1 << 6) +#define ES8388_DACCONTROL22_RD2MO (1 << 7) + +#define ES8388_DACCONTROL23 0x2d +#define ES8388_DACCONTROL23_MOUTINV (1 << 1) +#define ES8388_DACCONTROL23_HPSWPOL (1 << 2) +#define ES8388_DACCONTROL23_HPSWEN (1 << 3) +#define ES8388_DACCONTROL23_VROI_1p5k (0 << 4) +#define ES8388_DACCONTROL23_VROI_40k (1 << 4) +#define ES8388_DACCONTROL23_OUT3_VREF (0 << 5) +#define ES8388_DACCONTROL23_OUT3_ROUT1 (1 << 5) +#define ES8388_DACCONTROL23_OUT3_MONOOUT (2 << 5) +#define ES8388_DACCONTROL23_OUT3_RIGHT_MIXER (3 << 5) +#define ES8388_DACCONTROL23_ROUT2INV (1 << 7) + +/* LOUT1 Amplifier */ +#define ES8388_LOUT1VOL 0x2e +#define ES8388_LOUT1VOL_MASK (0 << 5) +#define ES8388_LOUT1VOL_MAX (0x24) + +/* ROUT1 Amplifier */ +#define ES8388_ROUT1VOL 0x2f +#define ES8388_ROUT1VOL_MASK (0 << 5) +#define ES8388_ROUT1VOL_MAX (0x24) + +#define ES8388_OUT1VOL_MAX (0x24) + +/* LOUT2 Amplifier */ +#define ES8388_LOUT2VOL 0x30 +#define 
ES8388_LOUT2VOL_MASK (0 << 5) +#define ES8388_LOUT2VOL_MAX (0x24) + +/* ROUT2 Amplifier */ +#define ES8388_ROUT2VOL 0x31 +#define ES8388_ROUT2VOL_MASK (0 << 5) +#define ES8388_ROUT2VOL_MAX (0x24) + +#define ES8388_OUT2VOL_MAX (0x24) + +/* Mono Out Amplifier */ +#define ES8388_MONOOUTVOL 0x32 +#define ES8388_MONOOUTVOL_MASK (0 << 5) +#define ES8388_MONOOUTVOL_MAX (0x24) + +#define ES8388_DACCONTROL29 0x33 +#define ES8388_DACCONTROL30 0x34 + +#define ES8388_SYSCLK 0 + +#define ES8388_REG_MAX 0x35 + +#define ES8388_1536FS 1536 +#define ES8388_1024FS 1024 +#define ES8388_768FS 768 +#define ES8388_512FS 512 +#define ES8388_384FS 384 +#define ES8388_256FS 256 +#define ES8388_128FS 128 + +#endif diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 7994e8ddc7d217d2a7c23eafd23f9f6880f0daf9..44914e7986f553edf5cb91de9c320c3d892588c7 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -285,6 +286,8 @@ struct hdmi_codec_priv { uint8_t eld[MAX_ELD_BYTES]; struct snd_pcm_chmap *chmap_info; unsigned int chmap_idx; + struct snd_soc_jack *jack; + unsigned int jack_status; }; static const struct snd_soc_dapm_widget hdmi_widgets[] = { @@ -700,6 +703,44 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai) return snd_soc_dapm_add_routes(dapm, &route, 1); } +static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp, + unsigned int jack_status) +{ + if (hcp->jack && jack_status != hcp->jack_status) { + snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT); + hcp->jack_status = jack_status; + } +} + +static void plugged_cb(struct device *dev, bool plugged) +{ + struct hdmi_codec_priv *hcp = dev_get_drvdata(dev); + + if (plugged) + hdmi_codec_jack_report(hcp, SND_JACK_LINEOUT); + else + hdmi_codec_jack_report(hcp, 0); +} + +static int hdmi_codec_set_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack, + void *data) +{ + struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component); + int ret = -EOPNOTSUPP; + + if (hcp->hcd.ops->hook_plugged_cb) { + hcp->jack = jack; + ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent, + hcp->hcd.data, + plugged_cb, + component->dev); + if (ret) + hcp->jack = NULL; + } + return ret; +} + static const struct snd_soc_dai_driver hdmi_i2s_dai = { .name = "i2s-hifi", .id = DAI_ID_I2S, @@ -751,6 +792,7 @@ static const struct snd_soc_component_driver hdmi_driver = { .use_pmdown_time = 1, .endianness = 1, .non_legacy_dai_naming = 1, + .set_jack = hdmi_codec_set_jack, }; static int hdmi_codec_probe(struct platform_device *pdev) diff --git a/sound/soc/phytium/Kconfig b/sound/soc/phytium/Kconfig new file mode 100755 index 0000000000000000000000000000000000000000..bce2cd9d94a2f1332daddb0e1ce1917e9defbba9 --- /dev/null +++ b/sound/soc/phytium/Kconfig @@ -0,0 +1,30 @@ +config SND_SOC_PHYTIUM_I2S + tristate "Phytium I2S Device Driver" + help + Say Y or M if you want to add support for I2S driver for + Phytium I2S device . The device supports 2 channels each + for play and record. + +config SND_PMDK_ES8388 + tristate "Phytium machine support with ES8388" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_ES8388 + help + Say Y if you want to add Phytium machine support for + ES8388 codecs. + +config SND_PMDK_ES8336 + tristate "Phytium machine support with ES8336" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_ES8336 + help + Say Y if you want to add Phytium machine support for + ES8336 codecs. 
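+# Illustrative only: a typical Px210 board configuration enables the CPU
+# DAI driver together with whichever machine driver matches the codec
+# fitted on the board, e.g.:
+#   CONFIG_SND_SOC_PHYTIUM_I2S=y
+#   CONFIG_SND_PMDK_ES8388=m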
+ +config SND_PMDK_DP + tristate "Phytium machine support with DP" + depends on I2C && SND_SOC_PHYTIUM_I2S + select SND_SOC_HDMI_CODEC + help + Say Y if you want to add Phytium machine support for + Displayport on Px210. diff --git a/sound/soc/phytium/Makefile b/sound/soc/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..db3c0659e8442884fddeb23499deb0bbea95564c --- /dev/null +++ b/sound/soc/phytium/Makefile @@ -0,0 +1,13 @@ +# PHYTIUM Platform Support + +snd-soc-phytium-i2s-objs :=phytium_i2s.o +obj-$(CONFIG_SND_SOC_PHYTIUM_I2S) += snd-soc-phytium-i2s.o + +snd-soc-pmdk-es8388-objs :=pmdk_es8388.o +obj-$(CONFIG_SND_PMDK_ES8388) += snd-soc-pmdk-es8388.o + +snd-soc-pmdk-es8336-objs :=pmdk_es8336.o +obj-$(CONFIG_SND_PMDK_ES8336) += snd-soc-pmdk-es8336.o + +snd-soc-pmdk-dp-objs :=pmdk_dp.o +obj-$(CONFIG_SND_PMDK_DP) += snd-soc-pmdk-dp.o diff --git a/sound/soc/phytium/local.h b/sound/soc/phytium/local.h new file mode 100644 index 0000000000000000000000000000000000000000..9a9e5a1e1e1ca9d23d5e1050f127e4fd600c878e --- /dev/null +++ b/sound/soc/phytium/local.h @@ -0,0 +1,328 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020-2023 Phytium Technology Co., Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __PHYTIUM_I2S_LOCAL_H +#define __PHYTIUM_I2S_LOCAL_H + +#include +#include +#include +#include +#include +#include + +/* I2S clk setting*/ +#define CLK_CFG0 0xc00 +#define CLK_CFG1 0xc04 + +/* common register for all channel */ +#define I2S_IER 0x000 +#define IRER 0x004 +#define ITER 0x008 +#define CER 0x00C + +#define RXFFR 0x014 +#define TXFFR 0x018 + +/* Interrupt status register fields */ +#define ISR_TXFO BIT(5) +#define ISR_TXFE BIT(4) +#define ISR_RXFO BIT(1) +#define ISR_RXDA BIT(0) + +/* I2STxRxRegisters for all channels */ +#define LRBR_LTHR(x) (0x40 * x + 0x020) +#define RRBR_RTHR(x) (0x40 * x + 0x024) +#define RER(x) (0x40 * x + 0x028) + +#define RCR(x) (0x40 * x + 0x030) + +#define ISR(x) (0x40 * x + 0x038) +#define IMR(x) (0x40 * x + 0x03C) +#define ROR(x) (0x40 * x + 0x040) +#define TOR(x) (0x40 * x + 0x044) +#define RFCR(x) (0x40 * x + 0x048) +#define TFCR(x) (0x40 * x + 0x04C) +#define RFF(x) (0x40 * x + 0x050) +#define TFF(x) (0x40 * x + 0x054) + +/*enable txd and rxd block channel0~3 */ +#define TER(x) (0x40 * x + 0x02C) +#define CCR 0x010 +#define TCR(x) (0x40 * x + 0x034) + + +/* I2SCOMPRegisters */ +#define I2S_COMP_PARAM_2 0x01F0 +#define I2S_COMP_PARAM_1 0x01F4 +#define I2S_COMP_VERSION 0x01F8 +#define I2S_COMP_TYPE 0x01FC + +/***I2S AND DMA***/ + +#define DMA_GCAP 0x0024 + +#define DMA_CHAL_CONFG1 0x0028 + +#define DMA_CHAL_CONFG0 0x0004 +#define DMA_MASK_INT 0x000c +#define DMA_BDLPU(x) (0x40 * x + 0x0040) +#define DMA_BDLPL(x) (0x40 * x + 0x0044) +#define DMA_CHALX_DEV_ADDR(x) (0x40 * x + 0x0048) +#define DMA_CHALX_CBL(x) (0x40 * x + 0x0054) +#define DMA_CHALX_LVI(x) (0x40 * x + 0x004c) + +#define DMA_CHALX_DSIZE(x) (0x40 * x + 0x0064) +#define DMA_CHALX_DLENTH(x) (0x40 * x + 0x0068) +#define DMA_CHALX_CTL(x) (0x40 * x + 0x0058) + + +#define DMA_CTL 0x0000 + +#define DMA_LPIB(x) (0x40 * x + 0x0050) + +#define DMA_STS 0x0008 + +/****************/ + + +/* max number of fragments - we may use more if allocating more pages for BDL */ +#define BDL_SIZE 4096 +#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16) + +/* + * Component parameter register fields - define the I2S 
block's + * configuration. + */ +#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25) +#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22) +#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19) +#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16) +#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9) +#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7) +#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6) +#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5) +#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4) +#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2) +#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0) + +#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10) +#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7) +#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3) +#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0) + +/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */ +#define COMP_MAX_WORDSIZE (1 << 3) +#define COMP_MAX_DATA_WIDTH (1 << 2) + +#define MAX_CHANNEL_NUM 8 +#define MIN_CHANNEL_NUM 2 + +#define azx_bus(chip) (&(chip)->bus.core) +#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core) + +#define I2S_UNSOL_QUEUE_SIZE 64 +#define I2S_MAX_CODECS 8 /* limit by controller side */ + + +#define azx_stream(dev) (&(dev)->core) + +struct i2sc_bus { + struct device *dev; + const struct i2s_bus_ops *ops; + const struct i2s_io_ops *io_ops; + const struct i2s_ext_bus_ops *ext_ops; + + /* h/w resources */ + unsigned long addr; + void __iomem *remap_addr; + int irq; + + /* codec linked list */ + struct list_head codec_list; + unsigned int num_codecs; + + unsigned int unsol_rp, unsol_wp; + struct work_struct unsol_work; + + struct snd_dma_buffer bdl0; + struct snd_dma_buffer bdl1; + + /* i2s_stream linked list */ + struct list_head stream_list; + + bool reverse_assign; /* assign devices in reverse order */ + + int bdl_pos_adj; /* BDL position adjustment */ + + /* locks */ + spinlock_t reg_lock; +}; + +struct i2s_bus { + struct i2sc_bus core; + + struct snd_card *card; + + struct pci_dev *pci; + + struct mutex prepare_mutex; +}; + + +/* + * i2s stream + */ +struct i2s_stream { + struct i2sc_bus *bus; + struct snd_dma_buffer bdl; /* BDL buffer */ + __le32 *posbuf; /* position buffer pointer */ + int direction; /* playback / capture (SNDRV_PCM_STREAM_*) */ + + unsigned int bufsize; /* size of the play buffer in bytes */ + unsigned int period_bytes; /* size of the period in bytes */ + unsigned int frags; /* number for period in the play buffer */ + unsigned int fifo_size; /* FIFO size */ + + void __iomem *sd_addr; /* stream descriptor pointer */ + + u32 sd_int_sta_mask; /* stream int status mask */ + + /* pcm support */ + struct snd_pcm_substream *substream; /* assigned substream, + * set in PCM open + */ + unsigned int format_val; /* format value to be set in the + * controller and the codec + */ + unsigned char stream_tag; /* assigned stream */ + unsigned char index; /* stream index */ + int assigned_key; /* last device# key assigned to */ + + bool opened; + bool running; + bool prepared; + bool no_period_wakeup; + + int delay_negative_threshold; + + struct list_head list; + +}; + + +struct azx_dev { + struct i2s_stream core; + unsigned int irq_pending:1; +}; + + + +/* PCM setup */ +static inline struct azx_dev *get_azx_dev(struct snd_pcm_substream *substream) +{ + return substream->runtime->private_data; +} + + +#define AZX_MAX_CODECS HDA_MAX_CODECS +#define 
AZX_DEFAULT_CODECS 4 + +#define stream_to_azx_dev(s) container_of(s, struct azx_dev, core) + +struct azx; + +struct i2s_controller_ops { + int (*substream_alloc_pages)(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size); + int (*substream_free_pages)(struct azx *chip, + struct snd_pcm_substream *substream); + int (*position_check)(struct azx *chip, struct azx_dev *azx_dev); +}; + +struct i2s_io_ops { + int (*dma_alloc_pages)(struct i2sc_bus *bus, int type, size_t size, + struct snd_dma_buffer *buf); + void (*dma_free_pages)(struct i2sc_bus *bus, + struct snd_dma_buffer *buf); +}; + +struct azx { + struct i2s_bus bus; + + struct snd_card *card; + struct pci_dev *pci; + int dev_index; + + int playback_streams; + int playback_index_offset; + int capture_streams; + int capture_index_offset; + int num_streams; + + /* Register interaction. */ + const struct i2s_controller_ops *ops; + + /* locks */ + struct mutex open_mutex; /* Prevents concurrent open/close operations */ + + /* PCM */ + struct list_head pcm_list; /* azx_pcm list */ + + /* flags */ + int bdl_pos_adj; + unsigned int running:1; + unsigned int region_requested:1; + unsigned int disabled:1; +}; +struct i2s_phytium { + struct azx chip; + struct snd_pcm_substream *substream; + struct device *dev; + struct device *pdev; + u32 paddr; + void __iomem *regs; + void __iomem *regs_db; + int irq_id; + + /* for pending irqs */ + struct work_struct irq_pending_work; + + /* sync probing */ + struct completion probe_wait; + struct work_struct probe_work; + + /* extra flags */ + unsigned int pcie:1; + unsigned int irq_pending_warned:1; + unsigned int probe_continued:1; + unsigned int i2s_dp:1; + + unsigned int i2s_reg_comp1; + unsigned int i2s_reg_comp2; + struct clk *clk; + unsigned int capability; + unsigned int quirks; + u32 fifo_th; + int active; + u32 xfer_resolution; + u32 ccr; + u32 clk_base; + + struct i2s_clk_config_data config; + + /*azx_dev*/ + struct i2s_stream core; +}; + +#define azx_alloc_stream_pages(chip) \ + snd_i2s_bus_alloc_stream_pages(azx_bus(chip)) + +#endif diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c new file mode 100755 index 0000000000000000000000000000000000000000..284b130540ffab213f3d1bdbd9eefbe26cfa69d3 --- /dev/null +++ b/sound/soc/phytium/phytium_i2s.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2S ASoc driver + * + * Copyright (c) 2020-2023 Phytium Technology Co., Ltd. 
+ * + * Derived from sound/soc/dwc/dwc-i2s.c + * Copyright (C) 2010 ST Microelectronics + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "local.h" + +#define NUM_CAPTURE 1 +#define NUM_PLAYBACK 1 + +#define PHYTIUM_I2S_PLAY (1 << 0) +#define PHYTIUM_I2S_RECORD (1 << 1) +#define PHYTIUM_I2S_SLAVE (1 << 2) +#define PHYTIUM_I2S_MASTER (1 << 3) + +#define PHYTIUM_I2S_QUIRK_16BIT_IDX_OVERRIDE (1 << 2) + +#define TWO_CHANNEL_SUPPORT 2 /* up to 2.0 */ +#define FOUR_CHANNEL_SUPPORT 4 /* up to 3.1 */ +#define SIX_CHANNEL_SUPPORT 6 /* up to 5.1 */ +#define EIGHT_CHANNEL_SUPPORT 8 /* up to 7.1 */ + +struct pdata_px210_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val) +{ + writel(val, io_base + reg); +} + +static inline u32 i2s_read_reg(void __iomem *io_base, int reg) +{ + return readl(io_base + reg); +} + +static inline void i2s_disable_channels(struct i2s_phytium *dev, u32 stream) +{ + u32 i = 0; + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) + i2s_write_reg(dev->regs, TER(i), 0); + } else { + for (i = 0; i < 4; i++) + i2s_write_reg(dev->regs, RER(i), 0); + } +} + +static int substream_free_pages(struct azx *chip, + struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_pages(substream); +} + +static void stream_update(struct i2sc_bus *bus, struct i2s_stream *s) +{ + struct azx *chip = bus_to_azx(bus); + + struct azx_dev *azx_dev = stream_to_azx_dev(s); + + /* check whether this IRQ is really acceptable */ + if (!chip->ops->position_check || + chip->ops->position_check(chip, azx_dev)) { + spin_unlock(&bus->reg_lock); + snd_pcm_period_elapsed(azx_stream(azx_dev)->substream); + spin_lock(&bus->reg_lock); + } + +} + +int snd_i2s_bus_handle_stream_irq(struct i2sc_bus *bus, unsigned int status, + void (*ack)(struct i2sc_bus *, + struct i2s_stream *)) +{ + struct i2s_stream *azx_dev; + u32 sd_status, qc_sd_status; + int handled = 0; + + list_for_each_entry(azx_dev, &bus->stream_list, list) { + + if (status & azx_dev->sd_int_sta_mask) { + sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); + qc_sd_status = i2s_read_reg(azx_dev->sd_addr, DMA_STS); + handled |= 1 << azx_dev->index; + azx_dev->running = 1; + if (!azx_dev->substream || !azx_dev->running || + !(sd_status & 0xffffffff)) { + continue; + } + if (ack) + ack(bus, azx_dev); + } + } + return handled; +} + +irqreturn_t azx_i2s_interrupt(int irq, void *dev_id) +{ + struct azx *chip = dev_id; + struct i2sc_bus *bus = azx_bus(chip); + u32 status; + bool active, handled = false; + int repeat = 0; /* count for avoiding endless loop */ + + spin_lock(&bus->reg_lock); + + if (chip->disabled) + goto unlock; + + do { + + status = i2s_read_reg(bus->remap_addr, DMA_STS); + + if (status == 0) + break; + + handled = true; + active = false; + if (snd_i2s_bus_handle_stream_irq(bus, status, stream_update)) + active = true; + + + } while (active && ++repeat < 1); + + unlock: + spin_unlock(&bus->reg_lock); + + return IRQ_RETVAL(handled); +} + +static int azx_acquire_irq(struct azx *chip, int do_disconnect) +{ + struct i2sc_bus *bus = azx_bus(chip); + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + int err; + + err = devm_request_irq(i2s->dev, i2s->irq_id, azx_i2s_interrupt, IRQF_SHARED, 
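+			       /* the interrupt line can be shared between
+				* controller instances, hence IRQF_SHARED
+				*/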
+ "phytium i2s", chip); + + if (err < 0) { + dev_err(i2s->dev, "failed to request irq\n"); + return err; + } + + bus->irq = i2s->irq_id; + + return 0; +} + +static void i2s_start(struct i2s_phytium *dev, + struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, ITER, 1); + else + i2s_write_reg(dev->regs, IRER, 1); + + /*enable the clock*/ + i2s_write_reg(dev->regs, CER, 1); + + /*enable the i2s*/ + i2s_write_reg(dev->regs, I2S_IER, 1); +} + +static void i2s_stop(struct i2s_phytium *dev, + struct snd_pcm_substream *substream) +{ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, ITER, 0); + else + i2s_write_reg(dev->regs, IRER, 0); + + if (!dev->active) { + i2s_write_reg(dev->regs, CER, 0); + i2s_write_reg(dev->regs, I2S_IER, 0); + } +} + +static void phytium_i2s_config(struct i2s_phytium *dev, int stream) +{ + i2s_disable_channels(dev, stream); + + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(dev->regs, TCR(0), dev->xfer_resolution); + i2s_write_reg(dev->regs, TER(0), 1); + } else { + i2s_write_reg(dev->regs, RCR(0), dev->xfer_resolution); + i2s_write_reg(dev->regs, RER(0), 1); + } +} + +static int phytium_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + struct i2s_clk_config_data *config = &dev->config; + u64 fix, point; + u32 cfg = 0; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + config->data_width = 16; + dev->ccr = 0x00; + dev->xfer_resolution = 0x02; + break; + + case SNDRV_PCM_FORMAT_S24_LE: + config->data_width = 24; + dev->ccr = 0x08; + dev->xfer_resolution = 0x04; + break; + + case SNDRV_PCM_FORMAT_S32_LE: + config->data_width = 32; + dev->ccr = 0x10; + dev->xfer_resolution = 0x05; + break; + + default: + dev_err(dev->dev, "phytium-i2s: unsupported PCM fmt"); + return -EINVAL; + } + + config->chan_nr = params_channels(params); + + switch (config->chan_nr) { + case EIGHT_CHANNEL_SUPPORT: + case SIX_CHANNEL_SUPPORT: + case FOUR_CHANNEL_SUPPORT: + case TWO_CHANNEL_SUPPORT: + break; + default: + dev_err(dev->dev, "channel not supported\n"); + return -EINVAL; + } + + phytium_i2s_config(dev, substream->stream); + + i2s_write_reg(dev->regs, CCR, dev->ccr); + + config->sample_rate = params_rate(params); + if (dev->capability & PHYTIUM_I2S_MASTER) { + fix = dev->clk_base / config->sample_rate / config->data_width / 32; + point = ((dev->clk_base / config->sample_rate) << 10) / config->data_width / 32; + point = (point - (fix << 10)) * 10; + cfg = ((u16) fix << 16) | (u16) point; + i2s_write_reg(dev->regs, CLK_CFG0, cfg); + i2s_write_reg(dev->regs, CLK_CFG1, 0xf); + } + return 0; +} + +static int phytium_i2s_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->regs, TXFFR, 1); + else + i2s_write_reg(dev->regs, RXFFR, 1); + + return 0; +} + +static int phytium_i2s_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + int ret = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + dev->active++; + i2s_start(dev, substream); + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case 
SNDRV_PCM_TRIGGER_PAUSE_PUSH: + dev->active--; + i2s_stop(dev, substream); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int phytium_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(cpu_dai); + int ret = 0; + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + if (dev->capability & PHYTIUM_I2S_SLAVE) + ret = 0; + else + ret = -EINVAL; + break; + case SND_SOC_DAIFMT_CBS_CFS: + if (dev->capability & PHYTIUM_I2S_MASTER) + ret = 0; + else + ret = -EINVAL; + break; + case SND_SOC_DAIFMT_CBM_CFS: + case SND_SOC_DAIFMT_CBS_CFM: + ret = -EINVAL; + break; + default: + dev_dbg(dev->dev, "phytium/i2s: Invalid master/slave format\n"); + ret = -EINVAL; + break; + } + return ret; +} + +static const struct snd_soc_dai_ops phytium_i2s_dai_ops = { + .hw_params = phytium_i2s_hw_params, + .prepare = phytium_i2s_prepare, + .trigger = phytium_i2s_trigger, + .set_fmt = phytium_i2s_set_fmt, +}; + +#ifdef CONFIG_PM +static int phytium_i2s_suspend(struct snd_soc_dai *dai) +{ + return 0; +} + +static int phytium_i2s_resume(struct snd_soc_dai *dai) +{ + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(dai); + if (dai->playback_active) + phytium_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK); + if (dai->capture_active) + phytium_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE); + return 0; +} +#else +#define phytium_i2s_suspend NULL +#define phytium_i2s_resume NULL +#endif + +static struct snd_soc_dai_driver phytium_i2s_dai = { + .playback = { + .stream_name = "i2s-Playback", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .capture = { + .stream_name = "i2s-Capture", + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_8000_192000, + .formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE, + }, + .ops = &phytium_i2s_dai_ops, + .suspend = phytium_i2s_suspend, + .resume = phytium_i2s_resume, + .symmetric_rates = 1, +}; + +static const struct snd_pcm_hardware phytium_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + .rates = SNDRV_PCM_RATE_8000 | + SNDRV_PCM_RATE_32000 | + SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 8000, + .rate_max = 48000, + .formats = (SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 2, + .channels_max = 2, + .buffer_bytes_max = 4096*16, + .period_bytes_min = 1024, + .period_bytes_max = 4096*4, + .periods_min = 2, + .periods_max = 16, + .fifo_size = 16, +}; + +struct i2s_stream *snd_i2s_stream_assign(struct i2sc_bus *bus, + struct snd_pcm_substream *substream) +{ + struct i2s_stream *azx_dev; + struct i2s_stream *res = NULL; + + /* make a non-zero unique key for the substream */ + int key = (substream->pcm->device << 16) | (substream->number << 2) | + (substream->stream + 1); + + list_for_each_entry(azx_dev, &bus->stream_list, list) { + if (azx_dev->direction != substream->stream) + continue; + + azx_dev->opened = 0; + + if (azx_dev->assigned_key == key) { + res = azx_dev; + break; + } + + if (!res || bus->reverse_assign) + res = azx_dev; + } + + if (res) { + spin_lock_irq(&bus->reg_lock); + 
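+		/*
+		 * Claim the stream while holding the register lock so the
+		 * interrupt handler never observes a half-assigned stream.
+		 */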
res->opened = 1; + res->running = 0; + res->assigned_key = key; + res->substream = substream; + spin_unlock_irq(&bus->reg_lock); + } + + return res; +} + +/* assign a stream for the PCM */ +static inline struct azx_dev * +azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) +{ + struct i2s_stream *s; + + s = snd_i2s_stream_assign(azx_bus(chip), substream); + if (!s) + return NULL; + return stream_to_azx_dev(s); +} + +static int phytium_pcm_open(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev; + struct snd_pcm_runtime *runtime = substream->runtime; + + azx_dev = azx_assign_device(chip, substream); + if (azx_dev == NULL) + return -EBUSY; + + snd_soc_set_runtime_hwparams(substream, &phytium_pcm_hardware); + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128); + runtime->private_data = azx_dev; + + return 0; +} + +static int phytium_pcm_close(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + + mutex_lock(&chip->open_mutex); + azx_stream(azx_dev)->opened = 0; + azx_stream(azx_dev)->running = 0; + azx_stream(azx_dev)->substream = NULL; + + mutex_unlock(&chip->open_mutex); + return 0; +} + +static int phytium_pcm_new(struct snd_soc_pcm_runtime *rtd) +{ + struct i2s_phytium *i2s = snd_soc_dai_get_drvdata(rtd->cpu_dai); + size_t size = phytium_pcm_hardware.buffer_bytes_max; + + return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, + SNDRV_DMA_TYPE_DEV, + i2s->pdev, size, size); +} + +static const struct i2s_io_ops axi_i2s_io_ops; +static const struct i2s_controller_ops axi_i2s_ops; + +static int phytium_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + int ret; + + azx_dev->core.bufsize = 0; + azx_dev->core.period_bytes = 0; + azx_dev->core.format_val = 0; + + ret = chip->ops->substream_alloc_pages(chip, substream, + params_buffer_bytes(hw_params)); + + return ret; +} +/* + * set up a BDL entry + */ +static int setup_bdle(struct i2sc_bus *bus, + struct snd_dma_buffer *dmab, + struct i2s_stream *azx_dev, __le32 **bdlp, + int ofs, int size, int with_ioc) +{ + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + __le32 *bdl = *bdlp; + + dmab->addr = runtime->dma_addr; + while (size > 0) { + dma_addr_t addr; + int chunk; + + if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES) + return -EINVAL; + + addr = snd_sgbuf_get_addr(dmab, ofs); + + /* program the address field of the BDL entry */ + bdl[0] = cpu_to_le32((u32)addr); + + bdl[1] = cpu_to_le32(upper_32_bits(addr)); + + /* program the size field of the BDL entry */ + chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size); + + bdl[2] = cpu_to_le32(chunk); + + /* program the IOC to enable interrupt + * only when the whole fragment is processed + */ + size -= chunk; + bdl[3] = (size || !with_ioc) ? 
0 : cpu_to_le32(0x01); + + bdl += 4; + azx_dev->frags++; + ofs += chunk; + } + *bdlp = bdl; + return ofs; +} + +int snd_i2s_stream_setup_periods(struct i2s_stream *azx_dev) +{ + struct i2sc_bus *bus = azx_dev->bus; + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + __le32 *bdl; + int i, ofs, periods, period_bytes; + int pos_adj, pos_align; + + period_bytes = azx_dev->period_bytes; + periods = azx_dev->bufsize / period_bytes; + + /* program the initial BDL entries */ + bdl = (__le32 *)azx_dev->bdl.area; + + ofs = 0; + azx_dev->frags = 0; + + pos_adj = bus->bdl_pos_adj; + + if (!azx_dev->no_period_wakeup && pos_adj > 0) { + + pos_align = pos_adj; + pos_adj = (pos_adj * runtime->rate + 47999) / 48000; + if (!pos_adj) + pos_adj = pos_align; + else + pos_adj = ((pos_adj + pos_align - 1) / pos_align) * + pos_align; + pos_adj = frames_to_bytes(runtime, pos_adj); + if (pos_adj >= period_bytes) { + dev_warn(bus->dev, "Too big adjustment %d\n", + pos_adj); + pos_adj = 0; + } else { + + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, + &bdl, ofs, pos_adj, true); + if (ofs < 0) + goto error; + } + } else + pos_adj = 0; + + for (i = 0; i < periods; i++) { + if (i == periods - 1 && pos_adj) + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, &bdl, ofs, + period_bytes - pos_adj, 0); + else + ofs = setup_bdle(bus, snd_pcm_get_dma_buf(substream), + azx_dev, &bdl, ofs, + period_bytes, + !azx_dev->no_period_wakeup); + if (ofs < 0) + goto error; + } + return 0; + + error: + dev_err(bus->dev, "Too many BDL entries: buffer=%d, period=%d\n", + azx_dev->bufsize, period_bytes); + return -EINVAL; +} + +int snd_i2s_stream_set_params(struct i2s_stream *azx_dev, + unsigned int format_val) +{ + unsigned int bufsize, period_bytes; + struct snd_pcm_substream *substream = azx_dev->substream; + struct snd_pcm_runtime *runtime; + int err; + + if (!substream) + return -EINVAL; + + runtime = substream->runtime; + bufsize = snd_pcm_lib_buffer_bytes(substream); + period_bytes = snd_pcm_lib_period_bytes(substream); + if (bufsize != azx_dev->bufsize || + period_bytes != azx_dev->period_bytes || + format_val != azx_dev->format_val || + runtime->no_period_wakeup != azx_dev->no_period_wakeup) { + + azx_dev->bufsize = bufsize; + azx_dev->period_bytes = period_bytes; + azx_dev->format_val = format_val; + azx_dev->no_period_wakeup = runtime->no_period_wakeup; + err = snd_i2s_stream_setup_periods(azx_dev); + if (err < 0) + return err; + } + + return 0; +} + +int snd_i2s_stream_setup(struct i2s_stream *azx_dev, int pcie, u32 paddr) +{ + struct snd_pcm_runtime *runtime; + + if (azx_dev->substream) + runtime = azx_dev->substream->runtime; + else + runtime = NULL; + + i2s_write_reg(azx_dev->sd_addr, DMA_CHAL_CONFG0, 0x8180); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, 0x80000003); + + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), upper_32_bits(azx_dev->bdl.addr)); + if (pcie) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 0x1c8); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), paddr + 0x1c8); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(0), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(0), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(0), 0x2);//0x2 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(0), 0x0);//0x0 + } else { + 
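+		/*
+		 * Capture path: mirrors the playback programming above on
+		 * the channel-1 register bank; the DMA target address is
+		 * 0x1c0 rather than the 0x1c8 used for playback.
+		 */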
i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), upper_32_bits(azx_dev->bdl.addr)); + if (pcie) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), 0x1c0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), paddr + 0x1c0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(1), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(1), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(1), 0x8);//0x8 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(1), 0x0); + } + + if (runtime && runtime->period_size > 64) + azx_dev->delay_negative_threshold = + -frames_to_bytes(runtime, 64); + else + azx_dev->delay_negative_threshold = 0; + + return 0; +} + +static int phytium_pcm_prepare(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + + struct azx *chip = &dev->chip; + struct azx_dev *azx_dev = get_azx_dev(substream); + struct i2sc_bus *bus = azx_bus(chip); + struct i2s_stream *hstr_p; + int err; + + dev->substream = substream; + azx_dev->core.substream = substream; + azx_dev->core.sd_addr = dev->regs_db; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + azx_dev->core.bdl.area = bus->bdl0.area; + azx_dev->core.bdl.addr = bus->bdl0.addr; + } else { + azx_dev->core.bdl.area = bus->bdl1.area; + azx_dev->core.bdl.addr = bus->bdl1.addr; + } + + if (!substream) + return -EINVAL; + + hstr_p = azx_stream(azx_dev); + hstr_p->direction = substream->stream; + + err = snd_i2s_stream_set_params(azx_stream(azx_dev), 0); + if (err < 0) + goto unlock; + + snd_i2s_stream_setup(azx_stream(azx_dev), dev->pcie, dev->paddr); + + unlock: + if (!err) + azx_stream(azx_dev)->prepared = 1; + + return err; +} + +void snd_i2s_stream_clear(struct i2s_stream *azx_dev) +{ + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x0); + + azx_dev->running = false; +} + +void snd_i2s_stream_stop(struct i2s_stream *azx_dev) +{ + snd_i2s_stream_clear(azx_dev); +} + +void snd_i2s_stream_start(struct i2s_stream *azx_dev, bool fresh_start) +{ + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x1); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x5); + + azx_dev->running = true; +} + +static int phytium_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + + struct azx *chip = &dev->chip; + struct i2sc_bus *bus = azx_bus(chip); + struct azx_dev *azx_dev = get_azx_dev(substream); + struct snd_pcm_substream *s; + struct i2s_stream *hstr; + bool start; + int sbits = 0; + + hstr = azx_stream(azx_dev); + hstr->direction = substream->stream; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + case SNDRV_PCM_TRIGGER_RESUME: + start = true; + break; + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_STOP: + start = false; + break; + default: + return -EINVAL; + } + + snd_pcm_group_for_each_entry(s, substream) { + if (s->pcm->card != substream->pcm->card) + continue; + azx_dev = get_azx_dev(s); + sbits |= 1 << azx_dev->core.index; + snd_pcm_trigger_done(s, substream); + } + + spin_lock(&bus->reg_lock); + + 
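+	/*
+	 * Second pass, under the register lock: start or stop every
+	 * substream linked in the group, then poke the controller's
+	 * DMA_CTL register once for the whole group.
+	 */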
snd_pcm_group_for_each_entry(s, substream) { + if (s->pcm->card != substream->pcm->card) + continue; + azx_dev = get_azx_dev(s); + if (start) + snd_i2s_stream_start(azx_stream(azx_dev), true); + else + snd_i2s_stream_stop(azx_stream(azx_dev)); + } + + i2s_write_reg(dev->regs_db, DMA_CTL, 0x1); + spin_unlock(&bus->reg_lock); + + return 0; +} + +static void phytium_pcm_free(struct snd_pcm *pcm) +{ + snd_pcm_lib_preallocate_free_for_all(pcm); +} + +void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) +{ + int cnt = 10; + u32 mask; + + if (azx_dev->sd_addr) { + if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { + mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); + mask &= ~BIT(0); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + while (cnt--) { + if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); + } else { + mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); + mask &= ~BIT(1); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + while (cnt--) { + if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), 0); + } + } +} + +static int phytium_pcm_hw_free(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + + struct azx *chip = &dev->chip; + struct i2s_stream *hstr_p; + struct azx_dev *azx_dev = get_azx_dev(substream); + int err; + + hstr_p = azx_stream(azx_dev); + hstr_p->direction = substream->stream; + snd_i2s_stream_cleanup(azx_stream(azx_dev)); + + err = chip->ops->substream_free_pages(chip, substream); + azx_stream(azx_dev)->prepared = 0; + + return err; +} + +static snd_pcm_uframes_t phytium_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + + int stream = substream->stream; + + u32 pos = i2s_read_reg(dev->regs_db, DMA_LPIB(stream)); + + return bytes_to_frames(substream->runtime, pos); +} + +static const struct snd_pcm_ops phytium_pcm_ops = { + .open = phytium_pcm_open, + .close = phytium_pcm_close, + .hw_params = phytium_pcm_hw_params, + .prepare = phytium_pcm_prepare, + .hw_free = phytium_pcm_hw_free, + .trigger = phytium_pcm_trigger, + .pointer = phytium_pcm_pointer, +}; + +static const struct snd_soc_component_driver phytium_i2s_component = { + .name = "phytium-i2s", + .pcm_new = phytium_pcm_new, + .pcm_free = phytium_pcm_free, + .ops = &phytium_pcm_ops, +}; + +/* Maximum bit resolution of a channel - not uniformly spaced */ +static const u32 fifo_width[COMP_MAX_WORDSIZE] = { + 12, 16, 20, 24, 32, 0, 0, 0 +}; + +/* Width of (DMA) bus */ +static const u32 bus_widths[COMP_MAX_DATA_WIDTH] = { + DMA_SLAVE_BUSWIDTH_1_BYTE, + DMA_SLAVE_BUSWIDTH_2_BYTES, + DMA_SLAVE_BUSWIDTH_4_BYTES, + DMA_SLAVE_BUSWIDTH_UNDEFINED +}; + +/* PCM format to support channel resolution */ +static const u32 formats[COMP_MAX_WORDSIZE] = { + SNDRV_PCM_FMTBIT_S16_LE, + 
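+	/* entries follow fifo_width[] above: the 12- and 16-bit slots
+	 * resolve to S16_LE, 20/24-bit to S24_LE, 32-bit to S32_LE
+	 */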
SNDRV_PCM_FMTBIT_S16_LE, + SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_FMTBIT_S32_LE, + 0, + 0, + 0 +}; + +static int phytium_configure_dai(struct i2s_phytium *dev) +{ + u32 comp1 = i2s_read_reg(dev->regs, dev->i2s_reg_comp1); + u32 comp2 = i2s_read_reg(dev->regs, dev->i2s_reg_comp2); + u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1)); + u32 idx; + + if (COMP1_TX_ENABLED(comp1)) { + dev_dbg(dev->dev, " phytium: play supported\n"); + idx = COMP1_TX_WORDSIZE_0(comp1); + if (WARN_ON(idx >= ARRAY_SIZE(formats))) + return -EINVAL; + } + + if (COMP1_RX_ENABLED(comp1)) { + dev_dbg(dev->dev, "phytium: record supported\n"); + idx = COMP2_RX_WORDSIZE_0(comp2); + if (WARN_ON(idx >= ARRAY_SIZE(formats))) + return -EINVAL; + if (dev->quirks & PHYTIUM_I2S_QUIRK_16BIT_IDX_OVERRIDE) + idx = 1; + } + + if (COMP1_MODE_EN(comp1)) { + dev_dbg(dev->dev, "phytium: i2s master mode supported\n"); + dev->capability |= PHYTIUM_I2S_MASTER; + } else { + dev_dbg(dev->dev, "phytium: i2s slave mode supported\n"); + dev->capability |= PHYTIUM_I2S_SLAVE; + } + + dev->fifo_th = fifo_depth / 2; + return 0; +} + +static int phytium_configure_dai_by_dt(struct i2s_phytium *dev) +{ + u32 comp1 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_1); + u32 comp2 = i2s_read_reg(dev->regs, I2S_COMP_PARAM_2); + u32 idx = COMP1_APB_DATA_WIDTH(comp1); + u32 idx2; + int ret; + + if (WARN_ON(idx >= ARRAY_SIZE(bus_widths))) + return -EINVAL; + + ret = phytium_configure_dai(dev); + if (ret < 0) + return ret; + + if (COMP1_TX_ENABLED(comp1)) { + idx2 = COMP1_TX_WORDSIZE_0(comp1); + dev->capability |= DWC_I2S_PLAY; + } + if (COMP1_RX_ENABLED(comp1)) { + idx2 = COMP2_RX_WORDSIZE_0(comp2); + dev->capability |= DWC_I2S_RECORD; + } + + return 0; +} + +static int phytium_dma_alloc_pages(struct i2sc_bus *bus, int type, size_t size, + struct snd_dma_buffer *buf) +{ + int err; + + err = snd_dma_alloc_pages(type, bus->dev, size, buf); + if (err < 0) + return err; + + return 0; +} + +int snd_i2s_bus_alloc_stream_pages(struct i2sc_bus *bus) +{ + struct i2s_stream *s; + int num_streams = 0; + int err; + + list_for_each_entry(s, &bus->stream_list, list) { + + /* allocate memory for the BDL for each stream */ + err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, + BDL_SIZE, &s->bdl); + if (num_streams == 0) { + bus->bdl0.addr = s->bdl.addr; + bus->bdl0.area = s->bdl.area; + } else { + bus->bdl1.addr = s->bdl.addr; + bus->bdl1.area = s->bdl.area; + } + num_streams++; + if (err < 0) + return -ENOMEM; + } + + if (WARN_ON(!num_streams)) + return -EINVAL; + + return 0; +} + +static int stream_direction(struct azx *chip, unsigned char index) +{ + if (index >= chip->playback_index_offset && + index < chip->playback_index_offset + chip->playback_streams) + return SNDRV_PCM_STREAM_PLAYBACK; + return SNDRV_PCM_STREAM_CAPTURE; + +} + +void snd_i2s_stream_init(struct i2sc_bus *bus, struct i2s_stream *azx_dev, + int idx, int direction, int tag) +{ + azx_dev->bus = bus; + azx_dev->sd_addr = bus->remap_addr; + + if (idx == 0) + azx_dev->sd_int_sta_mask = 1 << idx; + else + azx_dev->sd_int_sta_mask = 1 << 8; + + azx_dev->index = idx; + azx_dev->direction = direction; + azx_dev->stream_tag = tag; + + list_add_tail(&azx_dev->list, &bus->stream_list); + +} + +int azx_i2s_init_streams(struct azx *chip) +{ + int i; + + for (i = 0; i < chip->num_streams; i++) { + struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL); + int dir, tag; + + if (!azx_dev) + return -ENOMEM; + + dir = stream_direction(chip, i); + + tag = i + 1; + + 
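+		/*
+		 * Tags are 1-based; with NUM_PLAYBACK and NUM_CAPTURE both
+		 * 1, index 0 becomes the playback stream and index 1 the
+		 * capture stream (see stream_direction()).
+		 */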
snd_i2s_stream_init(azx_bus(chip), azx_stream(azx_dev), + i, dir, tag); + } + + return 0; +} + +static int azx_first_init(struct azx *chip) +{ + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + struct platform_device *pdev = to_platform_device(i2s->dev); + struct device *i2sdev = i2s->dev; + struct i2sc_bus *bus = azx_bus(chip); + struct resource *res; + int err; + unsigned int dma_bits = 64; + + chip->region_requested = 1; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bus->addr = res->start; + bus->remap_addr = i2s->regs_db; + bus->dev = i2s->pdev; + + if (bus->remap_addr == NULL) { + dev_err(i2sdev, "ioremap error\n"); + return -ENXIO; + } + + if (azx_acquire_irq(chip, 0) < 0) + return -EBUSY; + + synchronize_irq(bus->irq); + + spin_lock_init(&bus->reg_lock); + + if (!dma_set_mask(i2sdev, DMA_BIT_MASK(dma_bits))) { + err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(dma_bits)); + } else { + err = dma_set_mask(i2sdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(i2sdev, DMA_BIT_MASK(32)); + } + + chip->playback_streams = NUM_PLAYBACK; + chip->capture_streams = NUM_CAPTURE; + + chip->playback_index_offset = 0; + chip->capture_index_offset = chip->playback_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + err = azx_i2s_init_streams(chip); + if (err < 0) + return err; + + err = azx_alloc_stream_pages(chip); + if (err < 0) + return err; + + return 0; +} + +static int azx_probe_continue(struct azx *chip) +{ + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + int err; + + i2s->probe_continued = 1; + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + + chip->running = 1; + +out_free: + return err; +} + +static void azx_probe_work(struct work_struct *work) +{ + struct i2s_phytium *i2s = container_of(work, struct i2s_phytium, probe_work); + + azx_probe_continue(&i2s->chip); +} + +int azx_i2s_bus_init(struct azx *chip, + const struct i2s_io_ops *io_ops) +{ + struct i2s_bus *bus = &chip->bus; + + bus->core.io_ops = io_ops; + + INIT_LIST_HEAD(&bus->core.stream_list); + bus->card = chip->card; + mutex_init(&bus->prepare_mutex); + bus->pci = chip->pci; + + bus->core.bdl_pos_adj = chip->bdl_pos_adj; + return 0; +} + +static int i2s_phytium_create(struct platform_device *pdev, + int dev, struct azx **rchip, struct i2s_phytium *i2s) +{ + struct azx *chip; + int err; + + *rchip = NULL; + + if (!i2s) + return -ENOMEM; + chip = &i2s->chip; + + mutex_init(&chip->open_mutex); + + chip->ops = &axi_i2s_ops; + chip->dev_index = dev; + + INIT_LIST_HEAD(&chip->pcm_list); + init_completion(&i2s->probe_wait); + + chip->bdl_pos_adj = 32; + err = azx_i2s_bus_init(chip, &axi_i2s_io_ops); + if (err < 0) { + kfree(i2s); + return err; + } + + INIT_WORK(&i2s->probe_work, azx_probe_work); + *rchip = chip; + return 0; +} + +static int substream_alloc_pages(struct azx *chip, + struct snd_pcm_substream *substream, + size_t size) +{ + int ret; + + ret = snd_pcm_lib_malloc_pages(substream, size); + if (ret < 0) + return ret; + + return 0; +} + +static void phytium_dma_free_pages(struct i2sc_bus *bus, + struct snd_dma_buffer *buf) +{ + snd_dma_free_pages(buf); +} + +static const struct i2s_io_ops axi_i2s_io_ops = { + .dma_alloc_pages = phytium_dma_alloc_pages, + .dma_free_pages = phytium_dma_free_pages, +}; + +static const struct i2s_controller_ops axi_i2s_ops = { + .substream_alloc_pages = substream_alloc_pages, + .substream_free_pages = substream_free_pages, +}; + + +static int phytium_i2s_probe(struct platform_device 
+static int phytium_i2s_probe(struct platform_device *pdev)
+{
+	struct i2s_phytium *i2s;
+	struct azx *chip;
+	struct resource *res;
+	struct pdata_px210_mfd *pdata;
+	struct snd_soc_dai_driver *dai_drv;
+	struct clk *clk;
+	int err, ret;
+	int card_num = 1;
+	bool schedule_probe;
+
+	i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+	if (!i2s)
+		return -ENOMEM;
+
+	dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL);
+	if (!dai_drv)
+		return -ENOMEM;
+	memcpy(dai_drv, &phytium_i2s_dai, sizeof(phytium_i2s_dai));
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	i2s->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(i2s->regs))
+		return PTR_ERR(i2s->regs);
+	i2s->paddr = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	i2s->regs_db = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(i2s->regs_db))
+		return PTR_ERR(i2s->regs_db);
+
+	i2s->irq_id = platform_get_irq(pdev, 0);
+	if (i2s->irq_id < 0)
+		return i2s->irq_id;
+
+	i2s->i2s_reg_comp1 = I2S_COMP_PARAM_1;
+	i2s->i2s_reg_comp2 = I2S_COMP_PARAM_2;
+
+	ret = phytium_configure_dai_by_dt(i2s);
+	if (ret < 0)
+		return ret;
+
+	err = i2s_phytium_create(pdev, card_num, &chip, i2s);
+	if (err < 0)
+		return err;
+	i2s = container_of(chip, struct i2s_phytium, chip);
+	schedule_probe = !chip->disabled;
+
+	dev_set_drvdata(&pdev->dev, i2s);
+
+	pdata = dev_get_platdata(&pdev->dev);
+	i2s->dev = &pdev->dev;
+	if (pdata) {
+		dai_drv->name = pdata->name;
+		i2s->pdev = pdata->dev;
+		i2s->clk_base = pdata->clk_base;
+		i2s->pcie = 1;
+	} else {
+		ret = device_property_read_string(&pdev->dev, "dai-name",
+						  &dai_drv->name);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing dai-name property\n");
+			return ret;
+		}
+		i2s->pdev = &pdev->dev;
+		if (pdev->dev.of_node) {
+			clk = devm_clk_get(&pdev->dev, NULL);
+			if (IS_ERR(clk))
+				return PTR_ERR(clk);
+			i2s->clk_base = clk_get_rate(clk);
+		} else if (has_acpi_companion(&pdev->dev)) {
+			ret = device_property_read_u32(&pdev->dev, "i2s_clk",
+						       &i2s->clk_base);
+			if (ret < 0) {
+				dev_info(&pdev->dev, "missing i2s_clk property from acpi, use default value\n");
+				i2s->clk_base = 600000000;
+			}
+		}
+	}
+
+	ret = devm_snd_soc_register_component(&pdev->dev, &phytium_i2s_component,
+					      dai_drv, 1);
+	if (ret) {
+		dev_err(&pdev->dev, "not able to register dai\n");
+		return ret;
+	}
+
+	if (schedule_probe)
+		schedule_work(&i2s->probe_work);
+
+	if (chip->disabled)
+		complete_all(&i2s->probe_wait);
+
+	return 0;
+}
+
+static int phytium_i2s_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id phytium_i2s_of_match[] = {
+	{ .compatible = "phytium,i2s", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, phytium_i2s_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id phytium_i2s_acpi_match[] = {
+	{ "PHYT0016", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, phytium_i2s_acpi_match);
+#else
+#define phytium_i2s_acpi_match NULL
+#endif
+
+static struct platform_driver phytium_i2s_driver = {
+	.probe = phytium_i2s_probe,
+	.remove = phytium_i2s_remove,
+	.driver = {
+		.name = "phytium-i2s",
+		.of_match_table = of_match_ptr(phytium_i2s_of_match),
+		.acpi_match_table = phytium_i2s_acpi_match,
+	},
+};
+
+module_platform_driver(phytium_i2s_driver);
+
+MODULE_DESCRIPTION("Phytium I2S Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhang Yiqun ");
diff --git a/sound/soc/phytium/pmdk_dp.c b/sound/soc/phytium/pmdk_dp.c
new file mode 100755
index 0000000000000000000000000000000000000000..581907f0d8455ecb36eda605c23070f2bf021d6b
--- /dev/null
+++ b/sound/soc/phytium/pmdk_dp.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#define DAI_CNT(pmdk_dai_)	(sizeof(pmdk_dai_) / sizeof(struct snd_soc_dai_link))
+
+struct pmdk_dp_private {
+	struct snd_soc_jack jack0;
+	struct snd_soc_jack jack1;
+	struct snd_soc_jack jack2;
+};
+
+/* PMDK widgets */
+static const struct snd_soc_dapm_widget pmdk_dp_dapm_widgets[] = {
+	SND_SOC_DAPM_LINE("DP", NULL),
+};
+
+/* PMDK control */
+static const struct snd_kcontrol_new pmdk_controls[] = {
+	SOC_DAPM_PIN_SWITCH("DP"),
+};
+
+/* PMDK connections */
+static const struct snd_soc_dapm_route pmdk_dp_audio_map[] = {
+	{"DP", NULL, "TX"},
+};
+
+static struct snd_soc_jack_pin dp0_pins[] = {
+	{
+		.pin = "HDMI/DP,pcm=0",
+		.mask = SND_JACK_LINEOUT,
+	},
+};
+
+static struct snd_soc_jack_pin dp1_pins[] = {
+	{
+		.pin = "HDMI/DP,pcm=1",
+		.mask = SND_JACK_LINEOUT,
+	},
+};
+
+static struct snd_soc_jack_pin dp2_pins[] = {
+	{
+		.pin = "HDMI/DP,pcm=2",
+		.mask = SND_JACK_LINEOUT,
+	},
+};
+
+#define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+		      SND_SOC_DAIFMT_CBS_CFS)
+
+static int pmdk_dp0_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_component *component = runtime->codec_dai->component;
+	int ret;
+
+	ret = snd_soc_card_jack_new(card, "HDMI/DP,pcm=0",
+				    SND_JACK_LINEOUT,
+				    &priv->jack0, dp0_pins,
+				    ARRAY_SIZE(dp0_pins));
+	if (ret) {
+		dev_err(card->dev, "Jack creation failed %d\n", ret);
+		return ret;
+	}
+	snd_soc_component_set_jack(component, &priv->jack0, NULL);
+	return ret;
+}
+
+static int pmdk_dp1_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_component *component = runtime->codec_dai->component;
+	int ret;
+
+	ret = snd_soc_card_jack_new(card, "HDMI/DP,pcm=1",
+				    SND_JACK_LINEOUT,
+				    &priv->jack1, dp1_pins,
+				    ARRAY_SIZE(dp1_pins));
+	if (ret) {
+		dev_err(card->dev, "Jack creation failed %d\n", ret);
+		return ret;
+	}
+	snd_soc_component_set_jack(component, &priv->jack1, NULL);
+	return ret;
+}
+
+static int pmdk_dp2_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	struct pmdk_dp_private *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_component *component = runtime->codec_dai->component;
+	int ret;
+
+	ret = snd_soc_card_jack_new(card, "HDMI/DP,pcm=2",
+				    SND_JACK_LINEOUT,
+				    &priv->jack2, dp2_pins,
+				    ARRAY_SIZE(dp2_pins));
+	if (ret) {
+		dev_err(card->dev, "Jack creation failed %d\n", ret);
+		return ret;
+	}
+	snd_soc_component_set_jack(component, &priv->jack2, NULL);
+	return ret;
+}
+
+static struct snd_soc_dai_link pmdk_dai_local[] = {
+	{
+		.name = "Phytium dp0-audio",
+		.stream_name = "Playback",
+		.cpu_dai_name = "phytium-i2s-dp0",
+		.codec_dai_name = "i2s-hifi",
+		.platform_name = "snd-soc-dummy",
+		.codec_name = "hdmi-audio-codec.0",
+		.dai_fmt = SMDK_DAI_FMT,
+		.init = pmdk_dp0_init,
+	},
+	{
+		.name = "Phytium dp1-audio",
+		.stream_name = "Playback",
+		.cpu_dai_name = "phytium-i2s-dp1",
+		.codec_dai_name = "i2s-hifi",
+		.platform_name = "snd-soc-dummy",
+		.codec_name = "hdmi-audio-codec.1",
+		.dai_fmt = SMDK_DAI_FMT,
+		.init = pmdk_dp1_init,
+	},
+	{
+		.name = "Phytium dp2-audio",
+		.stream_name = "Playback",
+		.cpu_dai_name = "phytium-i2s-dp2",
+		.codec_dai_name = "i2s-hifi",
+		.platform_name = "snd-soc-dummy",
+		.codec_name = "hdmi-audio-codec.2",
+		.dai_fmt = SMDK_DAI_FMT,
+		.init = pmdk_dp2_init,
+	},
+};
+
+static struct snd_soc_card pmdk = {
+	.name = "PMDK-I2S",
+	.owner = THIS_MODULE,
+
+	.dapm_widgets = pmdk_dp_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(pmdk_dp_dapm_widgets),
+	.controls = pmdk_controls,
+	.num_controls = ARRAY_SIZE(pmdk_controls),
+	.dapm_routes = pmdk_dp_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(pmdk_dp_audio_map),
+};
+
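+/*
+ * The card's dai_link array is assembled at probe time: "num-dp" gives
+ * the number of links to register and each set bit in "dp-mask" selects
+ * the corresponding entry from pmdk_dai_local[].
+ */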
+static int pmdk_sound_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &pmdk;
+	struct pmdk_dp_private *priv;
+	struct snd_soc_dai_link *pmdk_dai;
+	u32 num_dp = 2;
+	u8 dp_mask;
+	int i, j = 0;
+
+	card->dev = &pdev->dev;
+
+	device_property_read_u32(&pdev->dev, "num-dp", &num_dp);
+	if (!num_dp || num_dp > DAI_CNT(pmdk_dai_local))
+		return -EINVAL;
+
+	/* default to the first num_dp ports when "dp-mask" is absent */
+	if (device_property_read_u8(&pdev->dev, "dp-mask", &dp_mask))
+		dp_mask = GENMASK(num_dp - 1, 0);
+
+	pmdk_dai = devm_kcalloc(&pdev->dev, num_dp, sizeof(*pmdk_dai),
+				GFP_KERNEL);
+	if (!pmdk_dai)
+		return -ENOMEM;
+
+	for (i = 0; i < num_dp; i++) {
+		for (; j < DAI_CNT(pmdk_dai_local); j++) {
+			if (BIT(j) & dp_mask) {
+				pmdk_dai[i] = pmdk_dai_local[j];
+				j++;
+				break;
+			}
+		}
+	}
+
+	card->dai_link = pmdk_dai;
+	card->num_links = num_dp;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	snd_soc_card_set_drvdata(card, priv);
+
+	return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct of_device_id pmdk_sound_of_match[] = {
+	{ .compatible = "phytium,pmdk-dp", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pmdk_sound_of_match);
+
+static const struct acpi_device_id pmdk_sound_acpi_match[] = {
+	{ "PHYT8006", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match);
+
+static struct platform_driver pmdk_sound_driver = {
+	.probe = pmdk_sound_probe,
+	.driver = {
+		.name = "pmdk_dp",
+		.acpi_match_table = pmdk_sound_acpi_match,
+		.of_match_table = pmdk_sound_of_match,
+#ifdef CONFIG_PM
+		.pm = &snd_soc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pmdk_sound_driver);
+
+MODULE_AUTHOR("Zhang Yiqun");
+MODULE_DESCRIPTION("ALSA SoC PMDK DP");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/phytium/pmdk_es8336.c b/sound/soc/phytium/pmdk_es8336.c
new file mode 100644
index 0000000000000000000000000000000000000000..38919f40694c7f00fe27761b562d441df7da3ce5
--- /dev/null
+++ b/sound/soc/phytium/pmdk_es8336.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <sound/soc.h>
+
+/* PMDK widgets */
+static const struct snd_soc_dapm_widget pmdk_es8336_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("HP", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_MIC("Mic In", NULL),
+};
+
+/* PMDK control */
+static const struct snd_kcontrol_new pmdk_controls[] = {
+	SOC_DAPM_PIN_SWITCH("HP"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Mic In"),
+};
+
+/* PMDK connections */
+static const struct snd_soc_dapm_route pmdk_es8336_audio_map[] = {
+	{"DMIC", NULL, "Int Mic"},
+	{"MIC1", NULL, "Mic In"},
+	{"MIC2", NULL, "Mic In"},
+
+	{"HP", NULL, "HPOL"},
+	{"HP", NULL, "HPOR"},
+};
+
+#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+		      SND_SOC_DAIFMT_CBS_CFS)
+
+static struct snd_soc_dai_link pmdk_dai[] = {
+	{
+		.name = "ES8336 HIFI",
+		.stream_name = "ES8336 HIFI",
+		.cpu_dai_name = "phytium-i2s-lsd",
+		.codec_dai_name = "es8336-hifi",
+		.platform_name = "snd-soc-dummy",
+		.codec_name = "i2c-ESSX8336:00",
+		.dai_fmt = PMDK_DAI_FMT,
+	},
+};
+
+static struct snd_soc_card pmdk = {
+	.name = "PMDK-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = pmdk_dai,
+	.num_links = ARRAY_SIZE(pmdk_dai),
+
+	.dapm_widgets = pmdk_es8336_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(pmdk_es8336_dapm_widgets),
+	.controls = pmdk_controls,
+	.num_controls = ARRAY_SIZE(pmdk_controls),
+	.dapm_routes = pmdk_es8336_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(pmdk_es8336_audio_map),
+};
+
+static int pmdk_sound_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &pmdk;
+	struct device *dev = &pdev->dev;
+
+	card->dev = dev;
+	return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct acpi_device_id pmdk_sound_acpi_match[] = {
+	{ "PHYT8005", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match);
+
+static struct platform_driver pmdk_sound_driver = {
+	.probe = pmdk_sound_probe,
+	.driver = {
+		.name = "pmdk_es8336",
+		.acpi_match_table = pmdk_sound_acpi_match,
+#ifdef CONFIG_PM
+		.pm = &snd_soc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pmdk_sound_driver);
+
+MODULE_AUTHOR("Zhang Yiqun ");
+MODULE_DESCRIPTION("ALSA SoC PMDK ES8336");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/phytium/pmdk_es8388.c b/sound/soc/phytium/pmdk_es8388.c
new file mode 100644
index 0000000000000000000000000000000000000000..0a1bd3b52d3b6f72719423ff7a23cb652911cb8a
--- /dev/null
+++ b/sound/soc/phytium/pmdk_es8388.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2023 Phytium Technology Co., Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+static struct snd_soc_jack hs_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+	{
+		.pin = "FrontIn",
+		.mask = SND_JACK_MICROPHONE,
+	},
+	{
+		.pin = "RearIn",
+		.mask = SND_JACK_MICROPHONE,
+		.invert = 1,
+	},
+	{
+		.pin = "Front",
+		.mask = SND_JACK_HEADPHONE,
+	},
+	{
+		.pin = "Rear",
+		.mask = SND_JACK_HEADPHONE,
+		.invert = 1,
+	},
+};
+
+/* Headset jack detection gpios */
+static struct snd_soc_jack_gpio hs_jack_gpios[] = {
+	{
+		.name = "det",
+		.report = SND_JACK_HEADSET,
+		.debounce_time = 200,
+		.invert = 1,
+	},
+};
+
+/* PMDK widgets */
+static const struct snd_soc_dapm_widget pmdk_es8388_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Front", NULL),
+	SND_SOC_DAPM_HP("Rear", NULL),
+
+	SND_SOC_DAPM_MIC("FrontIn", NULL),
+	SND_SOC_DAPM_MIC("RearIn", NULL),
+};
+
+/* PMDK control */
+static const struct snd_kcontrol_new pmdk_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Front"),
+	SOC_DAPM_PIN_SWITCH("Rear"),
+	SOC_DAPM_PIN_SWITCH("FrontIn"),
+	SOC_DAPM_PIN_SWITCH("RearIn"),
+};
+
+/* PMDK connections */
+static const struct snd_soc_dapm_route pmdk_es8388_audio_map[] = {
+	{"LINPUT1", NULL, "FrontIn"},
+	{"RINPUT1", NULL, "FrontIn"},
+
+	{"LINPUT2", NULL, "RearIn"},
+	{"RINPUT2", NULL, "RearIn"},
+
+	{"Front", NULL, "LOUT1"},
+	{"Front", NULL, "ROUT1"},
+
+	{"Rear", NULL, "LOUT2"},
+	{"Rear", NULL, "ROUT2"},
+};
+
+static int pmdk_es8388_init(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret;
+
+	/* Jack detection API stuff */
+	ret = snd_soc_card_jack_new(rtd->card, "Headset Jack", SND_JACK_HEADSET,
+				    &hs_jack, hs_jack_pins,
+				    ARRAY_SIZE(hs_jack_pins));
+	if (ret)
+		return ret;
+
+	return snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios),
+				      hs_jack_gpios);
+}
+
+#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
+		      SND_SOC_DAIFMT_CBS_CFS)
+
+static struct snd_soc_dai_link pmdk_dai[] = {
+	{
+		.name = "ES8388 HIFI",
+		.stream_name = "ES8388 HIFI",
+		.cpu_dai_name = "phytium-i2s-lsd",
+		.codec_dai_name = "es8388-hifi",
+		.platform_name = "snd-soc-dummy",
+		.codec_name = "i2c-ESSX8388:00",
+		.dai_fmt = PMDK_DAI_FMT,
+		.init = pmdk_es8388_init,
+	},
+};
+
+static struct snd_soc_card pmdk = {
+	.name = "PMDK-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = pmdk_dai,
+	.num_links = ARRAY_SIZE(pmdk_dai),
+
+	.dapm_widgets = pmdk_es8388_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(pmdk_es8388_dapm_widgets),
+	.controls = pmdk_controls,
+	.num_controls = ARRAY_SIZE(pmdk_controls),
+	.dapm_routes = pmdk_es8388_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(pmdk_es8388_audio_map),
+};
+
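+/*
+ * The "det" jack-detection GPIO is optional: if the firmware does not
+ * describe one, drop the init callback so the card still probes, just
+ * without jack detection.
+ */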
+static int pmdk_sound_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &pmdk;
+	struct device *dev = &pdev->dev;
+	int n;
+
+	card->dev = dev;
+	hs_jack_gpios[0].gpiod_dev = dev;
+
+	n = gpiod_count(dev, "det");
+	if (n < 0)
+		pmdk_dai[0].init = NULL;
+
+	return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct acpi_device_id pmdk_sound_acpi_match[] = {
+	{ "PHYT8004", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pmdk_sound_acpi_match);
+
+static struct platform_driver pmdk_sound_driver = {
+	.probe = pmdk_sound_probe,
+	.driver = {
+		.name = "pmdk_es8388",
+		.acpi_match_table = pmdk_sound_acpi_match,
+#ifdef CONFIG_PM
+		.pm = &snd_soc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pmdk_sound_driver);
+
+MODULE_AUTHOR("Zhang Yiqun");
+MODULE_DESCRIPTION("ALSA SoC PMDK ES8388");
+MODULE_LICENSE("GPL");
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 17cecc96f735e3785bb1f6a30e4e20ee613b4bcd..217d39f4039365be32f87dd7c82e84a3015b503f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -67,7 +67,7 @@ static inline bool userspace_irqchip(struct kvm *kvm)
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
-		      HRTIMER_MODE_ABS);
+		      HRTIMER_MODE_ABS_HARD);
 }
 
 static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
@@ -638,10 +638,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu_ptimer(vcpu)->cntvoff = 0;
 
 	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
-	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	timer->bg_timer.function = kvm_bg_timer_expire;
 
-	hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	timer->phys_timer.function = kvm_phys_timer_expire;
 
 	vtimer->irq.irq = default_vtimer_irq.irq;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index b943ec5345cbd4724b1260836653cde521aeb15d..ddc6db1e8cd27ad9156526d9203634f684a715f6 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -725,7 +725,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
-		preempt_disable();
+		migrate_disable();
 
 		kvm_pmu_flush_hwstate(vcpu);
 
@@ -774,7 +774,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			local_irq_enable();
-			preempt_enable();
+			migrate_enable();
 			continue;
 		}
 
@@ -852,7 +852,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		/* Exit types that need handling before we can be preempted */
 		handle_exit_early(vcpu, run, ret);
 
-		preempt_enable();
+		migrate_enable();
 
 		ret = handle_exit(vcpu, run, ret);
 	}