diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml index 8a210c4c5f82..0a3ef7fd03fa 100644 --- a/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,dispcc-sm6125.yaml @@ -29,6 +29,7 @@ properties: - description: Link clock from DP PHY - description: VCO DIV clock from DP PHY - description: AHB config clock from GCC + - description: GPLL0 div source from GCC clock-names: items: @@ -39,6 +40,7 @@ properties: - const: dp_phy_pll_link_clk - const: dp_phy_pll_vco_div_clk - const: cfg_ahb_clk + - const: gcc_disp_gpll0_div_clk_src '#clock-cells': const: 1 @@ -46,6 +48,16 @@ properties: '#power-domain-cells': const: 1 + power-domains: + description: + A phandle and PM domain specifier for the CX power domain. + maxItems: 1 + + required-opps: + description: + A phandle to an OPP node describing the power domain's performance point. + maxItems: 1 + reg: maxItems: 1 @@ -63,23 +75,31 @@ examples: - | #include #include + #include clock-controller@5f00000 { compatible = "qcom,sm6125-dispcc"; reg = <0x5f00000 0x20000>; + clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, <&dsi0_phy 0>, <&dsi0_phy 1>, <&dsi1_phy 1>, <&dp_phy 0>, <&dp_phy 1>, - <&gcc GCC_DISP_AHB_CLK>; + <&gcc GCC_DISP_AHB_CLK>, + <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>; clock-names = "bi_tcxo", "dsi0_phy_pll_out_byteclk", "dsi0_phy_pll_out_dsiclk", "dsi1_phy_pll_out_dsiclk", "dp_phy_pll_link_clk", "dp_phy_pll_vco_div_clk", - "cfg_ahb_clk"; + "cfg_ahb_clk", + "gcc_disp_gpll0_div_clk_src"; + + required-opps = <&rpmhpd_opp_ret>; + power-domains = <&rpmpd SM6125_VDDCX>; + #clock-cells = <1>; #power-domain-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml index 0521261b04a9..ae894d996d21 100644 --- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml +++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml @@ -49,6 +49,9 @@ properties: description: | OF device-tree gpio specification for RSTX pin(active low system reset) + interrupts: + maxItems: 1 + toshiba,hpd-pin: $ref: /schemas/types.yaml#/definitions/uint32 enum: diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml index 7a7cf3fb3e6d..a31ec9a4179f 100644 --- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml +++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml @@ -28,6 +28,7 @@ properties: - qcom,sm8350-dp - items: - enum: + - qcom,sm8250-dp - qcom,sm8450-dp - qcom,sm8550-dp - const: qcom,sm8350-dp diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml index 01848bdd5873..b8d1f2b7d541 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml @@ -27,6 +27,7 @@ properties: - qcom,sdm660-dsi-ctrl - qcom,sdm845-dsi-ctrl - qcom,sm6115-dsi-ctrl + - qcom,sm6125-dsi-ctrl - qcom,sm6350-dsi-ctrl - qcom,sm6375-dsi-ctrl - qcom,sm8150-dsi-ctrl @@ -166,6 +167,10 @@ properties: description: Phandle to vdd regulator device node + refgen-supply: + description: + Phandle to REFGEN regulator device node + vcca-supply: description: Phandle to vdd regulator device node @@ -301,6 +306,7 @@ 
allOf: contains: enum: - qcom,msm8998-dsi-ctrl + - qcom,sm6125-dsi-ctrl - qcom,sm6350-dsi-ctrl then: properties: diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml index a43e11d3b00d..2361da5f6736 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml @@ -19,6 +19,7 @@ properties: - qcom,dsi-phy-14nm-2290 - qcom,dsi-phy-14nm-660 - qcom,dsi-phy-14nm-8953 + - qcom,sm6125-dsi-phy-14nm reg: items: @@ -35,6 +36,16 @@ properties: vcca-supply: description: Phandle to vcca regulator device node. + power-domains: + description: + A phandle and PM domain specifier for an optional power domain. + maxItems: 1 + + required-opps: + description: + A phandle to an OPP node describing the power domain's performance point. + maxItems: 1 + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/display/msm/gpu.yaml b/Documentation/devicetree/bindings/display/msm/gpu.yaml index 58ca8912a8c3..56b9b247e8c2 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gpu.yaml @@ -13,6 +13,12 @@ maintainers: properties: compatible: oneOf: + - description: | + The driver is parsing the compat string for Adreno to + figure out the chip-id. + items: + - pattern: '^qcom,adreno-[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]$' + - const: qcom,adreno - description: | The driver is parsing the compat string for Adreno to figure out the gpu-id and patch level. diff --git a/Documentation/devicetree/bindings/display/msm/mdss-common.yaml b/Documentation/devicetree/bindings/display/msm/mdss-common.yaml index ccd7d6417523..a8086ca09d9f 100644 --- a/Documentation/devicetree/bindings/display/msm/mdss-common.yaml +++ b/Documentation/devicetree/bindings/display/msm/mdss-common.yaml @@ -77,6 +77,12 @@ properties: items: - description: MDSS_CORE reset + memory-region: + maxItems: 1 + description: + Phandle to a node describing a reserved framebuffer memory region. + For example, the splash memory region set up by the bootloader. 
+ required: - reg - reg-names diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml index 630b11480496..ea75f0f95d5c 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sc7180-dpu.yaml @@ -15,6 +15,7 @@ properties: compatible: enum: - qcom,sc7180-dpu + - qcom,sm6125-dpu - qcom,sm6350-dpu - qcom,sm6375-dpu @@ -63,7 +64,9 @@ allOf: - if: properties: compatible: - const: qcom,sm6375-dpu + enum: + - qcom,sm6375-dpu + - qcom,sm6125-dpu then: properties: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6125-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6125-mdss.yaml new file mode 100644 index 000000000000..57f0e3647711 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6125-mdss.yaml @@ -0,0 +1,213 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,sm6125-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SM6125 Display MDSS + +maintainers: + - Marijn Suijten + +description: + SM6125 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks + like DPU display controller, DSI and DP interfaces etc. + +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + const: qcom,sm6125-mdss + + clocks: + items: + - description: Display AHB clock from gcc + - description: Display AHB clock + - description: Display core clock + + clock-names: + items: + - const: iface + - const: ahb + - const: core + + iommus: + maxItems: 1 + + interconnects: + maxItems: 2 + + interconnect-names: + maxItems: 2 + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sm6125-dpu + + "^dsi@[0-9a-f]+$": + type: object + properties: + compatible: + items: + - const: qcom,sm6125-dsi-ctrl + - const: qcom,mdss-dsi-ctrl + + "^phy@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sm6125-dsi-phy-14nm + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + + display-subsystem@5e00000 { + compatible = "qcom,sm6125-mdss"; + reg = <0x05e00000 0x1000>; + reg-names = "mdss"; + + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + + clocks = <&gcc GCC_DISP_AHB_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>; + clock-names = "iface", + "ahb", + "core"; + + power-domains = <&dispcc MDSS_GDSC>; + + iommus = <&apps_smmu 0x400 0x0>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@5e01000 { + compatible = "qcom,sm6125-dpu"; + reg = <0x05e01000 0x83208>, + <0x05eb0000 0x2008>; + reg-names = "mdp", "vbif"; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + clocks = <&gcc GCC_DISP_HF_AXI_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_ROT_CLK>, + <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>, + <&gcc GCC_DISP_THROTTLE_CORE_CLK>; + clock-names = "bus", + "iface", + "rot", + "lut", + "core", + "vsync", + "throttle"; + assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmpd SM6125_VDDCX>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + 
remote-endpoint = <&mdss_dsi0_in>; + }; + }; + }; + }; + + dsi@5e94000 { + compatible = "qcom,sm6125-dsi-ctrl", "qcom,mdss-dsi-ctrl"; + reg = <0x05e94000 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <4>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK>, + <&dispcc DISP_CC_MDSS_ESC0_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&gcc GCC_DISP_HF_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>; + assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>; + + operating-points-v2 = <&dsi_opp_table>; + power-domains = <&rpmpd SM6125_VDDCX>; + + phys = <&mdss_dsi0_phy>; + phy-names = "dsi"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + mdss_dsi0_in: endpoint { + remote-endpoint = <&dpu_intf1_out>; + }; + }; + + port@1 { + reg = <1>; + mdss_dsi0_out: endpoint { + }; + }; + }; + }; + + phy@5e94400 { + compatible = "qcom,sm6125-dsi-phy-14nm"; + reg = <0x05e94400 0x100>, + <0x05e94500 0x300>, + <0x05e94800 0x188>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&rpmcc RPM_SMD_XO_CLK_SRC>; + clock-names = "iface", + "ref"; + + required-opps = <&rpmpd_opp_nom>; + power-domains = <&rpmpd SM6125_VDDMX>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml index ed0ad194d4ce..63962a8f2faf 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6350-mdss.yaml @@ -131,13 +131,6 @@ examples: remote-endpoint = <&dsi0_in>; }; }; - - port@1 { - reg = <1>; - dpu_intf2_out: endpoint { - remote-endpoint = <&dsi1_in>; - }; - }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm6375-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm6375-mdss.yaml index 76369a4f7c4d..595a9d56949c 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm6375-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm6375-mdss.yaml @@ -132,13 +132,6 @@ examples: remote-endpoint = <&dsi0_in>; }; }; - - port@1 { - reg = <1>; - dpu_intf2_out: endpoint { - remote-endpoint = <&dsi1_in>; - }; - }; }; }; diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml index 79a226e4cc6a..f2cbeb435f1b 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8350-mdss.yaml @@ -52,6 +52,12 @@ patternProperties: compatible: const: qcom,sm8350-dpu + "^displayport-controller@[0-9a-f]+$": + type: object + properties: + compatible: + const: qcom,sm8350-dp + "^dsi@[0-9a-f]+$": type: object properties: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml index f26eb5643aed..494e2a080e99 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8450-mdss.yaml @@ -42,6 +42,14 @@ patternProperties: compatible: const: qcom,sm8450-dpu + 
"^displayport-controller@[0-9a-f]+$": + type: object + properties: + compatible: + items: + - const: qcom,sm8450-dp + - const: qcom,sm8350-dp + "^dsi@[0-9a-f]+$": type: object properties: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml index 887be33ba108..70ce7cb7a80d 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml @@ -42,6 +42,14 @@ patternProperties: compatible: const: qcom,sm8550-dpu + "^displayport-controller@[0-9a-f]+$": + type: object + properties: + compatible: + items: + - const: qcom,sm8550-dp + - const: qcom,sm8350-dp + "^dsi@[0-9a-f]+$": type: object properties: diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml index c5d1df680858..e7ab6224b52e 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml @@ -18,6 +18,7 @@ properties: - enum: - bananapi,lhr050h41 - feixin,k101-im2byl02 + - tdo,tl050hdv35 - wanchanglong,w552946aba - const: ilitek,ili9881c diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml index 929fe046d1e7..9f1016551e0b 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml @@ -40,6 +40,12 @@ properties: items: - enum: - auo,b101ew05 + # Chunghwa Picture Tubes Ltd. 7" WXGA (800x1280) TFT LCD LVDS panel + - chunghwa,claa070wp03xg + # HannStar Display Corp. HSD101PWW2 10.1" WXGA (1280x800) LVDS panel + - hannstar,hsd101pww2 + # Hydis Technologies 7" WXGA (800x1280) TFT LCD LVDS panel + - hydis,hv070wx2-1e0 - tbs,a711-panel - const: panel-lvds diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 1d4936fc5182..25b4589d4a58 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -103,8 +103,6 @@ properties: - cdtech,s070wv95-ct16 # Chefree CH101OLHLWH-002 10.1" (1280x800) color TFT LCD panel - chefree,ch101olhlwh-002 - # Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel - - chunghwa,claa070wp03xg # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel - chunghwa,claa101wa01a # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel @@ -168,8 +166,6 @@ properties: - hannstar,hsd070pww1 # HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel - hannstar,hsd100pxn1 - # HannStar Display Corp. HSD101PWW2 10.1" WXGA (1280x800) LVDS panel - - hannstar,hsd101pww2 # Hitachi Ltd. 
Corporation 9" WVGA (800x480) TFT LCD panel - hit,tx23d38vm0caa # InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel @@ -196,6 +192,8 @@ properties: - innolux,n116bge # InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel - innolux,n125hce-gn1 + # InnoLux 15.6" FHD (1920x1080) TFT LCD panel + - innolux,g156hce-l01 # InnoLux 15.6" WXGA TFT LCD panel - innolux,n156bge-l21 # Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml index fa6556363cca..ef162b51d010 100644 --- a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml +++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml @@ -15,17 +15,26 @@ allOf: properties: compatible: - const: sitronix,st7789v + enum: + - edt,et028013dma + - inanbo,t28cp45tn89-v17 + - jasonic,jt240mhqs-hwt-ek-e3 + - sitronix,st7789v reg: true reset-gpios: true power-supply: true backlight: true port: true + rotation: true spi-cpha: true spi-cpol: true + spi-rx-bus-width: + minimum: 0 + maximum: 1 + dc-gpios: maxItems: 1 description: DCX pin, Display data/command selection pin in parallel interface @@ -33,7 +42,6 @@ properties: required: - compatible - reg - - reset-gpios - power-supply unevaluatedProperties: false @@ -52,6 +60,7 @@ examples: reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>; backlight = <&pwm_bl>; power-supply = <&power>; + rotation = <180>; spi-max-frequency = <100000>; spi-cpol; spi-cpha; diff --git a/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml b/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml new file mode 100644 index 000000000000..d817f998cddc --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/startek,kd070fhfid015.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Startek Electronic Technology Co. kd070fhfid015 7 inch TFT LCD panel + +maintainers: + - Alexandre Mergnat + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: startek,kd070fhfid015 + + enable-gpios: true + + iovcc-supply: + description: Reference to the regulator powering the panel IO pins. + + reg: + maxItems: 1 + description: DSI virtual channel + + reset-gpios: true + + port: true + + power-supply: true + +additionalProperties: false + +required: + - compatible + - enable-gpios + - iovcc-supply + - reg + - reset-gpios + - port + - power-supply + +examples: + - | + #include + + dsi0 { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "startek,kd070fhfid015"; + reg = <0>; + enable-gpios = <&pio 67 GPIO_ACTIVE_HIGH>; + reset-gpios = <&pio 20 GPIO_ACTIVE_HIGH>; + iovcc-supply = <&mt6357_vsim1_reg>; + power-supply = <&vsys_lcm_reg>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + }; + }; + +... 
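The startek binding above fixes the panel's control resources (iovcc-supply, power-supply, enable-gpios, reset-gpios) but, as usual for bindings, not the driver's bring-up order. A minimal sketch of how a consumer could sequence them — the context struct, ordering and delays below are illustrative assumptions, not the in-tree panel driver::

    /* Hypothetical bring-up sketch for a panel wired per the binding
     * above; the struct, ordering and delays are assumptions.
     */
    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>
    #include <linux/regulator/consumer.h>

    struct kd070fhfid015 {                     /* illustrative only */
            struct regulator *iovcc;           /* iovcc-supply */
            struct regulator *power;           /* power-supply */
            struct gpio_desc *enable_gpio;     /* enable-gpios */
            struct gpio_desc *reset_gpio;      /* reset-gpios */
    };

    static int kd070fhfid015_power_on(struct kd070fhfid015 *ctx)
    {
            int ret;

            ret = regulator_enable(ctx->iovcc);     /* IO rail first */
            if (ret)
                    return ret;

            ret = regulator_enable(ctx->power);
            if (ret) {
                    regulator_disable(ctx->iovcc);
                    return ret;
            }

            gpiod_set_value_cansleep(ctx->enable_gpio, 1);

            /* Pulse reset; the active level comes from the DT GPIO flags. */
            gpiod_set_value_cansleep(ctx->reset_gpio, 1);
            usleep_range(10000, 11000);
            gpiod_set_value_cansleep(ctx->reset_gpio, 0);

            return 0;
    }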
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml b/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml new file mode 100644 index 000000000000..6ba323683921 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/visionox,r66451.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Visionox R66451 AMOLED DSI Panel + +maintainers: + - Jessica Zhang + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: visionox,r66451 + + reg: + maxItems: 1 + description: DSI virtual channel + + vddio-supply: true + vdd-supply: true + port: true + reset-gpios: true + +additionalProperties: false + +required: + - compatible + - reg + - vddio-supply + - vdd-supply + - reset-gpios + - port + +examples: + - | + #include + dsi { + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "visionox,r66451"; + reg = <0>; + vddio-supply = <&vreg_l12c_1p8>; + vdd-supply = <&vreg_l13c_3p0>; + + reset-gpios = <&tlmm 24 GPIO_ACTIVE_LOW>; + + port { + panel0_in: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml index 94bb5ef567c6..20e2bd15d4d2 100644 --- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml +++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml @@ -49,15 +49,15 @@ properties: solomon,height: $ref: /schemas/types.yaml#/definitions/uint32 - default: 16 description: - Height in pixel of the screen driven by the controller + Height in pixels of the screen driven by the controller. + The default value is controller-dependent. solomon,width: $ref: /schemas/types.yaml#/definitions/uint32 - default: 96 description: - Width in pixel of the screen driven by the controller + Width in pixels of the screen driven by the controller. + The default value is controller-dependent. solomon,page-offset: $ref: /schemas/types.yaml#/definitions/uint32 @@ -157,6 +157,10 @@ allOf: const: sinowealth,sh1106 then: properties: + solomon,width: + default: 132 + solomon,height: + default: 64 solomon,dclk-div: default: 1 solomon,dclk-frq: @@ -171,6 +175,10 @@ allOf: - solomon,ssd1305 then: properties: + solomon,width: + default: 132 + solomon,height: + default: 64 solomon,dclk-div: default: 1 solomon,dclk-frq: @@ -185,6 +193,10 @@ allOf: - solomon,ssd1306 then: properties: + solomon,width: + default: 128 + solomon,height: + default: 64 solomon,dclk-div: default: 1 solomon,dclk-frq: @@ -199,6 +211,10 @@ allOf: - solomon,ssd1307 then: properties: + solomon,width: + default: 128 + solomon,height: + default: 39 solomon,dclk-div: default: 2 solomon,dclk-frq: @@ -215,6 +231,10 @@ allOf: - solomon,ssd1309 then: properties: + solomon,width: + default: 128 + solomon,height: + default: 64 solomon,dclk-div: default: 1 solomon,dclk-frq: diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml index b6b402f16161..ae09cd3cbce1 100644 --- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml +++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml @@ -12,14 +12,18 @@ maintainers: - Tomi Valkeinen description: | - The AM65x TI Keystone Display SubSystem with two output ports and - two video planes.
The first video port supports OLDI and the second - supports DPI format. The fist plane is full video plane with all - features and the second is a "lite plane" without scaling support. + The AM625 and AM65x TI Keystone Display SubSystem with two output + ports and two video planes. In AM65x DSS, the first video port + supports 1 OLDI TX and in AM625 DSS, the first video port output is + internally routed to 2 OLDI TXes. The second video port supports DPI + format. The first plane is a full video plane with all features and + the second is a "lite plane" without scaling support. properties: compatible: - const: ti,am65x-dss + enum: + - ti,am625-dss + - ti,am65x-dss reg: description: @@ -80,7 +84,9 @@ properties: port@0: $ref: /schemas/graph.yaml#/properties/port description: - The DSS OLDI output port node form video port 1 + For AM65x DSS, the OLDI output port node from video port 1. + For AM625 DSS, the internal DPI output port node from video + port 1. port@1: $ref: /schemas/graph.yaml#/properties/port diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml index 05e6f2df604c..3e2d216c6432 100644 --- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml +++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml @@ -13,6 +13,9 @@ description: Supports the Elan eKTH6915 touchscreen controller. This touchscreen controller uses the i2c-hid protocol with a reset GPIO. +allOf: + - $ref: /schemas/input/touchscreen/touchscreen.yaml# + properties: compatible: items: @@ -24,6 +27,8 @@ properties: interrupts: maxItems: 1 + panel: true + reset-gpios: description: Reset GPIO; not all touchscreens using eKTH6915 hook this up. diff --git a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml index 1edad1da1196..358cb8275bf1 100644 --- a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml +++ b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml @@ -14,6 +14,9 @@ description: This touchscreen uses the i2c-hid protocol but has some non-standard power sequencing required. +allOf: + - $ref: /schemas/input/touchscreen/touchscreen.yaml# + properties: compatible: oneOf: @@ -30,6 +33,8 @@ properties: interrupts: maxItems: 1 + panel: true + reset-gpios: true diff --git a/Documentation/devicetree/bindings/input/hid-over-i2c.yaml b/Documentation/devicetree/bindings/input/hid-over-i2c.yaml index 7156b08f7645..138caad96a29 100644 --- a/Documentation/devicetree/bindings/input/hid-over-i2c.yaml +++ b/Documentation/devicetree/bindings/input/hid-over-i2c.yaml @@ -44,6 +44,8 @@ properties: description: HID descriptor address $ref: /schemas/types.yaml#/definitions/uint32 + panel: true + post-power-on-delay-ms: description: Time required by the device after enabling its regulators or powering it on, before it is ready for communication. diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml index 895592da9626..431c13335c40 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml @@ -10,6 +10,13 @@ maintainers: - Dmitry Torokhov properties: + panel: + description: If this touchscreen is integrally connected to a panel, this is a reference to that panel.
The presence of this reference indicates + that the touchscreen should be power sequenced together with the panel + and that they may share power and/or reset signals. + $ref: /schemas/types.yaml#/definitions/phandle + touchscreen-min-x: description: minimum x coordinate reported $ref: /schemas/types.yaml#/definitions/uint32 diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index af60bf1a6664..1dfafc339ddd 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -617,6 +617,8 @@ patternProperties: description: Imagination Technologies Ltd. "^imi,.*": description: Integrated Micro-Electronics Inc. + "^inanbo,.*": + description: Shenzhen INANBO Electronic Technology Co., Ltd. "^incircuit,.*": description: In-Circuit GmbH "^indiedroid,.*": @@ -675,6 +677,8 @@ patternProperties: description: iWave Systems Technologies Pvt. Ltd. "^jadard,.*": description: Jadard Technology Inc. + "^jasonic,.*": + description: Jasonic Technology Ltd. "^jdi,.*": description: Japan Display Inc. "^jedec,.*": diff --git a/Documentation/gpu/amdgpu/flashing.rst b/Documentation/gpu/amdgpu/flashing.rst new file mode 100644 index 000000000000..bd745c42a538 --- /dev/null +++ b/Documentation/gpu/amdgpu/flashing.rst @@ -0,0 +1,33 @@ +======================= + dGPU firmware flashing +======================= + +IFWI +---- +Flashing the dGPU integrated firmware image (IFWI) is supported by GPUs that +use the PSP to orchestrate the update (Navi3x or newer GPUs). +For supported GPUs, `amdgpu` will export a series of sysfs files that can be +used for the flash process. + +The IFWI flash process is: + +1. Ensure the IFWI image is intended for the dGPU on the system. +2. "Write" the IFWI image to the sysfs file `psp_vbflash`. This will stage the IFWI in memory. +3. "Read" from the `psp_vbflash` sysfs file to initiate the flash process. +4. Poll the `psp_vbflash_status` sysfs file to determine when the flash process completes. + +USB-C PD F/W +------------ +On GPUs that support flashing an updated USB-C PD firmware image, the process +is done using the `usbc_pd_fw` sysfs file. + +* Reading the file will provide the current firmware version. +* Writing the name of a firmware payload stored in `/lib/firmware/amdgpu` to the sysfs file will initiate the flash process. + +The firmware payload stored in `/lib/firmware/amdgpu` can have any name +as long as it doesn't conflict with other existing binaries that are used by +`amdgpu`. + +sysfs files +----------- +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst index 03c2966cae79..912e699fd373 100644 --- a/Documentation/gpu/amdgpu/index.rst +++ b/Documentation/gpu/amdgpu/index.rst @@ -10,6 +10,7 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures. module-parameters driver-core display/index + flashing xgmi ras thermal diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst index 4411e6919a3d..c08bcbb95fb3 100644 --- a/Documentation/gpu/driver-uapi.rst +++ b/Documentation/gpu/driver-uapi.rst @@ -6,3 +6,14 @@ drm/i915 uAPI ============= .. kernel-doc:: include/uapi/drm/i915_drm.h + +drm/nouveau uAPI +================ + +VM_BIND / EXEC uAPI +------------------- + +.. kernel-doc:: drivers/gpu/drm/nouveau/nouveau_exec.c + :doc: Overview + +.. 
kernel-doc:: include/uapi/drm/nouveau_drm.h diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index a79fd3549ff8..c19b34b1c0ed 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -466,6 +466,42 @@ DRM MM Range Allocator Function References .. kernel-doc:: drivers/gpu/drm/drm_mm.c :export: +DRM GPU VA Manager +================== + +Overview +-------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Overview + +Split and Merge +--------------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Split and Merge + +Locking +------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Locking + +Examples +-------- + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :doc: Examples + +DRM GPU VA Manager Function References +-------------------------------------- + +.. kernel-doc:: include/drm/drm_gpuva_mgr.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c + :export: + DRM Buddy Allocator =================== @@ -481,8 +517,10 @@ DRM Cache Handling and Fast WC memcpy() .. kernel-doc:: drivers/gpu/drm/drm_cache.c :export: +.. _drm_sync_objects: + DRM Sync Objects -=========================== +================ .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c :doc: Overview @@ -493,6 +531,18 @@ DRM Sync Objects .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c :export: +DRM Execution context +===================== + +.. kernel-doc:: drivers/gpu/drm/drm_exec.c + :doc: Overview + +.. kernel-doc:: include/drm/drm_exec.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_exec.c + :export: + GPU Scheduler ============= diff --git a/Documentation/gpu/rfc/i915_scheduler.rst b/Documentation/gpu/rfc/i915_scheduler.rst index d630f15ab795..ec086e7a43ff 100644 --- a/Documentation/gpu/rfc/i915_scheduler.rst +++ b/Documentation/gpu/rfc/i915_scheduler.rst @@ -135,9 +135,13 @@ Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and drm_i915_context_engines_parallel_submit to the uAPI to implement this extension. +.. c:namespace-push:: rfc + .. kernel-doc:: include/uapi/drm/i915_drm.h :functions: i915_context_engines_parallel_submit +.. c:namespace-pop:: + Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL ------------------------------------------------------------------- Contexts that have been configured with the 'set_parallel' extension can only diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 68bdafa0284f..139980487ccf 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -49,14 +49,18 @@ converted over. Modern compositors like Wayland or Surfaceflinger on Android really want an atomic modeset interface, so this is all about the bright future. -There is a conversion guide for atomic and all you need is a GPU for a -non-converted driver (again virtual HW drivers for KVM are still all -suitable). +There is a conversion guide for atomic [1]_ and all you need is a GPU for a +non-converted driver. The "Atomic mode setting design overview" series [2]_ +[3]_ at LWN.net can also be helpful. As part of this drivers also need to convert to universal plane (which means exposing primary & cursor as proper plane objects). But that's much easier to do by directly using the new atomic helper driver callbacks. + .. [1] https://blog.ffwll.ch/2014/11/atomic-modeset-support-for-kms-drivers.html + .. [2] https://lwn.net/Articles/653071/ + .. 
[3] https://lwn.net/Articles/653466/ + Contact: Daniel Vetter, respective driver maintainers Level: Advanced @@ -319,15 +323,6 @@ Contact: Daniel Vetter, Noralf Tronnes Level: Advanced -struct drm_gem_object_funcs ---------------------------- - -GEM objects can now have a function table instead of having the callbacks on the -DRM driver struct. This is now the preferred way. Callbacks in drivers have been -converted, except for struct drm_driver.gem_prime_mmap. - -Level: Intermediate - connector register/unregister fixes ----------------------------------- @@ -452,6 +447,44 @@ Contact: Thomas Zimmermann Level: Starter +Remove driver dependencies on FB_DEVICE +--------------------------------------- + +A number of fbdev drivers provide attributes via sysfs and therefore depend +on CONFIG_FB_DEVICE to be selected. Review each driver and attempt to make +any dependencies on CONFIG_FB_DEVICE optional. At a minimum, the respective +code in the driver could be conditionalized via ifdef CONFIG_FB_DEVICE. Not +all drivers might be able to drop CONFIG_FB_DEVICE. + +Contact: Thomas Zimmermann + +Level: Starter + +Clean up checks for already prepared/enabled in panels +------------------------------------------------------ + +In a whole pile of panel drivers, we have code to make the +prepare/unprepare/enable/disable callbacks behave as no-ops if they've already +been called. To get some idea of the duplicated code, try:: + + git grep 'if.*>prepared' -- drivers/gpu/drm/panel + git grep 'if.*>enabled' -- drivers/gpu/drm/panel + +In the patch ("drm/panel: Check for already prepared/enabled in drm_panel") +we've moved this check to the core. Now we can most definitely remove the +check from the individual panels and save a pile of code. + +In addition to removing the check from the individual panels, it is believed +that even the core shouldn't need this check and that it should be considered +an error if other code ever relies on it. The check in the core currently +prints a warning with dev_warn() whenever something relies on this check. +After a little while, we likely want to promote this to a WARN(1) to help +encourage folks not to rely on this behavior. + +Contact: Douglas Anderson + +Level: Starter/Intermediate + Core refactorings ================= @@ -749,16 +782,16 @@ existing hardware. The new driver's call-back functions are filled from existing fbdev code. More complex fbdev drivers can be refactored step-by-step into a DRM -driver with the help of the DRM fbconv helpers. [1] These helpers provide +driver with the help of the DRM fbconv helpers [4]_. These helpers provide the transition layer between the DRM core infrastructure and the fbdev driver interface. Create a new DRM driver on top of the fbconv helpers, copy over the fbdev driver, and hook it up to the DRM code. Examples for -several fbdev drivers are available at [1] and a tutorial of this process -available at [2]. The result is a primitive DRM driver that can run X11 -and Weston. +several fbdev drivers are available in Thomas Zimmermann's fbconv tree +[4]_, as well as a tutorial of this process [5]_. The result is a primitive +DRM driver that can run X11 and Weston. - - [1] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv - - [2] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c + .. [4] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv + .. 
[5] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c Contact: Thomas Zimmermann diff --git a/MAINTAINERS b/MAINTAINERS index c08d655faa17..116177e833d3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6180,10 +6180,9 @@ F: kernel/dma/ DMA-BUF HEAPS FRAMEWORK M: Sumit Semwal R: Benjamin Gaignard -R: Liam Mark -R: Laura Abbott R: Brian Starkey R: John Stultz +R: T.J. Mercier L: linux-media@vger.kernel.org L: dri-devel@lists.freedesktop.org L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) @@ -6425,6 +6424,7 @@ F: drivers/gpu/drm/aspeed/ DRM DRIVER FOR AST SERVER GRAPHICS CHIPS M: Dave Airlie R: Thomas Zimmermann +R: Jocelyn Falempe L: dri-devel@lists.freedesktop.org S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc @@ -6576,6 +6576,7 @@ F: drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c DRM DRIVER FOR MGA G200 GRAPHICS CHIPS M: Dave Airlie R: Thomas Zimmermann +R: Jocelyn Falempe L: dri-devel@lists.freedesktop.org S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc @@ -6975,6 +6976,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/lima/ F: include/uapi/drm/lima_drm.h +DRM DRIVERS FOR LOONGSON +M: Sui Jingfeng +L: dri-devel@lists.freedesktop.org +S: Supported +T: git git://anongit.freedesktop.org/drm/drm-misc +F: drivers/gpu/drm/loongson/ + DRM DRIVERS FOR MEDIATEK M: Chun-Kuang Hu M: Philipp Zabel @@ -7044,7 +7052,7 @@ F: drivers/gpu/drm/stm DRM DRIVERS FOR TI KEYSTONE M: Jyri Sarha -M: Tomi Valkeinen +M: Tomi Valkeinen L: dri-devel@lists.freedesktop.org S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc @@ -7055,16 +7063,18 @@ F: drivers/gpu/drm/tidss/ DRM DRIVERS FOR TI LCDC M: Jyri Sarha -R: Tomi Valkeinen +M: Tomi Valkeinen L: dri-devel@lists.freedesktop.org S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/tilcdc/ F: drivers/gpu/drm/tilcdc/ DRM DRIVERS FOR TI OMAP -M: Tomi Valkeinen +M: Tomi Valkeinen L: dri-devel@lists.freedesktop.org S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/ti/ F: drivers/gpu/drm/omapdrm/ diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c index e2b9d2618c67..e94655ef16bb 100644 --- a/arch/arm/kernel/efi.c +++ b/arch/arm/kernel/efi.c @@ -5,6 +5,8 @@ #include #include +#include + #include #include #include diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 49efbdbd6f7a..2b478ca356b0 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -9,6 +9,7 @@ #include #include +#include #include #include diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 3d448fef3af4..9fc10cea21e1 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 674da7ebd8b7..310513646c9b 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -386,7 +386,7 @@ static struct property_entry gpio_backlight_props[] = { }; static struct gpio_backlight_platform_data gpio_backlight_data = { - .fbdev = &lcdc_device.dev, + .dev = &lcdc_device.dev, }; static const struct platform_device_info gpio_backlight_device_info = { diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 20f4db778ed6..a18e80394aed 100644 --- 
a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -202,7 +202,7 @@ static struct platform_device kfr2r09_sh_lcdc_device = { }; static struct lv5207lp_platform_data kfr2r09_backlight_data = { - .fbdev = &kfr2r09_sh_lcdc_device.dev, + .dev = &kfr2r09_sh_lcdc_device.dev, .def_value = 13, .max_value = 13, }; diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index 97e715e4e9b3..e25193001ea0 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c @@ -119,7 +119,7 @@ static struct fb_videomode sh7763fb_videomode = { .vsync_len = 1, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, - .flag = FBINFO_FLAG_DEFAULT, + .flag = FB_MODE_IS_UNKNOWN, }; static struct sh7760fb_platdata sh7763fb_def_pdata = { diff --git a/arch/x86/Makefile b/arch/x86/Makefile index fdc2e3abd615..95315d3474a2 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -259,7 +259,7 @@ drivers-$(CONFIG_PCI) += arch/x86/pci/ # suspend and hibernation support drivers-$(CONFIG_PM) += arch/x86/power/ -drivers-$(CONFIG_FB) += arch/x86/video/ +drivers-$(CONFIG_FB_CORE) += arch/x86/video/ #### # boot loader support. Several targets are kept for legacy purposes diff --git a/arch/x86/video/Makefile b/arch/x86/video/Makefile index 11640c116115..5ebe48752ffc 100644 --- a/arch/x86/video/Makefile +++ b/arch/x86/video/Makefile @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_FB) += fbdev.o +obj-$(CONFIG_FB_CORE) += fbdev.o diff --git a/drivers/Kconfig b/drivers/Kconfig index 514ae6b24cb2..496ca02ee18f 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -129,8 +129,6 @@ source "drivers/dma-buf/Kconfig" source "drivers/dca/Kconfig" -source "drivers/auxdisplay/Kconfig" - source "drivers/uio/Kconfig" source "drivers/vfio/Kconfig" diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index 80f1fb3548ae..e4328b430564 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -2,10 +2,13 @@ # Copyright (C) 2023 Intel Corporation intel_vpu-y := \ + ivpu_debugfs.o \ ivpu_drv.o \ ivpu_fw.o \ + ivpu_fw_log.o \ ivpu_gem.o \ - ivpu_hw_mtl.o \ + ivpu_hw_37xx.o \ + ivpu_hw_40xx.o \ ivpu_ipc.o \ ivpu_job.o \ ivpu_jsm_msg.o \ @@ -13,4 +16,4 @@ intel_vpu-y := \ ivpu_mmu_context.o \ ivpu_pm.o -obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o \ No newline at end of file +obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c new file mode 100644 index 000000000000..5e5996fd4f9f --- /dev/null +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#include +#include +#include + +#include + +#include "ivpu_debugfs.h" +#include "ivpu_drv.h" +#include "ivpu_fw.h" +#include "ivpu_fw_log.h" +#include "ivpu_gem.h" +#include "ivpu_jsm_msg.h" +#include "ivpu_pm.h" + +static int bo_list_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct drm_printer p = drm_seq_file_printer(s); + + ivpu_bo_list(node->minor->dev, &p); + + return 0; +} + +static int fw_name_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + + seq_printf(s, "%s\n", vdev->fw->name); + return 0; +} + +static int fw_trace_capability_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct 
drm_info_node *)s->private; + struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + u64 trace_hw_component_mask; + u32 trace_destination_mask; + int ret; + + ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask, + &trace_hw_component_mask); + if (!ret) { + seq_printf(s, + "trace_destination_mask: %#18x\n" + "trace_hw_component_mask: %#18llx\n", + trace_destination_mask, trace_hw_component_mask); + } + return 0; +} + +static int fw_trace_config_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + /** + * WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet, + * so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config() + */ + u32 trace_level = vdev->fw->trace_level; + u32 trace_destination_mask = vdev->fw->trace_destination_mask; + u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask; + + seq_printf(s, + "trace_level: %#18x\n" + "trace_destination_mask: %#18x\n" + "trace_hw_component_mask: %#18llx\n", + trace_level, trace_destination_mask, trace_hw_component_mask); + + return 0; +} + +static int last_bootmode_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + + seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot"); + + return 0; +} + +static int reset_counter_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + + seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter)); + return 0; +} + +static int reset_pending_show(struct seq_file *s, void *v) +{ + struct drm_info_node *node = (struct drm_info_node *)s->private; + struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + + seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset)); + return 0; +} + +static const struct drm_info_list vdev_debugfs_list[] = { + {"bo_list", bo_list_show, 0}, + {"fw_name", fw_name_show, 0}, + {"fw_trace_capability", fw_trace_capability_show, 0}, + {"fw_trace_config", fw_trace_config_show, 0}, + {"last_bootmode", last_bootmode_show, 0}, + {"reset_counter", reset_counter_show, 0}, + {"reset_pending", reset_pending_show, 0}, +}; + +static int fw_log_show(struct seq_file *s, void *v) +{ + struct ivpu_device *vdev = s->private; + struct drm_printer p = drm_seq_file_printer(s); + + ivpu_fw_log_print(vdev, true, &p); + return 0; +} + +static int fw_log_fops_open(struct inode *inode, struct file *file) +{ + return single_open(file, fw_log_show, inode->i_private); +} + +static ssize_t +fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct ivpu_device *vdev = s->private; + + if (!size) + return -EINVAL; + + ivpu_fw_log_clear(vdev); + return size; +} + +static const struct file_operations fw_log_fops = { + .owner = THIS_MODULE, + .open = fw_log_fops_open, + .write = fw_log_fops_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf, + size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + struct ivpu_fw_info *fw = vdev->fw; + u32 trace_destination_mask; + int ret; + + ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask); + if (ret < 
0) + return ret; + + fw->trace_destination_mask = trace_destination_mask; + + ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask, + fw->trace_hw_component_mask); + + return size; +} + +static const struct file_operations fw_trace_destination_mask_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = fw_trace_destination_mask_fops_write, +}; + +static ssize_t +fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf, + size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + struct ivpu_fw_info *fw = vdev->fw; + u64 trace_hw_component_mask; + int ret; + + ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask); + if (ret < 0) + return ret; + + fw->trace_hw_component_mask = trace_hw_component_mask; + + ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask, + trace_hw_component_mask); + + return size; +} + +static const struct file_operations fw_trace_hw_comp_mask_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = fw_trace_hw_comp_mask_fops_write, +}; + +static ssize_t +fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + struct ivpu_fw_info *fw = vdev->fw; + u32 trace_level; + int ret; + + ret = kstrtou32_from_user(user_buf, size, 0, &trace_level); + if (ret < 0) + return ret; + + fw->trace_level = trace_level; + + ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask, + fw->trace_hw_component_mask); + + return size; +} + +static const struct file_operations fw_trace_level_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = fw_trace_level_fops_write, +}; + +static ssize_t +ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + + if (!size) + return -EINVAL; + + if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE)) + return -ENODEV; + if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY)) + return -ENODEV; + + return size; +} + +static ssize_t +ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + + if (!size) + return -EINVAL; + + ivpu_pm_schedule_recovery(vdev); + return size; +} + +static const struct file_operations ivpu_force_recovery_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = ivpu_force_recovery_fn, +}; + +static const struct file_operations ivpu_reset_engine_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = ivpu_reset_engine_fn, +}; + +void ivpu_debugfs_init(struct drm_minor *minor) +{ + struct ivpu_device *vdev = to_ivpu_device(minor->dev); + + drm_debugfs_create_files(vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list), + minor->debugfs_root, minor); + + debugfs_create_file("force_recovery", 0200, minor->debugfs_root, vdev, + &ivpu_force_recovery_fops); + + debugfs_create_file("fw_log", 0644, minor->debugfs_root, vdev, + &fw_log_fops); + debugfs_create_file("fw_trace_destination_mask", 0200, minor->debugfs_root, vdev, + &fw_trace_destination_mask_fops); + debugfs_create_file("fw_trace_hw_comp_mask", 0200, minor->debugfs_root, vdev, + &fw_trace_hw_comp_mask_fops); + debugfs_create_file("fw_trace_level", 0200, minor->debugfs_root, vdev, + &fw_trace_level_fops); + + debugfs_create_file("reset_engine", 0200, minor->debugfs_root, vdev, + &ivpu_reset_engine_fops); +} diff --git 
a/drivers/accel/ivpu/ivpu_debugfs.h b/drivers/accel/ivpu/ivpu_debugfs.h new file mode 100644 index 000000000000..78f80c1e00e4 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020-2023 Intel Corporation + */ + +#ifndef __IVPU_DEBUGFS_H__ +#define __IVPU_DEBUGFS_H__ + +struct drm_minor; + +void ivpu_debugfs_init(struct drm_minor *minor); + +#endif /* __IVPU_DEBUGFS_H__ */ diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 8396db2b5203..ba79f397c9e8 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -14,6 +14,7 @@ #include #include "vpu_boot_api.h" +#include "ivpu_debugfs.h" #include "ivpu_drv.h" #include "ivpu_fw.h" #include "ivpu_gem.h" @@ -50,6 +51,10 @@ u8 ivpu_pll_max_ratio = U8_MAX; module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644); MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency"); +bool ivpu_disable_mmu_cont_pages; +module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644); +MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization"); + struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv) { struct ivpu_device *vdev = file_priv->vdev; @@ -110,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link) kref_put(&file_priv->ref, file_priv_release); } +static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args) +{ + switch (args->index) { + case DRM_IVPU_CAP_METRIC_STREAMER: + args->value = 0; + break; + case DRM_IVPU_CAP_DMA_MEMORY_RANGE: + args->value = 1; + break; + default: + return -EINVAL; + } + + return 0; +} + static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; @@ -139,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f args->value = ivpu_get_context_count(vdev); break; case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS: - args->value = vdev->hw->ranges.user_low.start; + args->value = vdev->hw->ranges.user.start; break; case DRM_IVPU_PARAM_CONTEXT_PRIORITY: args->value = file_priv->priority; @@ -169,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f case DRM_IVPU_PARAM_SKU: args->value = vdev->hw->sku; break; + case DRM_IVPU_PARAM_CAPABILITIES: + ret = ivpu_get_capabilities(vdev, args); + break; default: ret = -EINVAL; break; @@ -369,10 +393,11 @@ static const struct drm_driver driver = { .open = ivpu_open, .postclose = ivpu_postclose, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = ivpu_gem_prime_import, - .gem_prime_mmap = drm_gem_prime_mmap, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = ivpu_debugfs_init, +#endif .ioctls = ivpu_drm_ioctls, .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), @@ -427,7 +452,7 @@ static int ivpu_pci_init(struct ivpu_device *vdev) return PTR_ERR(vdev->regb); } - ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38)); + ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits)); if (ret) { ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret); return ret; @@ -437,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev) /* Clear any pending errors */ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); - /* VPU MTL does not require PCI spec 10m D3hot delay */ - if 
(ivpu_is_mtl(vdev)) + /* VPU 37XX does not require 10m D3hot delay */ + if (ivpu_hw_gen(vdev) == IVPU_HW_37XX) pdev->d3hot_delay = 0; ret = pcim_enable_device(pdev); @@ -476,7 +501,14 @@ static int ivpu_dev_init(struct ivpu_device *vdev) if (!vdev->pm) return -ENOMEM; - vdev->hw->ops = &ivpu_hw_mtl_ops; + if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) { + vdev->hw->ops = &ivpu_hw_40xx_ops; + vdev->hw->dma_bits = 48; + } else { + vdev->hw->ops = &ivpu_hw_37xx_ops; + vdev->hw->dma_bits = 38; + } + vdev->platform = IVPU_PLATFORM_INVALID; vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID; vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID; @@ -602,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev) static struct pci_device_id ivpu_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) }, { } }; MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 399dc5dcefd7..9e8c075fe9ef 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -23,6 +23,10 @@ #define DRIVER_DATE "20230117" #define PCI_DEVICE_ID_MTL 0x7d1d +#define PCI_DEVICE_ID_LNL 0x643e + +#define IVPU_HW_37XX 37 +#define IVPU_HW_40XX 40 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 /* SSID 1 is used by the VPU to represent invalid context */ @@ -76,6 +80,7 @@ struct ivpu_wa_table { bool clear_runtime_mem; bool d3hot_after_power_off; bool interrupt_clear_with_0; + bool disable_clock_relinquish; }; struct ivpu_hw_info; @@ -132,6 +137,7 @@ struct ivpu_file_priv { extern int ivpu_dbg_mask; extern u8 ivpu_pll_min_ratio; extern u8 ivpu_pll_max_ratio; +extern bool ivpu_disable_mmu_cont_pages; #define IVPU_TEST_MODE_DISABLED 0 #define IVPU_TEST_MODE_FW_TEST 1 @@ -145,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link); int ivpu_boot(struct ivpu_device *vdev); int ivpu_shutdown(struct ivpu_device *vdev); -static inline bool ivpu_is_mtl(struct ivpu_device *vdev) -{ - return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL; -} - static inline u8 ivpu_revision(struct ivpu_device *vdev) { return to_pci_dev(vdev->drm.dev)->revision; @@ -160,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev) return to_pci_dev(vdev->drm.dev)->device; } +static inline int ivpu_hw_gen(struct ivpu_device *vdev) +{ + switch (ivpu_device_id(vdev)) { + case PCI_DEVICE_ID_MTL: + return IVPU_HW_37XX; + case PCI_DEVICE_ID_LNL: + return IVPU_HW_40XX; + default: + ivpu_err(vdev, "Unknown VPU device\n"); + return 0; + } +} + static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev) { return container_of(dev, struct ivpu_device, drm); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index f58951a0d81b..9827ea4d7b83 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -11,6 +11,7 @@ #include "vpu_boot_api.h" #include "ivpu_drv.h" #include "ivpu_fw.h" +#include "ivpu_fw_log.h" #include "ivpu_gem.h" #include "ivpu_hw.h" #include "ivpu_ipc.h" @@ -42,22 +43,39 @@ static char *ivpu_firmware; module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644); MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/.."); +/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */ +static struct { + int gen; + const char *name; +} fw_names[] = { + { IVPU_HW_37XX, "vpu_37xx.bin" }, + { IVPU_HW_37XX, "mtl_vpu.bin" }, + { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" }, + { IVPU_HW_40XX, 
"vpu_40xx.bin" }, + { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, +}; + static int ivpu_fw_request(struct ivpu_device *vdev) { - static const char * const fw_names[] = { - "mtl_vpu.bin", - "intel/vpu/mtl_vpu_v0.0.bin" - }; int ret = -ENOENT; int i; - if (ivpu_firmware) - return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev); + if (ivpu_firmware) { + ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev); + if (!ret) + vdev->fw->name = ivpu_firmware; + return ret; + } for (i = 0; i < ARRAY_SIZE(fw_names); i++) { - ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev); - if (!ret) + if (fw_names[i].gen != ivpu_hw_gen(vdev)) + continue; + + ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev); + if (!ret) { + vdev->fw->name = fw_names[i].name; return 0; + } } ivpu_err(vdev, "Failed to request firmware: %d\n", ret); @@ -142,7 +160,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) } ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", fw_hdr->header_version, fw_hdr->image_format); - ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE); + + ivpu_info(vdev, "Firmware: %s, version: %s", fw->name, + (const char *)fw_hdr + VPU_FW_HEADER_SIZE); if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3)) return -EINVAL; @@ -158,6 +178,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->cold_boot_entry_point = fw_hdr->entry_point; fw->entry_point = fw->cold_boot_entry_point; + fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL); + fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING; + fw->trace_hw_component_mask = -1; + ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", @@ -182,13 +206,14 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev) return -EINVAL; } - ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size); + ivpu_hw_init_range(&vdev->hw->ranges.global, start, size); return 0; } static int ivpu_fw_mem_init(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; + int log_verb_size; int ret; ret = ivpu_fw_update_global_range(vdev); @@ -201,17 +226,45 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev) return -ENOMEM; } + fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE, + DRM_IVPU_BO_CACHED); + if (!fw->mem_log_crit) { + ivpu_err(vdev, "Failed to allocate critical log buffer\n"); + ret = -ENOMEM; + goto err_free_fw_mem; + } + + if (ivpu_log_level <= IVPU_FW_LOG_INFO) + log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE; + else + log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE; + + fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED); + if (!fw->mem_log_verb) { + ivpu_err(vdev, "Failed to allocate verbose log buffer\n"); + ret = -ENOMEM; + goto err_free_log_crit; + } + if (fw->shave_nn_size) { - fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start, + fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start, fw->shave_nn_size, DRM_IVPU_BO_UNCACHED); if (!fw->mem_shave_nn) { ivpu_err(vdev, "Failed to allocate shavenn buffer\n"); - ivpu_bo_free_internal(fw->mem); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_log_verb; } } return 0; + +err_free_log_verb: + ivpu_bo_free_internal(fw->mem_log_verb); +err_free_log_crit: + 
ivpu_bo_free_internal(fw->mem_log_crit); +err_free_fw_mem: + ivpu_bo_free_internal(fw->mem); + return ret; } static void ivpu_fw_mem_fini(struct ivpu_device *vdev) @@ -223,7 +276,12 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev) fw->mem_shave_nn = NULL; } + ivpu_bo_free_internal(fw->mem_log_verb); + ivpu_bo_free_internal(fw->mem_log_crit); ivpu_bo_free_internal(fw->mem); + + fw->mem_log_verb = NULL; + fw->mem_log_crit = NULL; fw->mem = NULL; } @@ -387,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params * Uncached region of VPU address space, covers IPC buffers, job queues * and log buffers, programmable to L2$ Uncached by VPU MTRR */ - boot_params->shared_region_base = vdev->hw->ranges.global_low.start; - boot_params->shared_region_size = vdev->hw->ranges.global_low.end - - vdev->hw->ranges.global_low.start; + boot_params->shared_region_base = vdev->hw->ranges.global.start; + boot_params->shared_region_size = vdev->hw->ranges.global.end - + vdev->hw->ranges.global.start; boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr; boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2; @@ -397,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2; boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2; - boot_params->global_aliased_pio_base = - vdev->hw->ranges.global_aliased_pio.start; - boot_params->global_aliased_pio_size = - ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio); + boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start; + boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user); /* Allow configuration for L2C_PAGE_TABLE with boot param value */ boot_params->autoconfig = 1; @@ -408,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params /* Enable L2 cache for first 2GB of high memory */ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1; boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = - ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start); + ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start); if (vdev->fw->mem_shave_nn) boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr; @@ -424,6 +480,15 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio; boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio; + boot_params->default_trace_level = vdev->fw->trace_level; + boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING); + boot_params->trace_destination_mask = vdev->fw->trace_destination_mask; + boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask; + boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr; + boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size; + boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr; + boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size; + boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev); boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 8d275c802d1c..8567fdf925fe 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ 
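
The new fw_names[] table pairs each firmware image name with the generation it targets, and ivpu_fw_request() walks it in order, skipping entries for other generations and stopping at the first image that loads. A minimal sketch of that first-match lookup; try_load() stands in for firmware_request_nowarn() and the installed-image assumption is purely for illustration:

#include <stdio.h>
#include <string.h>

static const struct {
	int gen;
	const char *name;
} fw_names[] = {
	{ 37, "vpu_37xx.bin" },
	{ 37, "mtl_vpu.bin" },		/* legacy name kept during the transition */
	{ 40, "vpu_40xx.bin" },
};

static int try_load(const char *name)
{
	/* Pretend only the legacy 37xx image is installed. */
	return strcmp(name, "mtl_vpu.bin") ? -2 /* -ENOENT */ : 0;
}

static const char *request_fw(int gen)
{
	size_t i;

	for (i = 0; i < sizeof(fw_names) / sizeof(fw_names[0]); i++) {
		if (fw_names[i].gen != gen)
			continue;	/* skip images built for other generations */
		if (try_load(fw_names[i].name) == 0)
			return fw_names[i].name;	/* first hit wins */
	}
	return NULL;
}

int main(void)
{
	const char *name = request_fw(37);

	printf("loaded: %s\n", name ? name : "(none)");
	return 0;
}

Ordering the table from preferred to legacy names gives graceful fallback on systems that still ship the old image, while remembering the winning name lets ivpu_fw_parse() print it alongside the firmware version.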
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 8d275c802d1c..8567fdf925fe 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -12,6 +12,7 @@ struct vpu_boot_params;
 
 struct ivpu_fw_info {
 	const struct firmware *file;
+	const char *name;
 	struct ivpu_bo *mem;
 	struct ivpu_bo *mem_shave_nn;
 	struct ivpu_bo *mem_log_crit;
@@ -23,6 +24,9 @@ struct ivpu_fw_info {
 	u32 shave_nn_size;
 	u64 entry_point; /* Cold or warm boot entry point for next boot */
 	u64 cold_boot_entry_point;
+	u32 trace_level;
+	u32 trace_destination_mask;
+	u64 trace_hw_component_mask;
 };
 
 int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
new file mode 100644
index 000000000000..95065cac9fbd
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
+#include "ivpu_gem.h"
+
+#define IVPU_FW_LOG_LINE_LENGTH 256
+
+unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
+module_param(ivpu_log_level, uint, 0444);
+MODULE_PARM_DESC(ivpu_log_level,
+		 "VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+		 " info=" __stringify(IVPU_FW_LOG_INFO)
+		 " warn=" __stringify(IVPU_FW_LOG_WARN)
+		 " error=" __stringify(IVPU_FW_LOG_ERROR)
+		 " fatal=" __stringify(IVPU_FW_LOG_FATAL));
+
+static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+		      struct vpu_tracing_buffer_header **log_header)
+{
+	struct vpu_tracing_buffer_header *log;
+
+	if ((*offset + sizeof(*log)) > bo->base.size)
+		return -EINVAL;
+
+	log = bo->kvaddr + *offset;
+
+	if (log->vpu_canary_start != VPU_TRACING_BUFFER_CANARY)
+		return -EINVAL;
+
+	if (log->header_size < sizeof(*log) || log->header_size > 1024) {
+		ivpu_dbg(vdev, FW_BOOT, "Invalid header size 0x%x\n", log->header_size);
+		return -EINVAL;
+	}
+	if ((char *)log + log->size > (char *)bo->kvaddr + bo->base.size) {
+		ivpu_dbg(vdev, FW_BOOT, "Invalid log size 0x%x\n", log->size);
+		return -EINVAL;
+	}
+
+	*log_header = log;
+	*offset += log->size;
+
+	ivpu_dbg(vdev, FW_BOOT,
+		 "FW log name \"%s\", write offset 0x%x size 0x%x, wrap count %d, hdr version %d size %d format %d, alignment %d",
+		 log->name, log->write_index, log->size, log->wrap_count, log->header_version,
+		 log->header_size, log->format, log->alignment);
+
+	return 0;
+}
+
+static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+{
+	char line[IVPU_FW_LOG_LINE_LENGTH];
+	u32 index = 0;
+
+	if (!size || !buffer)
+		return;
+
+	while (size--) {
+		if (*buffer == '\n' || *buffer == 0) {
+			line[index] = 0;
+			if (index != 0)
+				drm_printf(p, "%s\n", line);
+			index = 0;
+			buffer++;
+			continue;
+		}
+		if (index == IVPU_FW_LOG_LINE_LENGTH - 1) {
+			line[index] = 0;
+			index = 0;
+			drm_printf(p, "%s\n", line);
+		}
+		if (*buffer != '\r' && (isprint(*buffer) || iscntrl(*buffer)))
+			line[index++] = *buffer;
+		buffer++;
+	}
+	line[index] = 0;
+	if (index != 0)
+		drm_printf(p, "%s\n", line);
+}
+
+static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
+				const char *prefix, bool only_new_msgs, struct drm_printer *p)
+{
+	char *log_buffer = (void *)log + log->header_size;
+	u32 log_size = log->size - log->header_size;
+	u32 log_start = log->read_index;
+	u32 log_end = log->write_index;
+
+	if (!(log->write_index || log->wrap_count) ||
+	    (log->write_index == log->read_index && only_new_msgs)) {
+		drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+		return;
+	}
+
+	drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
+	if (log->write_index > log->read_index) {
+		buffer_print(log_buffer + log_start, log_end - log_start, p);
+	} else {
+		buffer_print(log_buffer + log_end, log_size - log_end, p);
+		buffer_print(log_buffer, log_end, p);
+	}
+	drm_printf(p, "\x1b[0m");
+	drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+	struct vpu_tracing_buffer_header *log_header;
+	u32 next = 0;
+
+	while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+		fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
+
+	next = 0;
+	while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+		fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
+}
+
+void ivpu_fw_log_clear(struct ivpu_device *vdev)
+{
+	struct vpu_tracing_buffer_header *log_header;
+	u32 next = 0;
+
+	while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
+		log_header->read_index = log_header->write_index;
+
+	next = 0;
+	while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
+		log_header->read_index = log_header->write_index;
+}
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
new file mode 100644
index 000000000000..0b2573f6f315
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw_log.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_FW_LOG_H__
+#define __IVPU_FW_LOG_H__
+
+#include
+
+#include
+
+#include "ivpu_drv.h"
+
+#define IVPU_FW_LOG_DEFAULT 0
+#define IVPU_FW_LOG_DEBUG   1
+#define IVPU_FW_LOG_INFO    2
+#define IVPU_FW_LOG_WARN    3
+#define IVPU_FW_LOG_ERROR   4
+#define IVPU_FW_LOG_FATAL   5
+
+extern unsigned int ivpu_log_level;
+
+#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE	SZ_1M
+#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE	SZ_8M
+#define IVPU_FW_CRITICAL_BUFFER_SIZE		SZ_512K
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_clear(struct ivpu_device *vdev);
+
+static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
+{
+	struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+	ivpu_fw_log_print(vdev, false, &p);
+}
+
+#endif /* __IVPU_FW_LOG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 9967fcfa27ec..d09f13b35902 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -282,10 +282,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 	int ret;
 
 	if (!range) {
-		if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
-			range = &vdev->hw->ranges.user_high;
+		if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+			range = &vdev->hw->ranges.shave;
+		else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+			range = &vdev->hw->ranges.dma;
 		else
-			range = &vdev->hw->ranges.user_low;
+			range = &vdev->hw->ranges.user;
 	}
 
 	mutex_lock(&ctx->lock);
@@ -573,7 +575,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
 		fixed_range.end = vpu_addr + size;
 		range = &fixed_range;
 	} else {
-		range = &vdev->hw->ranges.global_low;
+		range = &vdev->hw->ranges.global;
 	}
 
 	bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
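
fw_log_print_buffer() in the new ivpu_fw_log.c has to cope with a trace buffer whose writer may have wrapped: in the linear case it prints [read_index, write_index), otherwise it prints the tail [write_index, size) first, since after a wrap that region holds the oldest data, and then [0, write_index). A small self-contained sketch of that dump order (illustrative names, not the driver's code):

#include <stdio.h>
#include <string.h>

/* Dump a ring buffer oldest-first. After a wrap, the oldest bytes sit
 * at [write, size) and the newest at [0, write). */
static void dump(const char *buf, size_t size, size_t read, size_t write,
		 int wrapped)
{
	if (!wrapped && write > read) {
		fwrite(buf + read, 1, write - read, stdout);	/* linear case */
	} else {
		fwrite(buf + write, 1, size - write, stdout);	/* oldest tail */
		fwrite(buf, 1, write, stdout);			/* newest head */
	}
}

int main(void)
{
	char ring[8];

	/* Writer produced "ABCDEFGHIJ" into an 8-byte ring:
	 * 'I' and 'J' overwrote 'A' and 'B', write index is now 2. */
	memcpy(ring, "IJCDEFGH", 8);
	dump(ring, 8, 0, 2, 1);		/* prints CDEFGHIJ, oldest first */
	putchar('\n');
	return 0;
}

ivpu_fw_log_clear() then only has to snap read_index forward to write_index; the next only_new_msgs dump sees an empty window without the buffer memory being touched.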
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 50a9304ab09c..ab341237bcf9 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -38,11 +38,10 @@ struct ivpu_addr_range {
 struct ivpu_hw_info {
 	const struct ivpu_hw_ops *ops;
 	struct {
-		struct ivpu_addr_range global_low;
-		struct ivpu_addr_range global_high;
-		struct ivpu_addr_range user_low;
-		struct ivpu_addr_range user_high;
-		struct ivpu_addr_range global_aliased_pio;
+		struct ivpu_addr_range global;
+		struct ivpu_addr_range user;
+		struct ivpu_addr_range shave;
+		struct ivpu_addr_range dma;
 	} ranges;
 	struct {
 		u8 min_ratio;
@@ -57,9 +56,11 @@ struct ivpu_hw_info {
 	u32 tile_fuse;
 	u32 sku;
 	u16 config;
+	int dma_bits;
 };
 
-extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
+extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
+extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
 
 static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
 {
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
similarity index 52%
rename from drivers/accel/ivpu/ivpu_hw_mtl.c
rename to drivers/accel/ivpu/ivpu_hw_37xx.c
index 2a5dd3a5dc46..9eae1c241bc0 100644
--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -5,7 +5,7 @@
 
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
-#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_37xx_reg.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_hw.h"
 #include "ivpu_ipc.h"
@@ -39,34 +39,34 @@
 #define PLL_TIMEOUT_US	(1500 * USEC_PER_MSEC)
 #define IDLE_TIMEOUT_US	(500 * USEC_PER_MSEC)
 
-#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
 
-#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
-			(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+			(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
 
 #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
 
-#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
-			   (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
-			   (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+			   (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+			   (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
 
 #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
 #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
 
-#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
-				     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
-				     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
-				     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
-				     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
-				     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
-				     (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+				     (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
 
 static char *ivpu_platform_to_str(u32 platform)
 {
@@ -84,8 +84,8 @@ static char *ivpu_platform_to_str(u32 platform)
 
 static void ivpu_hw_read_platform(struct ivpu_device *vdev)
 {
-	u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
-	u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
+	u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
+	u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
 
 	if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
 		vdev->platform = platform;
@@ -123,7 +123,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
 
 static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
 {
-	return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
 }
 
 /* Send KMD initiated workpoint change */
@@ -139,23 +139,23 @@ static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ra
 		return ret;
 	}
 
-	val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
-	val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
-	val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
-	REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
+	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
 
-	val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
-	val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
-	val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
-	REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
+	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
+	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
 
-	val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
-	val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
-	REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
+	val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
 
-	val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
-	val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
-	REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
+	val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
+	REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
 
 	ret = ivpu_pll_wait_for_cmd_send(vdev);
 	if (ret)
@@ -171,7 +171,7 @@ static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
 	if (IVPU_WA(punit_disabled))
 		return 0;
 
-	return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
 }
 
 static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
@@ -179,7 +179,7 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
 	if (IVPU_WA(punit_disabled))
 		return 0;
 
-	return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+	return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
 }
 
 static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
@@ -188,21 +188,21 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
 	u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
 	u32 fmin_fuse, fmax_fuse;
 
-	fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
-	fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
-	fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+	fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
+	fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+	fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
 
-	fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
-	fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+	fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
+	fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
 
 	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
 	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
 	hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
 }
 
-static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
 {
-	return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
 }
 
 static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
@@ -248,7 +248,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
 			return ret;
 		}
 
-		ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
+		ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
 		if (ret) {
 			ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
 			return ret;
@@ -272,52 +272,52 @@ static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
 {
 	u32 val = 0;
 
-	val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
-	val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
-	val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
 
-	REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
+	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
 }
 
 static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
 
 	if (enable) {
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
 	} else {
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
 	}
 
-	REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
+	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
 }
 
 static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
 
 	if (enable) {
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
 	} else {
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
 	}
 
-	REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
+	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
 }
 
 static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
 
-	if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
 		return -EIO;
 
 	return 0;
@@ -325,9 +325,9 @@ static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
 
 static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
 
-	if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
 		return -EIO;
 
 	return 0;
@@ -335,9 +335,9 @@ static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
 
 static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
 
-	if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+	if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
 		return -EIO;
 
 	return 0;
@@ -385,7 +385,7 @@ static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
 
 static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
 {
-	REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
+	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
 }
 
 static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
@@ -393,12 +393,12 @@ static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
 	int ret;
 	u32 val;
 
-	val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+	val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
 	if (enable)
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
 	else
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
-	REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
 
 	ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
 	if (ret) {
@@ -453,26 +453,26 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
 
 static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
 
 	if (enable)
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
 	else
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
 
-	REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
 }
 
 static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
 
 	if (enable)
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
 	else
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
 
-	REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
+	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
 }
 
 static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
@@ -481,32 +481,32 @@ static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 ex
 	if (ivpu_is_fpga(vdev))
 		return 0;
 
-	return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+	return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
 			     exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
 }
 
 static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
 
 	if (enable)
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
 	else
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
 
-	REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
+	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
 }
 
 static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
+	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
 
 	if (enable)
-		val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
 	else
-		val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
 
-	REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
+	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
 }
 
 static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
@@ -538,36 +538,25 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
 
 static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
+	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
 
-	val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
-	val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
-	val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
 
-	REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
+	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
 }
 
 static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
 {
-	u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
+	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
 
-	if (ivpu_is_fpga(vdev)) {
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
-	} else {
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
-		val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
-	}
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
 
-	REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
+	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
 }
 
 static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
@@ -587,10 +576,10 @@ static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
 	REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
 
 	val = vdev->fw->entry_point >> 9;
-	REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
 
-	val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
-	REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
 
 	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
 		 vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
@@ -601,27 +590,27 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
 	int ret;
 	u32 val;
 
-	ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
 	if (ret) {
 		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
 		return ret;
 	}
 
-	val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
 	if (enable)
-		val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+		val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
 	else
-		val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
-	REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
+		val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+	REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
 
-	ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
 	if (ret)
 		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
 
 	return ret;
 }
 
-static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
 {
 	struct ivpu_hw_info *hw = vdev->hw;
 
@@ -631,16 +620,15 @@ static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
 
 	ivpu_pll_init_frequency_ratios(vdev);
 
-	ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
-	ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
-	ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
-	ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
-	hw->ranges.global_aliased_pio = hw->ranges.user_low;
+	ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+	ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+	ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+	ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
 
 	return 0;
 }
 
-static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
 {
 	int ret;
 	u32 val;
@@ -648,24 +636,24 @@ static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
 	if (IVPU_WA(punit_disabled))
 		return 0;
 
-	ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
 	if (ret) {
 		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
 		return ret;
 	}
 
-	val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
-	val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
-	REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+	val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+	REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
 
-	ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
 	if (ret)
 		ivpu_err(vdev, "Timed out waiting for RESET completion\n");
 
 	return ret;
 }
 
-static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
 {
 	int ret;
 
@@ -678,7 +666,7 @@ static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
 	return ret;
 }
 
-static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
 {
 	int ret;
 
@@ -689,7 +677,7 @@ static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
 	return ret;
 }
 
-static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
 {
 	int ret;
 
@@ -697,11 +685,11 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
 	ivpu_hw_wa_init(vdev);
 	ivpu_hw_timeouts_init(vdev);
 
-	ret = ivpu_hw_mtl_reset(vdev);
+	ret = ivpu_hw_37xx_reset(vdev);
 	if (ret)
 		ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
 
-	ret = ivpu_hw_mtl_d0i3_disable(vdev);
+	ret = ivpu_hw_37xx_d0i3_disable(vdev);
 	if (ret)
 		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
 
@@ -743,7 +731,7 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
 	return ret;
 }
 
-static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
 {
 	ivpu_boot_no_snoop_enable(vdev);
 	ivpu_boot_tbu_mmu_enable(vdev);
@@ -752,32 +740,31 @@ static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
 	return 0;
 }
 
-static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
+static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
 {
 	u32 val;
 
 	if (IVPU_WA(punit_disabled))
 		return true;
 
-	val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
-	return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
-	       REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
+	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
+	return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
+	       REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
 }
 
-static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
 {
 	int ret = 0;
 
-	if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
+	if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
 		ivpu_err(vdev, "Failed to reset the VPU\n");
-	}
 
 	if (ivpu_pll_disable(vdev)) {
 		ivpu_err(vdev, "Failed to disable PLL\n");
 		ret = -EIO;
 	}
 
-	if (ivpu_hw_mtl_d0i3_enable(vdev)) {
+	if (ivpu_hw_37xx_d0i3_enable(vdev)) {
 		ivpu_err(vdev, "Failed to enter D0I3\n");
 		ret = -EIO;
 	}
@@ -785,7 +772,7 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
 	return ret;
 }
 
-static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
 {
 	u32 val;
 
@@ -803,7 +790,7 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
 	REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
 }
 
-static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
 {
 	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
 	u32 cpu_clock;
@@ -817,35 +804,35 @@ static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
 }
 
 /* Register indirect accesses */
-static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
 {
 	u32 pll_curr_ratio;
 
-	pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
-	pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+	pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
+	pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
 
 	if (!ivpu_is_silicon(vdev))
 		return PLL_SIMULATION_FREQ;
 
-	return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+	return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
 }
 
-static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
 {
-	return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
+	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
 }
 
-static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
 {
-	return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
+	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
 }
 
-static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
 {
-	return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
+	return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
 }
 
-static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
 {
 	u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
 	u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
@@ -853,52 +840,52 @@ static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
 	REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
 }
 
-static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
 {
-	return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
+	return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
 }
 
-static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
 {
-	u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
+	u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
 
-	return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+	return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
 }
 
-static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
 {
 	REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
 }
 
-static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
 {
-	REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+	REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
 }
 
-static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
 {
-	REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
-	REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
-	REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
-	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
 }
 
-static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
 {
-	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-	REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
-	REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
-	REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+	REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+	REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
+	REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
 }
 
-static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
 	ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
 
 	ivpu_pm_schedule_recovery(vdev);
 }
 
-static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
 	ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
 
@@ -906,7 +893,7 @@ static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
 	ivpu_pm_schedule_recovery(vdev);
 }
 
-static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
 	ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
 
@@ -914,65 +901,66 @@ static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
 }
 
 /* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
+static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
 {
-	u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
 
-	REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
+	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
 		ivpu_mmu_irq_evtq_handler(vdev);
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
 		ivpu_ipc_irq_handler(vdev);
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
 		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
 		ivpu_mmu_irq_gerr_handler(vdev);
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
-		ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+		ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
-		ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+		ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
-		ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+		ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
 
 	return status;
 }
 
 /* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
 {
-	u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+	u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
 	bool schedule_recovery = false;
 
 	if (status == 0)
 		return 0;
 
 	/* Disable global interrupt before handling local buttress interrupts */
-	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
 
-	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
-		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
+			 REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
 
-	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
-		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
-		REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
+		REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
 		schedule_recovery = true;
 	}
 
-	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
-		u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
 
 		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
-			 ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
-			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
-			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
-		REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+		REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
 		schedule_recovery = true;
 	}
 
@@ -982,12 +970,12 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
 		 * Writing 1 triggers an interrupt, so we can't perform read update write.
		 * Clear local interrupt status by writing 0 to all bits.
		 */
-		REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
 	else
-		REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
+		REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
 
 	/* Re-enable global interrupt */
-	REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
 
 	if (schedule_recovery)
 		ivpu_pm_schedule_recovery(vdev);
@@ -995,65 +983,65 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
 	return status;
 }
 
-static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
 {
 	struct ivpu_device *vdev = ptr;
 	u32 ret_irqv, ret_irqb;
 
-	ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
-	ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+	ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
+	ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
 
 	return IRQ_RETVAL(ret_irqb | ret_irqv);
 }
 
-static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
+static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
 {
-	u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
-	u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+	u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+	u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
 
-	if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
+	if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
 		ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
 		ivpu_err(vdev, "WDT MSS timeout detected\n");
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
 		ivpu_err(vdev, "WDT NCE timeout detected\n");
 
-	if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
 		ivpu_err(vdev, "NOC Firewall irq detected\n");
 
-	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
-		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
 
-	if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
-		u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+		u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
 
 		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
-			 ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
-			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
-			 REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+			 ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+			 REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
 	}
 }
 
-const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
-	.info_init = ivpu_hw_mtl_info_init,
-	.power_up = ivpu_hw_mtl_power_up,
-	.is_idle = ivpu_hw_mtl_is_idle,
-	.power_down = ivpu_hw_mtl_power_down,
-	.boot_fw = ivpu_hw_mtl_boot_fw,
-	.wdt_disable = ivpu_hw_mtl_wdt_disable,
-	.diagnose_failure = ivpu_hw_mtl_diagnose_failure,
-	.reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
-	.reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
-	.reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
-	.reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
-	.reg_db_set = ivpu_hw_mtl_reg_db_set,
-	.reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
-	.reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
-	.reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
-	.irq_clear = ivpu_hw_mtl_irq_clear,
-	.irq_enable = ivpu_hw_mtl_irq_enable,
-	.irq_disable = ivpu_hw_mtl_irq_disable,
-	.irq_handler = ivpu_hw_mtl_irq_handler,
+const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
+	.info_init = ivpu_hw_37xx_info_init,
+	.power_up = ivpu_hw_37xx_power_up,
+	.is_idle = ivpu_hw_37xx_is_idle,
+	.power_down = ivpu_hw_37xx_power_down,
+	.boot_fw = ivpu_hw_37xx_boot_fw,
+	.wdt_disable = ivpu_hw_37xx_wdt_disable,
+	.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
+	.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+	.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
+	.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
+	.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
+	.reg_db_set = ivpu_hw_37xx_reg_db_set,
+	.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
+	.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
+	.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
+	.irq_clear = ivpu_hw_37xx_irq_clear,
+	.irq_enable = ivpu_hw_37xx_irq_enable,
+	.irq_disable = ivpu_hw_37xx_irq_disable,
+	.irq_handler = ivpu_hw_37xx_irq_handler,
};
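
Nearly every hunk in the rename above goes through REG_SET_FLD()/REG_CLR_FLD()/REG_GET_FLD(), which are read-modify-write helpers over the *_MASK constants that the new register header defines with GENMASK() and BIT_MASK(). A simplified, self-contained sketch of that field packing; the GENMASK32/FLD_* macros below are stand-ins for the kernel's bits.h/bitfield.h machinery, not the driver's actual helpers:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET(). */
#define GENMASK32(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FLD_GET(mask, v)	(((v) & (mask)) / ((mask) & -(mask)))
#define FLD_SET(mask, v, f)	(((v) & ~(mask)) | ((f) * ((mask) & -(mask))))

/* Two packed fields of a hypothetical fuse register, mirroring the
 * FMIN_FUSE layout in the header above. */
#define FMIN_FUSE_MIN_RATIO_MASK	GENMASK32(7, 0)
#define FMIN_FUSE_PN_RATIO_MASK		GENMASK32(15, 8)

int main(void)
{
	uint32_t fuse = 0;

	/* Read-modify-write: update each field without disturbing the rest. */
	fuse = FLD_SET(FMIN_FUSE_MIN_RATIO_MASK, fuse, 0x10);
	fuse = FLD_SET(FMIN_FUSE_PN_RATIO_MASK, fuse, 0x20);

	printf("reg=0x%08x min=0x%02x pn=0x%02x\n", fuse,
	       FLD_GET(FMIN_FUSE_MIN_RATIO_MASK, fuse),
	       FLD_GET(FMIN_FUSE_PN_RATIO_MASK, fuse));
	return 0;
}

Because the accessors take the register name and field name as tokens and paste them into REG_NAME_FIELD_MASK, the rename only has to touch the prefix; the call sites stay structurally identical, which is why the diff above is so mechanical.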
VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0) +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16) + +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0) +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16) + +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u +#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0) + +#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u +#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0) + +#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u +#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0) + +#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu +#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0) + +#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u + +#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u +#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0) +#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8) + +#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u +#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0) + +#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu +#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0) +#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2) + +#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u +#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u + +#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u +#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1) + +#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u +#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0) +#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1) + +#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u +#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0) +#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2) + +#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u +#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0) + +#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u +#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u +#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u + +#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u +#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u +#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u + +#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u +#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0) +#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12) +#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20) + +#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u + +#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u +#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1) +#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10) +#define VPU_37XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11) + +#define VPU_37XX_HOST_SS_CPR_RST_SET 0x00000094u +#define VPU_37XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1) +#define VPU_37XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10) +#define VPU_37XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11) + +#define VPU_37XX_HOST_SS_CPR_RST_CLR 0x00000098u +#define VPU_37XX_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0) +#define VPU_37XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1) +#define VPU_37XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10) +#define VPU_37XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11) + +#define 
VPU_37XX_HOST_SS_HW_VERSION 0x00000108u +#define VPU_37XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0) +#define VPU_37XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8) +#define VPU_37XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16) + +#define VPU_37XX_HOST_SS_GEN_CTRL 0x00000118u +#define VPU_37XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29) + +#define VPU_37XX_HOST_SS_NOC_QREQN 0x00000154u +#define VPU_37XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define VPU_37XX_HOST_SS_NOC_QACCEPTN 0x00000158u +#define VPU_37XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define VPU_37XX_HOST_SS_NOC_QDENY 0x0000015cu +#define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define MTL_VPU_TOP_NOC_QREQN 0x00000160u +#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0) +#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1) + +#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u +#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0) +#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1) + +#define MTL_VPU_TOP_NOC_QDENY 0x00000168u +#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0) +#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1) + +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0) +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1) +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2) +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3) +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4) +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5) +#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6) + +#define VPU_37XX_HOST_SS_ICB_STATUS_0 0x00010210u +#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30) +#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31) + +#define VPU_37XX_HOST_SS_ICB_STATUS_1 0x00010214u +#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0) +#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1) +#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2) + +#define VPU_37XX_HOST_SS_ICB_CLEAR_0 0x00010220u +#define VPU_37XX_HOST_SS_ICB_CLEAR_1 0x00010224u +#define VPU_37XX_HOST_SS_ICB_ENABLE_0 0x00010240u + +#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u + +#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu +#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0) +#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8) +#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16) +#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24) + +#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u +#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3) 
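+/*
+ * AON power-island control group: TRICKLE_EN0 and ISLAND_EN0 switch the
+ * island supply, STATUS0 reports the resulting state and ISO_EN0 above
+ * gates the isolation cells; bit 3 selects the MSS CPU island. The driver
+ * appears to sequence these as trickle enable, then full enable, then
+ * status poll, then isolation release (cf. ivpu_boot_pwr_domain_enable()).
+ */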
+ +#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u +#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3) + +#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u +#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3) + +#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu +#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3) + +#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u +#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0) + +#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u +#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0) + +#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO 0x00041040u +#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0) +#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1) +#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3) + +#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u +#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0) +#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16) + +#define VPU_37XX_HOST_MMU_IDR0 0x00200000u +#define VPU_37XX_HOST_MMU_IDR1 0x00200004u +#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu +#define VPU_37XX_HOST_MMU_IDR5 0x00200014u +#define VPU_37XX_HOST_MMU_CR0 0x00200020u +#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u +#define VPU_37XX_HOST_MMU_CR1 0x00200028u +#define VPU_37XX_HOST_MMU_CR2 0x0020002cu +#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u +#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u + +#define VPU_37XX_HOST_MMU_GERROR 0x00200060u +#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0) +#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2) +#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3) +#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4) +#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5) +#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6) +#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7) + +#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u + +#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u +#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u +#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u +#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u +#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu +#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u +#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u +#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu +#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K) +#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K) + +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6) +#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11) + +#define VPU_37XX_HOST_IF_TBU_MMUSSIDV 0x00360004u +#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0) +#define 
VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
+
+#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
+#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
+#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
+#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
+
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
+#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
+
+#endif /* __IVPU_HW_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
new file mode 100644
index 000000000000..34626d66fa10
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#include <linux/dmi.h>
+
+#define TILE_MAX_NUM 6
+#define TILE_MAX_MASK 0x3f
+
+#define LNL_HW_ID 0x4040
+
+#define SKU_TILE_SHIFT 0u
+#define SKU_TILE_MASK 0x0000ffffu
+#define SKU_HW_ID_SHIFT 16u
+#define SKU_HW_ID_MASK 0xffff0000u
+
+#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CDYN_DEFAULT 0x80
+#define PLL_EPP_DEFAULT 0x80
+#define PLL_REF_CLK_FREQ (50 * 1000000)
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
+#define PLL_PROFILING_FREQ_DEFAULT 38400000
+#define PLL_PROFILING_FREQ_HIGH 400000000
+
+#define TIM_SAFE_ENABLE 0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
+
+#define TIMEOUT_US (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+
+#define WEIGHTS_DEFAULT 0xf711f711u
+#define WEIGHTS_ATS_DEFAULT 0x0000f711u
+
+#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) + +#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) + +#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) + +#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \ + (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ + (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \ + (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \ + (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \ + (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \ + (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR))) + +#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) +#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) + +#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) + +static char *ivpu_platform_to_str(u32 platform) +{ + switch (platform) { + case IVPU_PLATFORM_SILICON: + return "IVPU_PLATFORM_SILICON"; + case IVPU_PLATFORM_SIMICS: + return "IVPU_PLATFORM_SIMICS"; + case IVPU_PLATFORM_FPGA: + return "IVPU_PLATFORM_FPGA"; + default: + return "Invalid platform"; + } +} + +static const struct dmi_system_id ivpu_dmi_platform_simulation[] = { + { + .ident = "Intel Simics", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"), + DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_BOARD_SERIAL, "123456789"), + }, + }, + { + .ident = "Intel Simics", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Simics"), + }, + }, + { } +}; + +static void ivpu_hw_read_platform(struct ivpu_device *vdev) +{ + if (dmi_check_system(ivpu_dmi_platform_simulation)) + vdev->platform = IVPU_PLATFORM_SIMICS; + else + vdev->platform = IVPU_PLATFORM_SILICON; + + ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", + ivpu_platform_to_str(vdev->platform), vdev->platform); +} + +static void ivpu_hw_wa_init(struct ivpu_device *vdev) +{ + vdev->wa.punit_disabled = ivpu_is_fpga(vdev); + vdev->wa.clear_runtime_mem = false; + + if (ivpu_hw_gen(vdev) == IVPU_HW_40XX) + vdev->wa.disable_clock_relinquish = true; +} + +static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) +{ + if (ivpu_is_fpga(vdev)) { + vdev->timeout.boot = 100000; + vdev->timeout.jsm = 50000; + vdev->timeout.tdr = 2000000; + vdev->timeout.reschedule_suspend = 1000; + } else if (ivpu_is_simics(vdev)) { + vdev->timeout.boot = 50; + vdev->timeout.jsm = 500; + vdev->timeout.tdr = 10000; + vdev->timeout.reschedule_suspend = 10; + } else { + vdev->timeout.boot = 1000; + vdev->timeout.jsm = 500; + vdev->timeout.tdr = 2000; + vdev->timeout.reschedule_suspend = 10; + } +} + +static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) +{ + return 
REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); +} + +static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, + u16 target_ratio, u16 epp, u16 config, u16 cdyn) +{ + int ret; + u32 val; + + ret = ivpu_pll_wait_for_cmd_send(vdev); + if (ret) { + ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret); + return ret; + } + + val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0); + val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); + val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); + REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val); + + val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1); + val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); + val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val); + REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val); + + val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2); + val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val); + val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val); + REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val); + + val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD); + val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val); + REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val); + + ret = ivpu_pll_wait_for_cmd_send(vdev); + if (ret) + ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret); + + return ret; +} + +static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) +{ + return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); +} + +static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) +{ + struct ivpu_hw_info *hw = vdev->hw; + u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio; + u32 fmin_fuse, fmax_fuse; + + fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE); + fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse); + fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse); + + fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE); + fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse); + + hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio); + hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio); + hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); +} + +static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) +{ + u16 config = enable ? PLL_CONFIG_DEFAULT : 0; + u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0; + u16 epp = enable ? 
PLL_EPP_DEFAULT : 0;
+	struct ivpu_hw_info *hw = vdev->hw;
+	u16 target_ratio = hw->pll.pn_ratio;
+	int ret;
+
+	ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
+		 PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);
+
+	ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
+				target_ratio, epp, config, cdyn);
+	if (ret) {
+		ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+		return ret;
+	}
+
+	if (enable) {
+		ret = ivpu_pll_wait_for_status_ready(vdev);
+		if (ret) {
+			ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+	return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+	return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);
+
+	if (enable) {
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+	} else {
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
+	}
+
+	REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);
+
+	if (enable) {
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+	} else {
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
+	}
+
+	REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
+}
+
+/*
+ * The NOC power handshake below appears to follow the AMBA Q-channel
+ * pattern: QREQN mirrors the request the driver drives, while QACCEPTN
+ * and QDENY carry the fabric's response, so each check compares a field
+ * against the value the preceding write should have produced.
+ */
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+
+	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+		return -EIO;
+
+	return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);
+
+	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+		return -EIO;
+
+	return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);
+
+	if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+		return -EIO;
+
+	return 0;
+}
+
+static int ivpu_boot_top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+
+	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+		return -EIO;
+
+	return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
+
+	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+		return -EIO;
+
+	return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
+
+	if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+	    !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+		return -EIO;
+
+	return 0;
+}
+
+static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
+{
+	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);
+
+	if (enable)
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+	else
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
+
+	REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
+}
+
+static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
+{
+	int ret;
+
+	ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+	if (ret)
+		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+	return ret;
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+	int ret;
+	u32 val;
+
+	val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
+	if (enable)
+		val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+	else
+		val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+	REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
+
+	ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+		return ret;
+	}
+
+	if (enable) {
+		REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
+		REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
+	}
+
+	return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+	return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+	int ret;
+	u32 val;
+
+	val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
+	if (enable) {
+		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+	} else {
+		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
+		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+	}
+	REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
+
+	ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ?
0x1 : 0x0); + if (ret) { + ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); + return ret; + } + + ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0); + if (ret) + ivpu_err(vdev, "Failed qdeny check: %d\n", ret); + + return ret; +} + +static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev) +{ + return ivpu_boot_host_ss_top_noc_drive(vdev, true); +} + +static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable) +{ + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); + + if (enable) + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); + else + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); + + REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); + + if (enable) + ndelay(500); +} + +static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable) +{ + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0); + + if (enable) + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); + else + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); + + REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val); + + if (!enable) + ndelay(500); +} + +static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val) +{ + if (ivpu_is_fpga(vdev)) + return 0; + + return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, + exp_val, PWR_ISLAND_STATUS_TIMEOUT_US); +} + +static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable) +{ + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0); + + if (enable) + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); + else + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); + + REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val); +} + +static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) +{ + u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); + + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); + val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); + val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); + + REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); +} + +static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev) +{ + u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV); + + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); + + REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val); +} + +static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN); + + if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) +{ + u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY); + + if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val)) + return -EIO; + + return 0; +} + +static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) +{ + int ret; + + ivpu_boot_pwr_island_trickle_drive(vdev, true); + 
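+	/*
+	 * Trickle-enable the island before the full enable; the trickle
+	 * helper waits 500 ns after setting the bit, presumably to let the
+	 * CSS CPU rail ramp before the main power switch closes.
+	 */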
ivpu_boot_pwr_island_drive(vdev, true);
+
+	ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+	if (ret) {
+		ivpu_err(vdev, "Timed out waiting for power island status\n");
+		return ret;
+	}
+
+	ret = ivpu_boot_top_noc_qreqn_check(vdev, 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
+		return ret;
+	}
+
+	ivpu_boot_host_ss_clk_drive(vdev, true);
+	ivpu_boot_host_ss_rst_drive(vdev, true);
+	ivpu_boot_pwr_island_isolation_drive(vdev, false);
+
+	return ret;
+}
+
+static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
+{
+	int ret;
+	u32 val;
+
+	val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
+	if (enable)
+		val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+	else
+		val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
+	REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);
+
+	ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+	if (ret) {
+		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+		return ret;
+	}
+
+	ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
+	if (ret)
+		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+	return ret;
+}
+
+static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
+{
+	return ivpu_boot_soc_cpu_drive(vdev, true);
+}
+
+static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+	int ret;
+	u32 val;
+	u64 val64;
+
+	ret = ivpu_boot_soc_cpu_enable(vdev);
+	if (ret) {
+		ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
+		return ret;
+	}
+
+	/* Shift the FW entry point into the IMAGE_LOCATION field, then latch it with DONE. */
+	val64 = vdev->fw->entry_point;
+	val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
+	REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);
+
+	val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
+	val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
+	REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
+
+	ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+		 ivpu_fw_is_cold_boot(vdev) ?
"cold boot" : "resume"); + + return 0; +} + +static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) +{ + int ret; + u32 val; + + ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); + return ret; + } + + val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL); + if (enable) + val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val); + else + val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val); + REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val); + + ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); + return ret; + } + + return 0; +} + +static bool ivpu_tile_disable_check(u32 config) +{ + /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */ + if (config == 0) + return true; + + if (config > BIT(TILE_MAX_NUM - 1)) + return false; + + if ((config & (config - 1)) == 0) + return true; + + return false; +} + +static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) +{ + struct ivpu_hw_info *hw = vdev->hw; + u32 tile_disable; + u32 tile_enable; + u32 fuse; + + fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE); + if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) { + ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); + return -EIO; + } + + tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse); + if (!ivpu_tile_disable_check(tile_disable)) { + ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable); + return -EIO; + } + + if (tile_disable) + ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n", + TILE_MAX_NUM - 1, ffs(tile_disable) - 1); + else + ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); + + tile_enable = (~tile_disable) & TILE_MAX_MASK; + + hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku); + hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku); + hw->tile_fuse = tile_disable; + hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; + + ivpu_pll_init_frequency_ratios(vdev); + + ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M); + ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M); + ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M); + ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G); + + return 0; +} + +static int ivpu_hw_40xx_reset(struct ivpu_device *vdev) +{ + int ret; + u32 val; + + ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) { + ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); + return ret; + } + + val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET); + val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val); + REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val); + + ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US); + if (ret) + ivpu_err(vdev, "Timed out waiting for RESET completion\n"); + + return ret; +} + +static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev) +{ + int ret; + + if (IVPU_WA(punit_disabled)) + return 0; + + ret = ivpu_boot_d0i3_drive(vdev, true); + if (ret) + ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); + + udelay(5); /* VPU requires 5 us to complete the transition */ + + return ret; +} + +static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev) +{ + int ret; + + if (IVPU_WA(punit_disabled)) + return 0; + + ret = ivpu_boot_d0i3_drive(vdev, false); + if (ret) + ivpu_err(vdev, "Failed to disable 
D0i3: %d\n", ret); + + return ret; +} + +static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev) +{ + u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); + + if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) + val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val); + else + val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val); + + REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val); +} + +static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev) +{ + ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", + REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable"); +} + +static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev) +{ + u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); + + val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val); + REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val); +} + +static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev) +{ + int ret; + + ret = ivpu_hw_40xx_reset(vdev); + if (ret) { + ivpu_err(vdev, "Failed to reset HW: %d\n", ret); + return ret; + } + + ivpu_hw_read_platform(vdev); + ivpu_hw_wa_init(vdev); + ivpu_hw_timeouts_init(vdev); + + ret = ivpu_hw_40xx_d0i3_disable(vdev); + if (ret) + ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); + + ret = ivpu_pll_enable(vdev); + if (ret) { + ivpu_err(vdev, "Failed to enable PLL: %d\n", ret); + return ret; + } + + if (IVPU_WA(disable_clock_relinquish)) + ivpu_hw_40xx_clock_relinquish_disable(vdev); + ivpu_hw_40xx_profiling_freq_reg_set(vdev); + ivpu_hw_40xx_ats_print(vdev); + + ret = ivpu_boot_host_ss_check(vdev); + if (ret) { + ivpu_err(vdev, "Failed to configure host SS: %d\n", ret); + return ret; + } + + ivpu_boot_idle_gen_drive(vdev, false); + + ret = ivpu_boot_pwr_domain_enable(vdev); + if (ret) { + ivpu_err(vdev, "Failed to enable power domain: %d\n", ret); + return ret; + } + + ret = ivpu_boot_host_ss_axi_enable(vdev); + if (ret) { + ivpu_err(vdev, "Failed to enable AXI: %d\n", ret); + return ret; + } + + ret = ivpu_boot_host_ss_top_noc_enable(vdev); + if (ret) + ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret); + + return ret; +} + +static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev) +{ + int ret; + + ivpu_boot_no_snoop_enable(vdev); + ivpu_boot_tbu_mmu_enable(vdev); + + ret = ivpu_boot_soc_cpu_boot(vdev); + if (ret) + ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret); + + return ret; +} + +static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev) +{ + u32 val; + + if (IVPU_WA(punit_disabled)) + return true; + + val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); + return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) && + REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val); +} + +static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev) +{ + int ret = 0; + + if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev)) + ivpu_warn(vdev, "Failed to reset the VPU\n"); + + if (ivpu_pll_disable(vdev)) { + ivpu_err(vdev, "Failed to disable PLL\n"); + ret = -EIO; + } + + if (ivpu_hw_40xx_d0i3_enable(vdev)) { + ivpu_err(vdev, "Failed to enter D0I3\n"); + ret = -EIO; + } + + return ret; +} + +static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev) +{ + u32 val; + + REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); + REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); + + REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); + REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0); + + val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG); + val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, 
val); + REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val); +} + +/* Register indirect accesses */ +static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev) +{ + u32 pll_curr_ratio; + + pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ); + pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK; + + return PLL_RATIO_TO_FREQ(pll_curr_ratio); +} + +static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev) +{ + return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET); +} + +static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev) +{ + return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE); +} + +static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev) +{ + return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE); +} + +static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id) +{ + u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0; + u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET); + + REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); +} + +static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev) +{ + return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM); +} + +static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev) +{ + u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT); + + return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); +} + +static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) +{ + REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); +} + +static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev) +{ + REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK); +} + +static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev) +{ + REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK); + REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK); + REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK); + REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); +} + +static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev) +{ + REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); + REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK); + REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull); + REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul); +} + +static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev) +{ + /* TODO: For LNN hang consider engine reset instead of full recovery */ + ivpu_pm_schedule_recovery(vdev); +} + +static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev) +{ + ivpu_hw_wdt_disable(vdev); + ivpu_pm_schedule_recovery(vdev); +} + +static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev) +{ + ivpu_pm_schedule_recovery(vdev); +} + +/* Handler for IRQs from VPU core (irqV) */ +static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq) +{ + u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; + irqreturn_t ret = IRQ_NONE; + + if (!status) + return IRQ_NONE; + + REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) + ivpu_mmu_irq_evtq_handler(vdev); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) + ret |= ivpu_ipc_irq_handler(vdev); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) + ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) + 
ivpu_mmu_irq_gerr_handler(vdev); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) + ivpu_hw_40xx_irq_wdt_mss_handler(vdev); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) + ivpu_hw_40xx_irq_wdt_nce_handler(vdev); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) + ivpu_hw_40xx_irq_noc_firewall_handler(vdev); + + return ret; +} + +/* Handler for IRQs from Buttress core (irqB) */ +static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) +{ + bool schedule_recovery = false; + u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; + + if (status == 0) + return IRQ_NONE; + + REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status); + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) + ivpu_dbg(vdev, IRQ, "FREQ_CHANGE"); + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) { + ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", + REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1), + REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2)); + REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1); + schedule_recovery = true; + } + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) { + ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG)); + REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1); + schedule_recovery = true; + } + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) { + ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG)); + REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1); + schedule_recovery = true; + } + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) { + ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", + REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW), + REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH)); + REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1); + schedule_recovery = true; + } + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) { + ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", + REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW), + REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH)); + REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1); + schedule_recovery = true; + } + + if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) { + ivpu_err(vdev, "Survivability error detected\n"); + schedule_recovery = true; + } + + if (schedule_recovery) + ivpu_pm_schedule_recovery(vdev); + + return IRQ_HANDLED; +} + +static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr) +{ + struct ivpu_device *vdev = ptr; + irqreturn_t ret = IRQ_NONE; + + ret |= ivpu_hw_40xx_irqv_handler(vdev, irq); + ret |= ivpu_hw_40xx_irqb_handler(vdev, irq); + + if (ret & IRQ_WAKE_THREAD) + return IRQ_WAKE_THREAD; + + return ret; +} + +static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev) +{ + u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; + u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; + + if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev)) + ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv)) + ivpu_err(vdev, "WDT MSS timeout detected\n"); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv)) + ivpu_err(vdev, "WDT NCE timeout detected\n"); + + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, 
irqv))
+		ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
+		ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
+			 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
+			 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
+	}
+
+	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
+		ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
+
+	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
+		ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
+
+	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
+		ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
+			 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
+			 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
+
+	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
+		ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
+			 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
+			 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
+
+	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
+		ivpu_err(vdev, "Survivability error detected\n");
+}
+
+const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
+	.info_init = ivpu_hw_40xx_info_init,
+	.power_up = ivpu_hw_40xx_power_up,
+	.is_idle = ivpu_hw_40xx_is_idle,
+	.power_down = ivpu_hw_40xx_power_down,
+	.boot_fw = ivpu_hw_40xx_boot_fw,
+	.wdt_disable = ivpu_hw_40xx_wdt_disable,
+	.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
+	.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+	.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
+	.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
+	.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
+	.reg_db_set = ivpu_hw_40xx_reg_db_set,
+	.reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
+	.reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
+	.reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
+	.irq_clear = ivpu_hw_40xx_irq_clear,
+	.irq_enable = ivpu_hw_40xx_irq_enable,
+	.irq_disable = ivpu_hw_40xx_irq_disable,
+	.irq_handler = ivpu_hw_40xx_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
new file mode 100644
index 000000000000..5139cfe88532
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_40XX_REG_H__
+#define __IVPU_HW_40XX_REG_H__
+
+#include <linux/bits.h>
+
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
+#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
+
+#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
+#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
+
+#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
+
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
+#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
+#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
+
+#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
+#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
+
+#define
VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u + +#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u +#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u + +#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u +#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu +#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u + +#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u + +#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u +#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu +#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u + +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0) +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16) + +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0) +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16) + +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0) +#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16) + +#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu +#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0) + +#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u +#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0) + +#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u +#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0) +#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1) + +#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u +#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0) +#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1) +#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2) +#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11) +#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12) + +#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u +#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0) + +#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u +#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0) +#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2) + +#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u +#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu +#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u + +#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u +#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0) +#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8) + +#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u +#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0) + +#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u +#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10) +#define VPU_40XX_HOST_SS_CPR_CLK_EN_CSS_MAS_MASK BIT_MASK(11) + +#define VPU_40XX_HOST_SS_CPR_CLK_SET 0x00000084u +#define VPU_40XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10) +#define VPU_40XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11) + +#define VPU_40XX_HOST_SS_CPR_RST_EN 0x00000090u +#define VPU_40XX_HOST_SS_CPR_RST_EN_TOP_NOC_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_CPR_RST_EN_DSS_MAS_MASK BIT_MASK(10) +#define VPU_40XX_HOST_SS_CPR_RST_EN_CSS_MAS_MASK BIT_MASK(11) + +#define VPU_40XX_HOST_SS_CPR_RST_SET 0x00000094u +#define 
VPU_40XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10) +#define VPU_40XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11) + +#define VPU_40XX_HOST_SS_CPR_RST_CLR 0x00000098u +#define VPU_40XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10) +#define VPU_40XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11) + +#define VPU_40XX_HOST_SS_HW_VERSION 0x00000108u +#define VPU_40XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0) +#define VPU_40XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8) +#define VPU_40XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16) + +#define VPU_40XX_HOST_SS_SW_VERSION 0x0000010cu + +#define VPU_40XX_HOST_SS_GEN_CTRL 0x00000118u +#define VPU_40XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29) + +#define VPU_40XX_HOST_SS_NOC_QREQN 0x00000154u +#define VPU_40XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define VPU_40XX_HOST_SS_NOC_QACCEPTN 0x00000158u +#define VPU_40XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define VPU_40XX_HOST_SS_NOC_QDENY 0x0000015cu +#define VPU_40XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0) + +#define VPU_40XX_TOP_NOC_QREQN 0x00000160u +#define VPU_40XX_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0) +#define VPU_40XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(2) + +#define VPU_40XX_TOP_NOC_QACCEPTN 0x00000164u +#define VPU_40XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0) +#define VPU_40XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(2) + +#define VPU_40XX_TOP_NOC_QDENY 0x00000168u +#define VPU_40XX_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0) +#define VPU_40XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(2) + +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0) +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2) +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3) +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4) +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5) +#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6) + +#define VPU_40XX_HOST_SS_ICB_STATUS_0 0x00010210u +#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30) +#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31) + +#define VPU_40XX_HOST_SS_ICB_STATUS_1 0x00010214u +#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0) +#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1) +#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2) + +#define VPU_40XX_HOST_SS_ICB_CLEAR_0 0x00010220u +#define VPU_40XX_HOST_SS_ICB_CLEAR_1 0x00010224u +#define VPU_40XX_HOST_SS_ICB_ENABLE_0 0x00010240u +#define VPU_40XX_HOST_SS_ICB_ENABLE_1 
0x00010244u + +#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u + +#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu +#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16) + +#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u +#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0_CSS_CPU_MASK BIT_MASK(3) + +#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u +#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0_CSS_CPU_MASK BIT_MASK(3) + +#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u +#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_CSS_CPU_MASK BIT_MASK(3) + +#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu +#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3) + +#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u +#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0) +#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1) + +#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u +#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0) + +#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u +#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0) +#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1) +#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3) + +#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u +#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0) +#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16) + +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_SNOOP_OVERRIDE_EN_MASK BIT_MASK(3) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AW_SNOOP_OVERRIDE_MASK BIT_MASK(4) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AR_SNOOP_OVERRIDE_MASK BIT_MASK(5) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6) +#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11) + +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV 0x00360004u +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8) +#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9) + +#define VPU_40XX_CPU_SS_DSU_LEON_RT_BASE 0x04000000u +#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u +#define VPU_40XX_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u +#define VPU_40XX_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u +#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u + +#define VPU_40XX_CPU_SS_TIM_WATCHDOG 0x0102009cu +#define VPU_40XX_CPU_SS_TIM_WDOG_EN 0x010200a4u +#define VPU_40XX_CPU_SS_TIM_SAFE 0x010200a8u + +#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG 0x01021008u +#define 
VPU_40XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9) + +#define VPU_40XX_CPU_SS_CPR_NOC_QREQN 0x01010030u +#define VPU_40XX_CPU_SS_CPR_NOC_QREQN_TOP_MMIO_MASK BIT_MASK(0) + +#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN 0x01010034u +#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN_TOP_MMIO_MASK BIT_MASK(0) + +#define VPU_40XX_CPU_SS_CPR_NOC_QDENY 0x01010038u +#define VPU_40XX_CPU_SS_CPR_NOC_QDENY_TOP_MMIO_MASK BIT_MASK(0) + +#define VPU_40XX_CPU_SS_TIM_IPC_FIFO 0x010200f0u +#define VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT 0x01029008u + +#define VPU_40XX_CPU_SS_DOORBELL_0 0x01300000u +#define VPU_40XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0) + +#define VPU_40XX_CPU_SS_DOORBELL_1 0x01301000u + +#endif /* __IVPU_HW_40XX_REG_H__ */ diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h deleted file mode 100644 index 593b8ff07417..000000000000 --- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h +++ /dev/null @@ -1,281 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2020-2023 Intel Corporation - */ - -#ifndef __IVPU_HW_MTL_REG_H__ -#define __IVPU_HW_MTL_REG_H__ - -#include - -#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u - -#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u -#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0) -#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1) -#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2) - -#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u -#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0) -#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16) - -#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu -#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0) -#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16) - -#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u -#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0) - -#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u -#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0) - -#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u -#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0) - -#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu -#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0) - -#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u - -#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u -#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0) -#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8) - -#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u -#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0) - -#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu -#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0) -#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2) - -#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u -#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u - -#define MTL_BUTTRESS_PLL_STATUS 0x00000040u -#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1) - -#define MTL_BUTTRESS_VPU_STATUS 0x00000044u -#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0) -#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1) - -#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u -#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0) -#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2) - -#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u -#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0) - -#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u -#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u -#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u - -#define 
MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u -#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u -#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u - -#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u -#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0) -#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12) -#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20) - -#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u - -#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u -#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1) -#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10) -#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11) - -#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u -#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1) -#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10) -#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11) - -#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u -#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0) -#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1) -#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10) -#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11) - -#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u -#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0) -#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8) -#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16) - -#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u -#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29) - -#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u -#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0) - -#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u -#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0) - -#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu -#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0) - -#define MTL_VPU_TOP_NOC_QREQN 0x00000160u -#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0) -#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1) - -#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u -#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0) -#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1) - -#define MTL_VPU_TOP_NOC_QDENY 0x00000168u -#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0) -#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1) - -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0) -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1) -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2) -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3) -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4) -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5) -#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6) - -#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u -#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7) -#define 
MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30) -#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31) - -#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u -#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0) -#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1) -#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2) - -#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u -#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u -#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u - -#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u - -#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu -#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0) -#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8) -#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16) -#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24) - -#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u -#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3) - -#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u -#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3) - -#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u -#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3) - -#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu -#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3) - -#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u -#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0) - -#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u -#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0) - -#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u -#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0) -#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1) -#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3) - -#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u -#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0) -#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16) - -#define MTL_VPU_HOST_MMU_IDR0 0x00200000u -#define MTL_VPU_HOST_MMU_IDR1 0x00200004u -#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu -#define MTL_VPU_HOST_MMU_IDR5 0x00200014u -#define MTL_VPU_HOST_MMU_CR0 0x00200020u -#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u -#define MTL_VPU_HOST_MMU_CR1 0x00200028u -#define MTL_VPU_HOST_MMU_CR2 0x0020002cu -#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u -#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u - -#define MTL_VPU_HOST_MMU_GERROR 0x00200060u -#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0) -#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2) -#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3) -#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4) -#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5) -#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6) -#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7) - -#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u - -#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u -#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u -#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u -#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u -#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu -#define 
MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u -#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u -#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu -#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K) -#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K) - -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6) -#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11) - -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8) -#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9) - -#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u -#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u -#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u -#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u -#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u - -#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u -#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1) - -#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u -#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1) - -#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u -#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0) -#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1) -#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2) -#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3) -#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4) - -#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu -#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u -#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u -#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u - -#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u -#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9) - -#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u -#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0) - -#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u - -#endif /* __IVPU_HW_MTL_REG_H__ */ diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index d45be0615b47..de9e69f70af7 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -289,15 +289,13 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count) { struct ivpu_device *vdev = file_priv->vdev; struct 
ivpu_job *job; - size_t buf_size; int ret; ret = ivpu_rpm_get(vdev); if (ret < 0) return NULL; - buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *); - job = kzalloc(buf_size, GFP_KERNEL); + job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL); if (!job) goto err_rpm_put; diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index b8b259b3aa63..baefaf7bb3cb 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -7,7 +7,7 @@ #include #include "ivpu_drv.h" -#include "ivpu_hw_mtl_reg.h" +#include "ivpu_hw_37xx_reg.h" #include "ivpu_hw_reg_io.h" #include "ivpu_mmu.h" #include "ivpu_mmu_context.h" @@ -143,6 +143,16 @@ #define IVPU_MMU_CD_0_ASET BIT(47) #define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48) +#define IVPU_MMU_T0SZ_48BIT 16 +#define IVPU_MMU_T0SZ_38BIT 26 + +#define IVPU_MMU_IPS_48BIT 5 +#define IVPU_MMU_IPS_44BIT 4 +#define IVPU_MMU_IPS_42BIT 3 +#define IVPU_MMU_IPS_40BIT 2 +#define IVPU_MMU_IPS_36BIT 1 +#define IVPU_MMU_IPS_32BIT 0 + #define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4) #define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59) @@ -176,13 +186,13 @@ #define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC) #define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC) -#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \ - (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \ - (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \ - (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \ - (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \ - (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \ - (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT))) +#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \ + (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \ + (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \ + (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \ + (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \ + (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \ + (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT))) static char *ivpu_mmu_event_to_str(u32 cmd) { @@ -240,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev) else val_ref = IVPU_MMU_IDR0_REF; - val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0); + val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0); if (val != val_ref) ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref); - val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1); + val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1); if (val != IVPU_MMU_IDR1_REF) ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF); - val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3); + val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3); if (val != IVPU_MMU_IDR3_REF) ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF); @@ -259,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev) else val_ref = IVPU_MMU_IDR5_REF; - val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5); + val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5); if (val != val_ref) ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref); } @@ -386,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev) u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN; int ret; - ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0); + ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0); if (ret) return ret; - return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl); + return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl); } static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device 
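The kzalloc() conversion in ivpu_create_job() above relies on struct_size() for a struct ending in a flexible array member. A minimal sketch with a hypothetical, stripped-down struct (the real ivpu_job carries more fields) showing why the helper is preferred over the open-coded multiply-and-add:

#include <linux/overflow.h>
#include <linux/slab.h>

struct ivpu_bo;			/* opaque here; only pointers are stored */

struct example_job {
	u32 bo_count;
	struct ivpu_bo *bos[];	/* flexible array member */
};

static struct example_job *example_job_alloc(u32 bo_count)
{
	struct example_job *job;

	/* struct_size() saturates to SIZE_MAX on overflow, so kzalloc()
	 * fails cleanly instead of returning an undersized buffer. */
	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
	if (!job)
		return NULL;

	job->bo_count = bo_count;
	return job;
}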
*vdev) { struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; - return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons), + return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons), IVPU_MMU_QUEUE_TIMEOUT_US); } @@ -437,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev) return ret; clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); - REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod); + REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod); ret = ivpu_mmu_cmdq_wait_for_cons(vdev); if (ret) @@ -485,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) mmu->evtq.prod = 0; mmu->evtq.cons = 0; - ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0); + ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0); if (ret) return ret; @@ -495,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) | FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) | FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB); - REGV_WR32(MTL_VPU_HOST_MMU_CR1, val); + REGV_WR32(VPU_37XX_HOST_MMU_CR1, val); - REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q); - REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg); + REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q); + REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg); - REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q); - REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0); - REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0); + REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q); + REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0); + REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0); val = IVPU_MMU_CR0_CMDQEN; - ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val); if (ret) return ret; @@ -521,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) if (ret) return ret; - REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q); - REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0); - REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0); + REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q); + REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0); + REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0); val |= IVPU_MMU_CR0_EVTQEN; - ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val); if (ret) return ret; val |= IVPU_MMU_CR0_ATSCHK; - ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val); if (ret) return ret; @@ -540,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) return ret; val |= IVPU_MMU_CR0_SMMUEN; - return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); + return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val); } static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid) @@ -617,12 +627,12 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE); if (cd_dma != 0) { - cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) | + cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) | FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) | FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) | FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) | FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) | - FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) | + FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) | FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) | IVPU_MMU_CD_0_TCR_EPD1 | IVPU_MMU_CD_0_AA64 | @@ 
-791,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev) u32 idx = IVPU_MMU_Q_IDX(evtq->cons); u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); - evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC); + evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC); if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) return NULL; clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE); evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; - REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons); + REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons); return evt; } @@ -831,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev) ivpu_dbg(vdev, IRQ, "MMU error\n"); - gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR); - gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN); + gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR); + gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN); active = gerror_val ^ gerrorn_val; if (!(active & IVPU_MMU_GERROR_ERR_MASK)) return; - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active)) ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n"); - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active)) ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n"); - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active)) ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n"); - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active)) ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active)) ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n"); - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active)) ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n"); - if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active)) + if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active)) ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); - REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val); + REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val); } int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable) diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index 8ce9b12ac356..1d2e554e2c4a 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -11,10 +11,12 @@ #include "ivpu_mmu.h" #include "ivpu_mmu_context.h" -#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30) +#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39) +#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30) #define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21) #define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12) -#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0) +#define IVPU_MMU_ENTRY_FLAGS_MASK (BIT(52) | GENMASK(11, 0)) +#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52) #define IVPU_MMU_ENTRY_FLAG_NG BIT(11) #define IVPU_MMU_ENTRY_FLAG_AF BIT(10) #define IVPU_MMU_ENTRY_FLAG_USER BIT(6) @@ -22,10 +24,13 @@ #define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1) #define IVPU_MMU_ENTRY_FLAG_VALID BIT(0) -#define IVPU_MMU_PAGE_SIZE SZ_4K -#define IVPU_MMU_PTE_MAP_SIZE 
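A worked example of the four-level split defined by the new index masks above (PGD bits 47:39, PUD 38:30, PMD 29:21, PTE 20:12, 4 KiB pages); the address value and function name are illustrative only:

#include <linux/bitfield.h>
#include <linux/printk.h>

static void example_va_split(void)
{
	u64 vpu_addr = (3ull << 39) | (5ull << 30) | (7ull << 21) | (9ull << 12);
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);	/* 3 */
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);	/* 5 */
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);	/* 7 */
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);	/* 9 */

	/* Each PGD entry spans 512 GiB, each PUD entry 1 GiB,
	 * each PMD entry 2 MiB, each PTE 4 KiB. */
	pr_debug("idx: %d %d %d %d\n", pgd_idx, pud_idx, pmd_idx, pte_idx);
}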
(IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE) -#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE) -#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64)) +#define IVPU_MMU_PAGE_SIZE SZ_4K +#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16) +#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE) +#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE) +#define IVPU_MMU_PUD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE) +#define IVPU_MMU_PGD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE) +#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64)) #define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000 #define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID) @@ -36,167 +41,268 @@ static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) { dma_addr_t pgd_dma; - u64 *pgd; - pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL); - if (!pgd) + pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, + GFP_KERNEL); + if (!pgtable->pgd_dma_ptr) return -ENOMEM; - pgtable->pgd = pgd; pgtable->pgd_dma = pgd_dma; return 0; } -static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) +static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr) { - int pgd_index, pmd_index; + if (cpu_addr) + dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr, + dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK); +} - for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) { - u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index]; - u64 *pmd = pgtable->pgd_entries[pgd_index]; +static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) +{ + int pgd_idx, pud_idx, pmd_idx; + dma_addr_t pud_dma, pmd_dma, pte_dma; + u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr; - if (!pmd_entries) + for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) { + pud_dma_ptr = pgtable->pud_ptrs[pgd_idx]; + pud_dma = pgtable->pgd_dma_ptr[pgd_idx]; + + if (!pud_dma_ptr) continue; - for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) { - if (pmd_entries[pmd_index]) - dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, - pmd_entries[pmd_index], - pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK); + for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) { + pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx]; + pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx]; + + if (!pmd_dma_ptr) + continue; + + for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) { + pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx]; + pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx]; + + ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma); + } + + kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]); + ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma); } - kfree(pmd_entries); - dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index], - pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK); + kfree(pgtable->pmd_ptrs[pgd_idx]); + kfree(pgtable->pte_ptrs[pgd_idx]); + ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma); } - dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd, - pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK); + ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma); } static u64* -ivpu_mmu_ensure_pmd(struct 
ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index) +ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx) { - u64 **pmd_entries; - dma_addr_t pmd_dma; - u64 *pmd; + u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx]; + dma_addr_t pud_dma; - if (pgtable->pgd_entries[pgd_index]) - return pgtable->pgd_entries[pgd_index]; + if (pud_dma_ptr) + return pud_dma_ptr; - pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL); - if (!pmd) + pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL); + if (!pud_dma_ptr) return NULL; - pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL); - if (!pmd_entries) - goto err_free_pgd; + drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]); + pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL); + if (!pgtable->pmd_ptrs[pgd_idx]) + goto err_free_pud_dma_ptr; - pgtable->pgd_entries[pgd_index] = pmd; - pgtable->pgd_cpu_entries[pgd_index] = pmd_entries; - pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID; + drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]); + pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL); + if (!pgtable->pte_ptrs[pgd_idx]) + goto err_free_pmd_ptrs; - return pmd; + pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr; + pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID; -err_free_pgd: - dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma); + return pud_dma_ptr; + +err_free_pmd_ptrs: + kfree(pgtable->pmd_ptrs[pgd_idx]); + +err_free_pud_dma_ptr: + ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma); + return NULL; +} + +static u64* +ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx, + int pud_idx) +{ + u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx]; + dma_addr_t pmd_dma; + + if (pmd_dma_ptr) + return pmd_dma_ptr; + + pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL); + if (!pmd_dma_ptr) + return NULL; + + drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]); + pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL); + if (!pgtable->pte_ptrs[pgd_idx][pud_idx]) + goto err_free_pmd_dma_ptr; + + pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr; + pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID; + + return pmd_dma_ptr; + +err_free_pmd_dma_ptr: + ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma); return NULL; } static u64* ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, - int pgd_index, int pmd_index) + int pgd_idx, int pud_idx, int pmd_idx) { + u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx]; dma_addr_t pte_dma; - u64 *pte; - if (pgtable->pgd_cpu_entries[pgd_index][pmd_index]) - return pgtable->pgd_cpu_entries[pgd_index][pmd_index]; + if (pte_dma_ptr) + return pte_dma_ptr; - pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL); - if (!pte) + pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL); + if (!pte_dma_ptr) return NULL; - pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte; - pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID; + pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr; + pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID; - return pte; + return pte_dma_ptr; } static int ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 
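The descriptor words written by the ensure_* helpers pack a DMA address together with flag bits, which is why the free path above masks with ~IVPU_MMU_ENTRY_FLAGS_MASK before calling dma_free_coherent(). A minimal sketch of that round trip; the example_ helpers are illustrative, not driver API:

#include <linux/types.h>

/* Low 12 bits (plus the bit-52 contiguous hint) are flags; the rest is
 * the 4 KiB-aligned DMA address of the next-level table. */
static u64 example_entry_pack(dma_addr_t table_dma)
{
	return table_dma | IVPU_MMU_ENTRY_VALID;
}

static dma_addr_t example_entry_unpack(u64 entry)
{
	/* Strip the flags before handing the address back to the
	 * DMA allocator. */
	return entry & ~IVPU_MMU_ENTRY_FLAGS_MASK;
}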
vpu_addr, dma_addr_t dma_addr, int prot) + u64 vpu_addr, dma_addr_t dma_addr, u64 prot) { u64 *pte; - int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); - int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); - int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); + int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); + int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); + int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); + int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); - /* Allocate PMD - second level page table if needed */ - if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index)) + /* Allocate PUD - second level page table if needed */ + if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx)) return -ENOMEM; - /* Allocate PTE - third level page table if needed */ - pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index); + /* Allocate PMD - third level page table if needed */ + if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx)) + return -ENOMEM; + + /* Allocate PTE - fourth level page table if needed */ + pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx); if (!pte) return -ENOMEM; - /* Update PTE - third level page table with DMA address */ - pte[pte_index] = dma_addr | prot; + /* Update PTE */ + pte[pte_idx] = dma_addr | prot; return 0; } -static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr) -{ - int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); - int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); - int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); - - /* Update PTE with dummy physical address and clear flags */ - ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID; -} - -static void -ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) -{ - u64 end_addr = vpu_addr + size; - u64 *pgd = ctx->pgtable.pgd; - - /* Align to PMD entry (2 MB) */ - vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1); - - while (vpu_addr < end_addr) { - int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); - u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE; - u64 *pmd = ctx->pgtable.pgd_entries[pgd_index]; - - while (vpu_addr < end_addr && vpu_addr < pmd_end) { - int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); - u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index]; - - clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE); - vpu_addr += IVPU_MMU_PTE_MAP_SIZE; - } - clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE); - } - clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE); -} - static int -ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot) +ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, + dma_addr_t dma_addr, u64 prot) { + size_t size = IVPU_MMU_CONT_PAGES_SIZE; + + drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size)); + drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size)); + + prot |= IVPU_MMU_ENTRY_FLAG_CONT; + while (size) { int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); if (ret) return ret; + size -= IVPU_MMU_PAGE_SIZE; vpu_addr += IVPU_MMU_PAGE_SIZE; dma_addr += IVPU_MMU_PAGE_SIZE; - size -= IVPU_MMU_PAGE_SIZE; + } + + return 0; +} + +static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr) +{ + int pgd_idx = 
FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); + int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); + int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); + int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); + + /* Update PTE with dummy physical address and clear flags */ + ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID; +} + +static void +ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) +{ + struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable; + u64 end_addr = vpu_addr + size; + + /* Align to PMD entry (2 MB) */ + vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1); + + while (vpu_addr < end_addr) { + int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); + u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE; + + while (vpu_addr < end_addr && vpu_addr < pud_end) { + int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); + u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE; + + while (vpu_addr < end_addr && vpu_addr < pmd_end) { + int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); + + clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx], + IVPU_MMU_PGTABLE_SIZE); + vpu_addr += IVPU_MMU_PTE_MAP_SIZE; + } + clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx], + IVPU_MMU_PGTABLE_SIZE); + } + clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE); + } + clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE); +} + +static int +ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, + u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot) +{ + int map_size; + int ret; + + while (size) { + if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE && + IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) { + ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot); + map_size = IVPU_MMU_CONT_PAGES_SIZE; + } else { + ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); + map_size = IVPU_MMU_PAGE_SIZE; + } + + if (ret) + return ret; + + vpu_addr += map_size; + dma_addr += map_size; + size -= map_size; } return 0; @@ -216,8 +322,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) { struct scatterlist *sg; - int prot; int ret; + u64 prot; u64 i; if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) @@ -237,7 +343,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, mutex_lock(&ctx->lock); for_each_sgtable_dma_sg(sgt, sg, i) { - u64 dma_addr = sg_dma_address(sg) - sg->offset; + dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset; size_t size = sg_dma_len(sg) + sg->offset; ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot); @@ -293,8 +399,14 @@ ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx, { lockdep_assert_held(&ctx->lock); - return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, - 0, range->start, range->end, DRM_MM_INSERT_BEST); + if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) { + if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0, + range->start, range->end, DRM_MM_INSERT_BEST)) + return 0; + } + + return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0, + range->start, range->end, DRM_MM_INSERT_BEST); } void @@ -319,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, 
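The 64 KiB fast path chosen in ivpu_mmu_context_map_pages() above hinges on one alignment test covering both address spaces. A condensed sketch of that predicate, assuming the same IVPU_MMU_CONT_PAGES_SIZE of 16 x 4 KiB; the function name is hypothetical:

#include <linux/align.h>
#include <linux/sizes.h>
#include <linux/types.h>

static bool example_can_map_cont_64k(u64 vpu_addr, dma_addr_t dma_addr,
				     size_t size)
{
	/* OR-ing the two addresses lets a single IS_ALIGNED() verify
	 * both at once: a set low bit in either operand survives the OR
	 * and fails the test. */
	return size >= SZ_64K && IS_ALIGNED(vpu_addr | dma_addr, SZ_64K);
}

When the test passes, all sixteen 4 KiB descriptors get the bit-52 contiguous hint, letting the IOMMU cache the run as one translation.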
u3 return ret; if (!context_id) { - start = vdev->hw->ranges.global_low.start; - end = vdev->hw->ranges.global_high.end; + start = vdev->hw->ranges.global.start; + end = vdev->hw->ranges.shave.end; } else { - start = vdev->hw->ranges.user_low.start; - end = vdev->hw->ranges.user_high.end; + start = vdev->hw->ranges.user.start; + end = vdev->hw->ranges.dma.end; } drm_mm_init(&ctx->mm, start, end - start); @@ -334,11 +446,15 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3 static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) { - drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd); + if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr)) + return; mutex_destroy(&ctx->lock); - ivpu_mmu_pgtable_free(vdev, &ctx->pgtable); + ivpu_mmu_pgtables_free(vdev, &ctx->pgtable); drm_mm_takedown(&ctx->mm); + + ctx->pgtable.pgd_dma_ptr = NULL; + ctx->pgtable.pgd_dma = 0; } int ivpu_mmu_global_context_init(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h index ddf11b95023a..961a0d6a6c7f 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.h +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -12,12 +12,13 @@ struct ivpu_device; struct ivpu_file_priv; struct ivpu_addr_range; -#define IVPU_MMU_PGTABLE_ENTRIES 512 +#define IVPU_MMU_PGTABLE_ENTRIES 512ull struct ivpu_mmu_pgtable { - u64 **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES]; - u64 *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES]; - u64 *pgd; + u64 ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES]; + u64 **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES]; + u64 *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES]; + u64 *pgd_dma_ptr; dma_addr_t pgd_dma; }; diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index aa4d56dc52b3..e6f27daf5560 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -259,6 +259,7 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev) pm_runtime_get_sync(vdev->drm.dev); ivpu_dbg(vdev, PM, "Pre-reset..\n"); + atomic_inc(&vdev->pm->reset_counter); atomic_set(&vdev->pm->in_reset, 1); ivpu_shutdown(vdev); ivpu_pm_prepare_cold_boot(vdev); diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h index baca98187255..fd4eada1290f 100644 --- a/drivers/accel/ivpu/ivpu_pm.h +++ b/drivers/accel/ivpu/ivpu_pm.h @@ -14,6 +14,7 @@ struct ivpu_pm_info { struct ivpu_device *vdev; struct work_struct recovery_work; atomic_t in_reset; + atomic_t reset_counter; bool is_warmboot; u32 suspend_reschedule_counter; }; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 6b6d981a71be..f4b06792c6f1 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1293,7 +1293,6 @@ static void update_profiling_data(struct drm_file *file_priv, static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv, bool is_partial) { - struct qaic_partial_execute_entry *pexec; struct qaic_execute *args = data; struct qaic_execute_entry *exec; struct dma_bridge_chan *dbc; @@ -1313,7 +1312,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr received_ts = ktime_get_ns(); - size = is_partial ? sizeof(*pexec) : sizeof(*exec); + size = is_partial ? 
sizeof(struct qaic_partial_execute_entry) : sizeof(*exec); n = (unsigned long)size * args->hdr.count; if (args->hdr.count == 0 || n / args->hdr.count != size) return -EINVAL; @@ -1321,7 +1320,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr user_data = u64_to_user_ptr(args->data); exec = kcalloc(args->hdr.count, size, GFP_KERNEL); - pexec = (struct qaic_partial_execute_entry *)exec; if (!exec) return -ENOMEM; diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index b5ba550a0c04..b5de82e6eb4d 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -165,7 +165,6 @@ static const struct drm_driver qaic_accel_driver = { .ioctls = qaic_drm_ioctls, .num_ioctls = ARRAY_SIZE(qaic_drm_ioctls), - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = qaic_gem_prime_import, }; diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index c2cab7e2b126..729845bcc803 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -79,7 +79,6 @@ static int cfag12864bfb_probe(struct platform_device *device) info->var = cfag12864bfb_var; info->pseudo_palette = NULL; info->par = NULL; - info->flags = FBINFO_FLAG_DEFAULT; if (register_framebuffer(info) < 0) goto fballoced; diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 0c5cd5193fbf..3a2d88387224 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -646,7 +646,6 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv, fbdev->info->var = ht16k33_fb_var; fbdev->info->bl_dev = bl; fbdev->info->pseudo_palette = NULL; - fbdev->info->flags = FBINFO_FLAG_DEFAULT; fbdev->info->par = priv; err = register_framebuffer(fbdev->info); diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index 6cfbbf0720bd..b5b62e40ccc1 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -33,7 +33,7 @@ * into their address space. This necessitated the creation of the DMA-BUF sysfs * statistics interface to provide per-buffer information on production systems. * - * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about + * The interface at ``/sys/kernel/dmabuf/buffers`` exposes information about * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled. * * The following stats are exposed by the interface: diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index aa4ea8530cb3..21916bba77d5 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -131,7 +131,6 @@ static struct file_system_type dma_buf_fs_type = { static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; - int ret; if (!is_dma_buf_file(file)) return -EINVAL; @@ -147,11 +146,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) dmabuf->size >> PAGE_SHIFT) return -EINVAL; - dma_resv_lock(dmabuf->resv, NULL); - ret = dmabuf->ops->mmap(dmabuf, vma); - dma_resv_unlock(dmabuf->resv); - - return ret; + return dmabuf->ops->mmap(dmabuf, vma); } static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) @@ -850,6 +845,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, * - &dma_buf_ops.release() * - &dma_buf_ops.begin_cpu_access() * - &dma_buf_ops.end_cpu_access() + * - &dma_buf_ops.mmap() * * 2. 
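The qaic size computation above guards the multiply with a division-based wraparound test. A sketch of the same guard expressed with check_mul_overflow(), assuming the same semantics (reject zero counts and wrapped products); the function name is hypothetical:

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int example_total_size(size_t elem_size, u32 count, size_t *out)
{
	size_t n;

	/* check_mul_overflow() returns true if elem_size * count wrapped;
	 * on success the product is stored through the third argument. */
	if (count == 0 || check_mul_overflow(elem_size, (size_t)count, &n))
		return -EINVAL;

	*out = n;
	return 0;
}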
These &dma_buf_ops callbacks are invoked with locked dma-buf * reservation and exporter can't take the lock: @@ -858,7 +854,6 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, * - &dma_buf_ops.unpin() * - &dma_buf_ops.map_dma_buf() * - &dma_buf_ops.unmap_dma_buf() - * - &dma_buf_ops.mmap() * - &dma_buf_ops.vmap() * - &dma_buf_ops.vunmap() * @@ -1463,8 +1458,6 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff) { - int ret; - if (WARN_ON(!dmabuf || !vma)) return -EINVAL; @@ -1485,11 +1478,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, vma_set_file(vma, dmabuf->file); vma->vm_pgoff = pgoff; - dma_resv_lock(dmabuf->resv, NULL); - ret = dmabuf->ops->mmap(dmabuf, vma); - dma_resv_unlock(dmabuf->resv); - - return ret; + return dmabuf->ops->mmap(dmabuf, vma); } EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index a7f048048864..ee899f8e6721 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -183,8 +182,6 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct cma_heap_buffer *buffer = dmabuf->priv; - dma_resv_assert_held(dmabuf->resv); - if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index ee7059399e9c..9076d47ed2ef 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -201,8 +200,6 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) struct sg_page_iter piter; int ret; - dma_resv_assert_held(dmabuf->resv); - for_each_sgtable_page(table, &piter, vma->vm_pgoff) { struct page *page = sg_page_iter_page(&piter); diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 12cf6bb2e3ce..c40645999648 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -51,8 +51,6 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma) { struct udmabuf *ubuf = buf->priv; - dma_resv_assert_held(buf->resv); - if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c index cc4dcaea67fa..2f1902e5d407 100644 --- a/drivers/firmware/efi/libstub/efi-stub-entry.c +++ b/drivers/firmware/efi/libstub/efi-stub-entry.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include + #include #include "efistub.h" diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c index 4be1c4d1f922..a51ec201ca3c 100644 --- a/drivers/firmware/efi/libstub/screen_info.c +++ b/drivers/firmware/efi/libstub/screen_info.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include + #include #include "efistub.h" diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index afb3b2f5f425..ab9ef1c20349 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,6 +9,9 @@ menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA select DRM_PANEL_ORIENTATION_QUIRKS + select 
DRM_KMS_HELPER if DRM_FBDEV_EMULATION + select FB_CORE if DRM_FBDEV_EMULATION + select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION select HDMI select I2C select DMA_SHARED_BUFFER @@ -80,6 +83,7 @@ config DRM_KUNIT_TEST select DRM_BUDDY select DRM_EXPORT_FOR_TESTS if m select DRM_KUNIT_TEST_HELPERS + select DRM_EXEC default KUNIT_ALL_TESTS help This builds unit tests for DRM. This option is not useful for @@ -95,7 +99,6 @@ config DRM_KUNIT_TEST config DRM_KMS_HELPER tristate depends on DRM - select FB_SYS_HELPERS_DEFERRED if DRM_FBDEV_EMULATION help CRTC helpers for KMS drivers. @@ -131,9 +134,7 @@ config DRM_DEBUG_MODESET_LOCK config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" - depends on DRM_KMS_HELPER - depends on FB=y || FB=DRM_KMS_HELPER - select FRAMEBUFFER_CONSOLE if !EXPERT + depends on DRM select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE default y help @@ -194,6 +195,27 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. +config DRM_TTM_KUNIT_TEST + tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS + default n + depends on DRM && KUNIT && MMU + select DRM_TTM + select DRM_EXPORT_FOR_TESTS if m + select DRM_KUNIT_TEST_HELPERS + default KUNIT_ALL_TESTS + help + Enables unit tests for TTM, a GPU memory manager subsystem used + to manage memory buffers. This option is mostly useful for kernel + developers. + + If in doubt, say "N". + +config DRM_EXEC + tristate + depends on DRM + help + Execution context for command submissions + config DRM_BUDDY tristate depends on DRM @@ -216,7 +238,7 @@ config DRM_TTM_HELPER config DRM_GEM_DMA_HELPER tristate depends on DRM - select FB_SYS_HELPERS if DRM_FBDEV_EMULATION + select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION help Choose this if you need the GEM DMA helper functions @@ -323,6 +345,8 @@ source "drivers/gpu/drm/v3d/Kconfig" source "drivers/gpu/drm/vc4/Kconfig" +source "drivers/gpu/drm/loongson/Kconfig" + source "drivers/gpu/drm/etnaviv/Kconfig" source "drivers/gpu/drm/hisilicon/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 7a09a89b493b..215e78e79125 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -45,6 +45,7 @@ drm-y := \ drm_vblank.o \ drm_vblank_work.o \ drm_vma_manager.o \ + drm_gpuva_mgr.o \ drm_writeback.o drm-$(CONFIG_DRM_LEGACY) += \ drm_agpsupport.o \ @@ -78,6 +79,8 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o # # Memory-management helpers # +# +obj-$(CONFIG_DRM_EXEC) += drm_exec.o obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o @@ -194,3 +197,4 @@ obj-y += gud/ obj-$(CONFIG_DRM_HYPERV) += hyperv/ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ +obj-$(CONFIG_DRM_LOONGSON) += loongson/ diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index b91e79c721e2..22d88f8ef527 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -21,6 +21,7 @@ config DRM_AMDGPU select INTERVAL_TREE select DRM_BUDDY select DRM_SUBALLOC_HELPER + select DRM_EXEC # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work # ACPI_VIDEO's dependencies must also be selected. 
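The new DRM_EXEC option above provides the shared execution context for command submissions that amdgpu now selects. A minimal locking sketch, assuming the drm_exec API as introduced alongside this series (drm_exec_init() took only a flags argument at this point); the function and parameter names are illustrative:

#include <drm/drm_exec.h>

static int example_lock_two_bos(struct drm_gem_object *a,
				struct drm_gem_object *b)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* On contention, already-held locks are dropped and the
		 * loop restarts from the top. */
		ret = drm_exec_lock_obj(&exec, a);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		ret = drm_exec_lock_obj(&exec, b);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	/* ... work while both reservation locks are held ... */

	drm_exec_fini(&exec);
	return ret;
}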
select INPUT if ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 8d16f280b695..384b798a9bad 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -62,7 +62,7 @@ subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror amdgpu-y := amdgpu_drv.o # add KMS driver -amdgpu-y += amdgpu_device.o amdgpu_kms.o \ +amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \ atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \ amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \ @@ -98,7 +98,7 @@ amdgpu-y += \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \ - nbio_v7_9.o aqua_vanjaram_reg_init.o + nbio_v7_9.o aqua_vanjaram.o # add DF block amdgpu-y += \ @@ -129,7 +129,8 @@ amdgpu-y += \ vega10_ih.o \ vega20_ih.o \ navi10_ih.o \ - ih_v6_0.o + ih_v6_0.o \ + ih_v6_1.o # add PSP block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6dc950c1b689..dc2d53081e80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -53,7 +53,6 @@ #include #include -#include #include #include @@ -193,7 +192,6 @@ extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; -extern uint amdgpu_freesync_vid_mode; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; extern uint amdgpu_dm_abm_level; @@ -1034,7 +1032,6 @@ struct amdgpu_device { bool has_pr3; bool ucode_sysfs_en; - bool psp_sysfs_en; /* Chip product information */ char product_number[20]; @@ -1129,7 +1126,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, u64 reg_addr, u32 reg_data); void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, - uint32_t reg, uint32_t v); + uint32_t reg, uint32_t v, uint32_t xcc_id); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); @@ -1508,4 +1505,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) int amdgpu_in_reset(struct amdgpu_device *adev); +extern const struct attribute_group amdgpu_vram_mgr_attr_group; +extern const struct attribute_group amdgpu_gtt_mgr_attr_group; +extern const struct attribute_group amdgpu_flash_attr_group; + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 385c6acb5728..2bca37044ad0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -706,7 +706,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, atcs_input.size = sizeof(struct atcs_pref_req_input); /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ - atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8); + atcs_input.client_id = pci_dev_id(adev->pdev); atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK; atcs_input.flags = ATCS_WAIT_FOR_COMPLETION; if (advertise) @@ -776,7 +776,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, atcs_input.size = sizeof(struct atcs_pwr_shift_input); /* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ - 
atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8); + atcs_input.dgpu_id = pci_dev_id(adev->pdev); atcs_input.dev_acpi_state = dev_state; atcs_input.drv_state = drv_state; @@ -868,7 +868,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm) if (!numa_info) { struct sysinfo info; - numa_info = kzalloc(sizeof *numa_info, GFP_KERNEL); + numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL); if (!numa_info) return NULL; @@ -1141,7 +1141,7 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, if (!tmr_offset || !tmr_size) return -EINVAL; - bdf = (adev->pdev->bus->number << 8) | adev->pdev->devfn; + bdf = pci_dev_id(adev->pdev); dev_info = amdgpu_acpi_get_dev(bdf); if (!dev_info) return -ENOENT; @@ -1162,7 +1162,7 @@ int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, if (!numa_info) return -EINVAL; - bdf = (adev->pdev->bus->number << 8) | adev->pdev->devfn; + bdf = pci_dev_id(adev->pdev); dev_info = amdgpu_acpi_get_dev(bdf); if (!dev_info) return -ENOENT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index b4fcad0e62f7..df633e9ce920 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -226,16 +226,6 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm) kgd2kfd_suspend(adev->kfd.dev, run_pm); } -int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev) -{ - int r = 0; - - if (adev->kfd.dev) - r = kgd2kfd_resume_iommu(adev->kfd.dev); - - return r; -} - int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm) { int r = 0; @@ -830,3 +820,53 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id) return adev->gmc.real_vram_size; } } + +int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, + u32 inst) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + struct amdgpu_ring_funcs *ring_funcs; + struct amdgpu_ring *ring; + int r = 0; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL); + if (!ring_funcs) + return -ENOMEM; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + r = -ENOMEM; + goto free_ring_funcs; + } + + ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE; + ring->doorbell_index = doorbell_off; + ring->funcs = ring_funcs; + + spin_lock(&kiq->ring_lock); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock(&kiq->ring_lock); + r = -ENOMEM; + goto free_ring; + } + + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); + + if (kiq_ring->sched.ready && !adev->job_hang) + r = amdgpu_ring_test_helper(kiq_ring); + + spin_unlock(&kiq->ring_lock); + +free_ring: + kfree(ring); + +free_ring_funcs: + kfree(ring_funcs); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2d0406bff84e..2fe9860725bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -25,6 +25,7 @@ #ifndef AMDGPU_AMDKFD_H_INCLUDED #define AMDGPU_AMDKFD_H_INCLUDED +#include #include #include #include @@ -32,7 +33,6 @@ #include #include #include -#include #include "amdgpu_sync.h" #include "amdgpu_vm.h" #include "amdgpu_xcp.h" @@ -71,8 +71,7 @@ struct kgd_mem { struct hmm_range *range; struct list_head attachments; /* protected by amdkfd_process_info.lock */ - struct ttm_validate_buffer validate_list; 
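The pci_dev_id() conversions above replace open-coded bus/devfn arithmetic with the helper from <linux/pci.h>. For reference, a sketch mirroring what the helper computes; the example_ name is illustrative:

#include <linux/pci.h>

/* PCI_DEVID() packs bits 15:8 with the bus number and bits 7:0 with
 * devfn (device in 7:3, function in 2:0) - exactly the layout the old
 * expressions built by hand, in both operand orders. */
static inline u16 example_pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}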
- struct ttm_validate_buffer resv_list; + struct list_head validate_list; uint32_t domain; unsigned int mapped_to_gpu_memory; uint64_t va; @@ -149,7 +148,6 @@ int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm); -int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry); @@ -252,6 +250,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, uint32_t *payload); +int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off, + u32 inst); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when @@ -398,7 +398,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); -int kgd2kfd_resume_iommu(struct kfd_dev *kfd); int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); int kgd2kfd_pre_reset(struct kfd_dev *kfd); int kgd2kfd_post_reset(struct kfd_dev *kfd); @@ -438,11 +437,6 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { } -static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd) -{ - return 0; -} - static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index 60f9e027fb66..aff08321e976 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -23,6 +23,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd_arcturus.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include "amdgpu_amdkfd_aldebaran.h" #include "gc/gc_9_4_2_offset.h" #include "gc/gc_9_4_2_sh_mask.h" #include @@ -36,7 +37,7 @@ * initialize the debug mode registers after it has disabled GFX off during the * debug session. 
*/ -static uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, +uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, bool restore_dbg_registers, uint32_t vmid) { @@ -107,7 +108,7 @@ static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device return data; } -static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, +uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid) { @@ -125,7 +126,8 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch( uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; @@ -161,12 +163,6 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch( return watch_address_cntl; } -static uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev, - uint32_t watch_id) -{ - return 0; -} - const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -191,7 +187,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, .set_address_watch = kgd_gfx_aldebaran_set_address_watch, - .clear_address_watch = kgd_gfx_aldebaran_clear_address_watch, + .clear_address_watch = kgd_gfx_v9_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h new file mode 100644 index 000000000000..a7bdaf8d82dd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev, + bool restore_dbg_registers, + uint32_t vmid); +uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, + uint8_t wave_launch_mode, + uint32_t vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 5b4b7f8b92a5..490c8f5ddb60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -22,6 +22,7 @@ #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include "amdgpu_amdkfd_aldebaran.h" #include "gc/gc_9_4_3_offset.h" #include "gc/gc_9_4_3_sh_mask.h" #include "athub/athub_1_8_0_offset.h" @@ -32,6 +33,7 @@ #include "soc15.h" #include "sdma/sdma_4_4_2_offset.h" #include "sdma/sdma_4_4_2_sh_mask.h" +#include static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) { @@ -361,6 +363,156 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, return 0; } +/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */ +static uint32_t kgd_gfx_v9_4_3_disable_debug_trap(struct amdgpu_device *adev, + bool keep_trap_enabled, + uint32_t vmid) +{ + uint32_t data = 0; + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); + + return data; +} + +static int kgd_gfx_v9_4_3_validate_trap_override_request( + struct amdgpu_device *adev, + uint32_t trap_override, + uint32_t *trap_mask_supported) +{ + *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION | + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START | + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; + + if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR && + trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE) + return -EPERM; + + return 0; +} + +static uint32_t trap_mask_map_sw_to_hw(uint32_t mask) +{ + uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0; + uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0; + uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID | + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL | + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_FP_OVERFLOW | + KFD_DBG_TRAP_MASK_FP_UNDERFLOW | + KFD_DBG_TRAP_MASK_FP_INEXACT | + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO | + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION); + uint32_t ret; + + ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en); + ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start); + ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end); + + return ret; +} + +static uint32_t trap_mask_map_hw_to_sw(uint32_t mask) +{ + uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN); + + if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START)) + ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START; + + if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END)) + ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; + + return ret; +} + +/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. 
*/ +static uint32_t kgd_gfx_v9_4_3_set_wave_launch_trap_override( + struct amdgpu_device *adev, + uint32_t vmid, + uint32_t trap_override, + uint32_t trap_mask_bits, + uint32_t trap_mask_request, + uint32_t *trap_mask_prev, + uint32_t kfd_dbg_trap_cntl_prev) + +{ + uint32_t data = 0; + + *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev); + + data = (trap_mask_bits & trap_mask_request) | + (*trap_mask_prev & ~trap_mask_request); + data = trap_mask_map_sw_to_hw(data); + + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override); + + return data; +} + +#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H) +static uint32_t kgd_gfx_v9_4_3_set_address_watch( + struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid, + uint32_t inst) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 7); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + regTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + regTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + return watch_address_cntl; +} + +static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + return 0; +} + const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping, @@ -379,6 +531,19 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = - kgd_gfx_v9_program_trap_handler_settings + kgd_gfx_v9_program_trap_handler_settings, + .build_grace_period_packet_info = + kgd_gfx_v9_build_grace_period_packet_info, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .enable_debug_trap = kgd_aldebaran_enable_debug_trap, + .disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap, + .validate_trap_override_request = + kgd_gfx_v9_4_3_validate_trap_override_request, + .set_wave_launch_trap_override = + kgd_gfx_v9_4_3_set_wave_launch_trap_override, + .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v9_4_3_set_address_watch, + .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 8ad7a7779e14..f1f2c24de081 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -886,7 +886,8 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t 
inst) { uint32_t watch_address_high; uint32_t watch_address_low; @@ -968,7 +969,8 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, * deq_retry_wait_time -- Wait Count for Global Wave Syncs. */ void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, - uint32_t *wait_times) + uint32_t *wait_times, + uint32_t inst) { *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); @@ -978,7 +980,8 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data) + uint32_t *reg_data, + uint32_t inst) { *reg_data = wait_times; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index e6b70196071a..ecaead24e8c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -44,12 +44,16 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid); + uint32_t debug_vmid, + uint32_t inst); uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, uint32_t watch_id); -void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times, + uint32_t inst); void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data); + uint32_t *reg_data, + uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 91c3574ebed3..d67d003bada2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -637,7 +637,7 @@ static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev, { uint32_t data = 0; - data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); @@ -743,7 +743,8 @@ static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 51d93fb13ea3..fa5ee96f8845 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -822,7 +822,8 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid) + uint32_t debug_vmid, + uint32_t inst) { uint32_t watch_address_high; uint32_t watch_address_low; @@ -903,10 +904,12 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 
*/ void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, - uint32_t *wait_times) + uint32_t *wait_times, + uint32_t inst) { - *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); + *wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + mmCP_IQ_WAIT_TIME2)); } void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, @@ -1100,12 +1103,13 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data) + uint32_t *reg_data, + uint32_t inst) { *reg_data = wait_times; /* - * The CP cannont handle a 0 grace period input and will result in + * The CP cannot handle a 0 grace period input and will result in * an infinite grace period being set so set to 1 to prevent this. */ if (grace_period == 0) @@ -1116,7 +1120,8 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, SCH_WAVE, grace_period); - *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); + *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + mmCP_IQ_WAIT_TIME2); } void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, @@ -1128,9 +1133,9 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, * Program TBA registers */ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO, - lower_32_bits(tba_addr >> 8)); + lower_32_bits(tba_addr >> 8)); WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI, - upper_32_bits(tba_addr >> 8)); + upper_32_bits(tba_addr >> 8)); /* * Program TMA registers diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 5f54bff0db49..936e501908ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -89,12 +89,16 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid); + uint32_t debug_vmid, + uint32_t inst); uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, uint32_t watch_id); -void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times, + uint32_t inst); void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data); + uint32_t *reg_data, + uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index d34c3ef8f3ed..7d6daf8d2bfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -27,6 +27,8 @@ #include #include +#include + #include "amdgpu_object.h" #include "amdgpu_gem.h" #include "amdgpu_vm.h" @@ -37,7 +39,6 @@ #include "amdgpu_xgmi.h" #include "kfd_priv.h" #include "kfd_smi_events.h" -#include /* Userptr restore delay, just long enough to allow consecutive VM * changes to accumulate @@ -964,28 +965,20 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, struct amdkfd_process_info *process_info, bool userptr) { - struct ttm_validate_buffer *entry = &mem->validate_list; - struct amdgpu_bo *bo = mem->bo; - - INIT_LIST_HEAD(&entry->head); - entry->num_shared = 1; - entry->bo = &bo->tbo; mutex_lock(&process_info->lock); if (userptr) - list_add_tail(&entry->head, 
&process_info->userptr_valid_list); + list_add_tail(&mem->validate_list, + &process_info->userptr_valid_list); else - list_add_tail(&entry->head, &process_info->kfd_bo_list); + list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); mutex_unlock(&process_info->lock); } static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, struct amdkfd_process_info *process_info) { - struct ttm_validate_buffer *bo_list_entry; - - bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); - list_del(&bo_list_entry->head); + list_del(&mem->validate_list); mutex_unlock(&process_info->lock); } @@ -1072,13 +1065,12 @@ out: * object can track VM updates. */ struct bo_vm_reservation_context { - struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */ - unsigned int n_vms; /* Number of VMs reserved */ - struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */ - struct ww_acquire_ctx ticket; /* Reservation ticket */ - struct list_head list, duplicates; /* BO lists */ - struct amdgpu_sync *sync; /* Pointer to sync object */ - bool reserved; /* Whether BOs are reserved */ + /* DRM execution context for the reservation */ + struct drm_exec exec; + /* Number of VMs reserved */ + unsigned int n_vms; + /* Pointer to sync object */ + struct amdgpu_sync *sync; }; enum bo_vm_match { @@ -1102,35 +1094,26 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, WARN_ON(!vm); - ctx->reserved = false; ctx->n_vms = 1; ctx->sync = &mem->sync; + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_until_all_locked(&ctx->exec) { + ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; - INIT_LIST_HEAD(&ctx->list); - INIT_LIST_HEAD(&ctx->duplicates); - - ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL); - if (!ctx->vm_pd) - return -ENOMEM; - - ctx->kfd_bo.priority = 0; - ctx->kfd_bo.tv.bo = &bo->tbo; - ctx->kfd_bo.tv.num_shared = 1; - list_add(&ctx->kfd_bo.tv.head, &ctx->list); - - amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); - - ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates); - if (ret) { - pr_err("Failed to reserve buffers in ttm.\n"); - kfree(ctx->vm_pd); - ctx->vm_pd = NULL; - return ret; + ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; } - - ctx->reserved = true; return 0; + +error: + pr_err("Failed to reserve buffers in ttm.\n"); + drm_exec_fini(&ctx->exec); + return ret; } /** @@ -1147,63 +1130,39 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, struct amdgpu_vm *vm, enum bo_vm_match map_type, struct bo_vm_reservation_context *ctx) { - struct amdgpu_bo *bo = mem->bo; struct kfd_mem_attachment *entry; - unsigned int i; + struct amdgpu_bo *bo = mem->bo; int ret; - ctx->reserved = false; - ctx->n_vms = 0; - ctx->vm_pd = NULL; ctx->sync = &mem->sync; + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_until_all_locked(&ctx->exec) { + ctx->n_vms = 0; + list_for_each_entry(entry, &mem->attachments, list) { + if ((vm && vm != entry->bo_va->base.vm) || + (entry->is_mapped != map_type + && map_type != BO_VM_ALL)) + continue; - INIT_LIST_HEAD(&ctx->list); - INIT_LIST_HEAD(&ctx->duplicates); + ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm, + &ctx->exec, 2); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; + ++ctx->n_vms; + } - list_for_each_entry(entry, &mem->attachments, list) { - if ((vm && vm != 
entry->bo_va->base.vm) || - (entry->is_mapped != map_type - && map_type != BO_VM_ALL)) - continue; - - ctx->n_vms++; + ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(ret)) + goto error; } - - if (ctx->n_vms != 0) { - ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), - GFP_KERNEL); - if (!ctx->vm_pd) - return -ENOMEM; - } - - ctx->kfd_bo.priority = 0; - ctx->kfd_bo.tv.bo = &bo->tbo; - ctx->kfd_bo.tv.num_shared = 1; - list_add(&ctx->kfd_bo.tv.head, &ctx->list); - - i = 0; - list_for_each_entry(entry, &mem->attachments, list) { - if ((vm && vm != entry->bo_va->base.vm) || - (entry->is_mapped != map_type - && map_type != BO_VM_ALL)) - continue; - - amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list, - &ctx->vm_pd[i]); - i++; - } - - ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates); - if (ret) { - pr_err("Failed to reserve buffers in ttm.\n"); - kfree(ctx->vm_pd); - ctx->vm_pd = NULL; - return ret; - } - - ctx->reserved = true; return 0; + +error: + pr_err("Failed to reserve buffers in ttm.\n"); + drm_exec_fini(&ctx->exec); + return ret; } /** @@ -1224,15 +1183,8 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, if (wait) ret = amdgpu_sync_wait(ctx->sync, intr); - if (ctx->reserved) - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list); - kfree(ctx->vm_pd); - + drm_exec_fini(&ctx->exec); ctx->sync = NULL; - - ctx->reserved = false; - ctx->vm_pd = NULL; - return ret; } @@ -1855,7 +1807,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( bool use_release_notifier = (mem->bo->kfd_bo == mem); struct kfd_mem_attachment *entry, *tmp; struct bo_vm_reservation_context ctx; - struct ttm_validate_buffer *bo_list_entry; unsigned int mapped_to_gpu_memory; int ret; bool is_imported = false; @@ -1883,9 +1834,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } /* Make sure restore workers don't access the BO any more */ - bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); - list_del(&bo_list_entry->head); + list_del(&mem->validate_list); mutex_unlock(&process_info->lock); /* Cleanup user pages and MMU notifiers */ @@ -2452,14 +2402,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, /* Move all invalidated BOs to the userptr_inval_list */ list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_valid_list, - validate_list.head) + validate_list) if (mem->invalid) - list_move_tail(&mem->validate_list.head, + list_move_tail(&mem->validate_list, &process_info->userptr_inval_list); /* Go through userptr_inval_list and update any invalid user_pages */ list_for_each_entry(mem, &process_info->userptr_inval_list, - validate_list.head) { + validate_list) { invalid = mem->invalid; if (!invalid) /* BO hasn't been invalidated since the last @@ -2539,51 +2489,42 @@ unlock_out: */ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) { - struct amdgpu_bo_list_entry *pd_bo_list_entries; - struct list_head resv_list, duplicates; - struct ww_acquire_ctx ticket; + struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_sync sync; + struct drm_exec exec; struct amdgpu_vm *peer_vm; struct kgd_mem *mem, *tmp_mem; struct amdgpu_bo *bo; - struct ttm_operation_ctx ctx = { false, false }; - int i, ret; - - pd_bo_list_entries = kcalloc(process_info->n_vms, - sizeof(struct amdgpu_bo_list_entry), - GFP_KERNEL); - if (!pd_bo_list_entries) { - pr_err("%s: Failed to allocate PD BO list entries\n", __func__); - ret = 
-ENOMEM; - goto out_no_mem; - } - - INIT_LIST_HEAD(&resv_list); - INIT_LIST_HEAD(&duplicates); - - /* Get all the page directory BOs that need to be reserved */ - i = 0; - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) - amdgpu_vm_get_pd_bo(peer_vm, &resv_list, - &pd_bo_list_entries[i++]); - /* Add the userptr_inval_list entries to resv_list */ - list_for_each_entry(mem, &process_info->userptr_inval_list, - validate_list.head) { - list_add_tail(&mem->resv_list.head, &resv_list); - mem->resv_list.bo = mem->validate_list.bo; - mem->resv_list.num_shared = mem->validate_list.num_shared; - } - - /* Reserve all BOs and page tables for validation */ - ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); - WARN(!list_empty(&duplicates), "Duplicates should be empty"); - if (ret) - goto out_free; + int ret; amdgpu_sync_create(&sync); + drm_exec_init(&exec, 0); + /* Reserve all BOs and page tables for validation */ + drm_exec_until_all_locked(&exec) { + /* Reserve all the page directories */ + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) { + ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unreserve_out; + } + + /* Reserve the userptr_inval_list entries to resv_list */ + list_for_each_entry(mem, &process_info->userptr_inval_list, + validate_list) { + struct drm_gem_object *gobj; + + gobj = &mem->bo->tbo.base; + ret = drm_exec_prepare_obj(&exec, gobj, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unreserve_out; + } + } + ret = process_validate_vms(process_info); if (ret) goto unreserve_out; @@ -2591,7 +2532,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) /* Validate BOs and update GPUVM page tables */ list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, - validate_list.head) { + validate_list) { struct kfd_mem_attachment *attachment; bo = mem->bo; @@ -2633,12 +2574,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) ret = process_update_pds(process_info, &sync); unreserve_out: - ttm_eu_backoff_reservation(&ticket, &resv_list); + drm_exec_fini(&exec); amdgpu_sync_wait(&sync, false); amdgpu_sync_free(&sync); -out_free: - kfree(pd_bo_list_entries); -out_no_mem: return ret; } @@ -2654,7 +2592,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, - validate_list.head) { + validate_list) { bool valid; /* keep mem without hmm range at userptr_inval_list */ @@ -2678,7 +2616,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i continue; } - list_move_tail(&mem->validate_list.head, + list_move_tail(&mem->validate_list, &process_info->userptr_valid_list); } @@ -2788,50 +2726,44 @@ unlock_out: */ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) { - struct amdgpu_bo_list_entry *pd_bo_list; struct amdkfd_process_info *process_info = info; struct amdgpu_vm *peer_vm; struct kgd_mem *mem; - struct bo_vm_reservation_context ctx; struct amdgpu_amdkfd_fence *new_fence; - int ret = 0, i; struct list_head duplicate_save; struct amdgpu_sync sync_obj; unsigned long failed_size = 0; unsigned long total_size = 0; + struct drm_exec exec; + int ret; INIT_LIST_HEAD(&duplicate_save); - INIT_LIST_HEAD(&ctx.list); - INIT_LIST_HEAD(&ctx.duplicates); - pd_bo_list = kcalloc(process_info->n_vms, - sizeof(struct 
amdgpu_bo_list_entry), - GFP_KERNEL); - if (!pd_bo_list) - return -ENOMEM; - - i = 0; mutex_lock(&process_info->lock); - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) - amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]); - /* Reserve all BOs and page tables/directory. Add all BOs from - * kfd_bo_list to ctx.list - */ - list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) { + drm_exec_init(&exec, 0); + drm_exec_until_all_locked(&exec) { + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) { + ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto ttm_reserve_fail; + } - list_add_tail(&mem->resv_list.head, &ctx.list); - mem->resv_list.bo = mem->validate_list.bo; - mem->resv_list.num_shared = mem->validate_list.num_shared; - } + /* Reserve all BOs and page tables/directory. Add all BOs from + * kfd_bo_list to ctx.list + */ + list_for_each_entry(mem, &process_info->kfd_bo_list, + validate_list) { + struct drm_gem_object *gobj; - ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, - false, &duplicate_save); - if (ret) { - pr_debug("Memory eviction: TTM Reserve Failed. Try again\n"); - goto ttm_reserve_fail; + gobj = &mem->bo->tbo.base; + ret = drm_exec_prepare_obj(&exec, gobj, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto ttm_reserve_fail; + } } amdgpu_sync_create(&sync_obj); @@ -2849,7 +2781,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) /* Validate BOs and map them to GPUVM (update VM page tables). */ list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) { + validate_list) { struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; @@ -2925,8 +2857,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) *ef = dma_fence_get(&new_fence->base); /* Attach new eviction fence to all BOs except pinned ones */ - list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) { + list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { if (mem->bo->tbo.pin_count) continue; @@ -2945,11 +2876,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) } validate_map_fail: - ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list); amdgpu_sync_free(&sync_obj); ttm_reserve_fail: + drm_exec_fini(&exec); mutex_unlock(&process_info->lock); - kfree(pd_bo_list); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index f4e3c133a16c..73ee14f7a9a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); struct atom_context *ctx = adev->mode_info.atom_context; - return sysfs_emit(buf, "%s\n", ctx->vbios_version); + return sysfs_emit(buf, "%s\n", ctx->vbios_ver_str); } static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index b639a80ee3fc..0811474e8fd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -89,8 +89,7 @@ struct atom_memory_info { #define MAX_AC_TIMING_ENTRIES 16 -struct atom_memory_clock_range_table -{ +struct atom_memory_clock_range_table { u8 num_entries; u8 rsv[3]; u32 
mclk[MAX_AC_TIMING_ENTRIES]; @@ -118,14 +117,12 @@ struct atom_mc_reg_table { #define MAX_VOLTAGE_ENTRIES 32 -struct atom_voltage_table_entry -{ +struct atom_voltage_table_entry { u16 value; u32 smio_low; }; -struct atom_voltage_table -{ +struct atom_voltage_table { u32 count; u32 mask_low; u32 phase_delay; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 0b7f4c4d58e5..835980e94b9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -58,7 +58,7 @@ uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *ade if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, &frev, &crev, &data_offset)) { /* support firmware_info 3.1 + */ - if ((frev == 3 && crev >=1) || (frev > 3)) { + if ((frev == 3 && crev >= 1) || (frev > 3)) { firmware_info = (union firmware_info *) (mode_info->atom_context->bios + data_offset); fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability); @@ -597,7 +597,7 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, index, &size, &frev, &crev, &data_offset)) { /* support firmware_info 3.4 + */ - if ((frev == 3 && crev >=4) || (frev > 3)) { + if ((frev == 3 && crev >= 4) || (frev > 3)) { firmware_info = (union firmware_info *) (mode_info->atom_context->bios + data_offset); /* The ras_rom_i2c_slave_addr should ideally @@ -850,7 +850,7 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev) firmware_info = (union firmware_info *)(ctx->bios + data_offset); - if (frev !=3) + if (frev != 3) return -EINVAL; switch (crev) { @@ -909,7 +909,7 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset) } index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, - asic_init); + asic_init); if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) { if (frev == 2 && crev >= 1) { memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index d6d986be906a..375f02002579 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -74,24 +74,29 @@ struct atpx_mux { u16 mux; } __packed; -bool amdgpu_has_atpx(void) { +bool amdgpu_has_atpx(void) +{ return amdgpu_atpx_priv.atpx_detected; } -bool amdgpu_has_atpx_dgpu_power_cntl(void) { +bool amdgpu_has_atpx_dgpu_power_cntl(void) +{ return amdgpu_atpx_priv.atpx.functions.power_cntl; } -bool amdgpu_is_atpx_hybrid(void) { +bool amdgpu_is_atpx_hybrid(void) +{ return amdgpu_atpx_priv.atpx.is_hybrid; } -bool amdgpu_atpx_dgpu_req_power_for_displays(void) { +bool amdgpu_atpx_dgpu_req_power_for_displays(void) +{ return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; } #if defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void) { +void *amdgpu_atpx_get_dhandle(void) +{ return amdgpu_atpx_priv.dhandle; } #endif @@ -134,7 +139,7 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function, /* Fail only if calling the method fails and ATPX is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk("failed to evaluate ATPX got %s\n", + pr_err("failed to evaluate ATPX got %s\n", acpi_format_exception(status)); kfree(buffer.pointer); return NULL; @@ -190,7 +195,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) size = *(u16 *) info->buffer.pointer; if (size 
< 10) { - printk("ATPX buffer is too small: %zu\n", size); + pr_err("ATPX buffer is too small: %zu\n", size); kfree(info); return -EINVAL; } @@ -223,11 +228,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) { - printk("ATPX Hybrid Graphics, forcing to ATPX\n"); + pr_warn("ATPX Hybrid Graphics, forcing to ATPX\n"); atpx->functions.power_cntl = true; atpx->is_hybrid = false; } else { - printk("ATPX Hybrid Graphics\n"); + pr_notice("ATPX Hybrid Graphics\n"); /* * Disable legacy PM methods only when pcie port PM is usable, * otherwise the device might fail to power off or power on. @@ -269,7 +274,7 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx) size = *(u16 *) info->buffer.pointer; if (size < 8) { - printk("ATPX buffer is too small: %zu\n", size); + pr_err("ATPX buffer is too small: %zu\n", size); err = -EINVAL; goto out; } @@ -278,8 +283,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx) memcpy(&output, info->buffer.pointer, size); /* TODO: check version? */ - printk("ATPX version %u, functions 0x%08x\n", - output.version, output.function_bits); + pr_notice("ATPX version %u, functions 0x%08x\n", + output.version, output.function_bits); amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index b582b83c4984..38ccec913f00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -460,7 +460,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) return false; success: - adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? 
true : false; + adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10; return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 252a876b0725..b6298e901cbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -28,6 +28,7 @@ * Christian König */ +#include #include #include "amdgpu.h" @@ -50,15 +51,22 @@ static void amdgpu_bo_list_free(struct kref *ref) refcount); struct amdgpu_bo_list_entry *e; - amdgpu_bo_list_for_each_entry(e, list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - - amdgpu_bo_unref(&bo); - } - + amdgpu_bo_list_for_each_entry(e, list) + amdgpu_bo_unref(&e->bo); call_rcu(&list->rhead, amdgpu_bo_list_free_rcu); } +static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b) +{ + const struct amdgpu_bo_list_entry *a = _a, *b = _b; + + if (a->priority > b->priority) + return 1; + if (a->priority < b->priority) + return -1; + return 0; +} + int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_bo_list_entry *info, size_t num_entries, struct amdgpu_bo_list **result) @@ -118,7 +126,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, entry->priority = min(info[i].bo_priority, AMDGPU_BO_LIST_MAX_PRIORITY); - entry->tv.bo = &bo->tbo; + entry->bo = bo; if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) list->gds_obj = bo; @@ -133,6 +141,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, list->first_userptr = first_userptr; list->num_entries = num_entries; + sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry), + amdgpu_bo_list_entry_cmp, NULL); trace_amdgpu_cs_bo_status(list->num_entries, total_size); @@ -141,16 +151,10 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, return 0; error_free: - for (i = 0; i < last_entry; ++i) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); - - amdgpu_bo_unref(&bo); - } - for (i = first_userptr; i < num_entries; ++i) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); - - amdgpu_bo_unref(&bo); - } + for (i = 0; i < last_entry; ++i) + amdgpu_bo_unref(&array[i].bo); + for (i = first_userptr; i < num_entries; ++i) + amdgpu_bo_unref(&array[i].bo); kvfree(list); return r; @@ -182,41 +186,6 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, return -ENOENT; } -void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, - struct list_head *validated) -{ - /* This is based on the bucket sort with O(n) time complexity. - * An item with priority "i" is added to bucket[i]. The lists are then - * concatenated in descending order. - */ - struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; - struct amdgpu_bo_list_entry *e; - unsigned i; - - for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) - INIT_LIST_HEAD(&bucket[i]); - - /* Since buffers which appear sooner in the relocation list are - * likely to be used more often than buffers which appear later - * in the list, the sort mustn't change the ordering of buffers - * with the same priority, i.e. it must be stable. - */ - amdgpu_bo_list_for_each_entry(e, list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - unsigned priority = e->priority; - - if (!bo->parent) - list_add_tail(&e->tv.head, &bucket[priority]); - - e->user_pages = NULL; - e->range = NULL; - } - - /* Connect the sorted buckets in the output list. 
*/ - for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) - list_splice(&bucket[i], validated); -} - void amdgpu_bo_list_put(struct amdgpu_bo_list *list) { kref_put(&list->refcount, amdgpu_bo_list_free); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index ededdc01ca28..26c01cb131f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -23,7 +23,6 @@ #ifndef __AMDGPU_BO_LIST_H__ #define __AMDGPU_BO_LIST_H__ -#include #include struct hmm_range; @@ -36,7 +35,7 @@ struct amdgpu_bo_va; struct amdgpu_fpriv; struct amdgpu_bo_list_entry { - struct ttm_validate_buffer tv; + struct amdgpu_bo *bo; struct amdgpu_bo_va *bo_va; uint32_t priority; struct page **user_pages; @@ -60,8 +59,6 @@ struct amdgpu_bo_list { int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id, struct amdgpu_bo_list **result); -void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, - struct list_head *validated); void amdgpu_bo_list_put(struct amdgpu_bo_list *list); int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, struct drm_amdgpu_bo_list_entry **info_param); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 456e385333b6..b8280be6225d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -41,13 +41,13 @@ struct amdgpu_cgs_device { ((struct amdgpu_cgs_device *)cgs_device)->adev -static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset) +static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned int offset) { CGS_FUNC_ADEV; return RREG32(offset); } -static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset, +static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned int offset, uint32_t value) { CGS_FUNC_ADEV; @@ -56,7 +56,7 @@ static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned of static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, enum cgs_ind_reg space, - unsigned index) + unsigned int index) { CGS_FUNC_ADEV; switch (space) { @@ -84,7 +84,7 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, enum cgs_ind_reg space, - unsigned index, uint32_t value) + unsigned int index, uint32_t value) { CGS_FUNC_ADEV; switch (space) { @@ -163,38 +163,38 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device, uint16_t fw_version = 0; switch (type) { - case CGS_UCODE_ID_SDMA0: - fw_version = adev->sdma.instance[0].fw_version; - break; - case CGS_UCODE_ID_SDMA1: - fw_version = adev->sdma.instance[1].fw_version; - break; - case CGS_UCODE_ID_CP_CE: - fw_version = adev->gfx.ce_fw_version; - break; - case CGS_UCODE_ID_CP_PFP: - fw_version = adev->gfx.pfp_fw_version; - break; - case CGS_UCODE_ID_CP_ME: - fw_version = adev->gfx.me_fw_version; - break; - case CGS_UCODE_ID_CP_MEC: - fw_version = adev->gfx.mec_fw_version; - break; - case CGS_UCODE_ID_CP_MEC_JT1: - fw_version = adev->gfx.mec_fw_version; - break; - case CGS_UCODE_ID_CP_MEC_JT2: - fw_version = adev->gfx.mec_fw_version; - break; - case CGS_UCODE_ID_RLC_G: - fw_version = adev->gfx.rlc_fw_version; - break; - case CGS_UCODE_ID_STORAGE: - break; - default: - DRM_ERROR("firmware type %d do not have version\n", type); - break; + case CGS_UCODE_ID_SDMA0: + fw_version = 
adev->sdma.instance[0].fw_version; + break; + case CGS_UCODE_ID_SDMA1: + fw_version = adev->sdma.instance[1].fw_version; + break; + case CGS_UCODE_ID_CP_CE: + fw_version = adev->gfx.ce_fw_version; + break; + case CGS_UCODE_ID_CP_PFP: + fw_version = adev->gfx.pfp_fw_version; + break; + case CGS_UCODE_ID_CP_ME: + fw_version = adev->gfx.me_fw_version; + break; + case CGS_UCODE_ID_CP_MEC: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_CP_MEC_JT1: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_CP_MEC_JT2: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_RLC_G: + fw_version = adev->gfx.rlc_fw_version; + break; + case CGS_UCODE_ID_STORAGE: + break; + default: + DRM_ERROR("firmware type %d do not have version\n", type); + break; } return fw_version; } @@ -205,7 +205,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, { CGS_FUNC_ADEV; - if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) { + if (type != CGS_UCODE_ID_SMU && type != CGS_UCODE_ID_SMU_SK) { uint64_t gpu_addr; uint32_t data_size; const struct gfx_firmware_header_v1_0 *header; @@ -232,7 +232,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->mc_addr = gpu_addr; info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - if (CGS_UCODE_ID_CP_MEC == type) + if (type == CGS_UCODE_ID_CP_MEC) info->image_size = le32_to_cpu(header->jt_offset) << 2; info->fw_version = amdgpu_get_firmware_version(cgs_device, type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index fb78a8f47587..49dd9aa8da70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -65,6 +65,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, } amdgpu_sync_create(&p->sync); + drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); return 0; } @@ -125,7 +126,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p, uint32_t *offset) { struct drm_gem_object *gobj; - struct amdgpu_bo *bo; unsigned long size; int r; @@ -133,18 +133,16 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p, if (gobj == NULL) return -EINVAL; - bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - p->uf_entry.priority = 0; - p->uf_entry.tv.bo = &bo->tbo; + p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); drm_gem_object_put(gobj); - size = amdgpu_bo_size(bo); + size = amdgpu_bo_size(p->uf_bo); if (size != PAGE_SIZE || (data->offset + 8) > size) { r = -EINVAL; goto error_unref; } - if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { + if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) { r = -EINVAL; goto error_unref; } @@ -154,7 +152,7 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p, return 0; error_unref: - amdgpu_bo_unref(&bo); + amdgpu_bo_unref(&p->uf_bo); return r; } @@ -311,7 +309,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, goto free_all_kdata; } - if (p->uf_entry.tv.bo) + if (p->uf_bo) p->gang_leader->uf_addr = uf_offset; kvfree(chunk_array); @@ -356,7 +354,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p, ib = &job->ibs[job->num_ibs++]; /* MM engine doesn't support user fences */ - if (p->uf_entry.tv.bo && ring->funcs->no_user_fence) + if (p->uf_bo && ring->funcs->no_user_fence) return -EINVAL; if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && @@ -841,55 +839,18 @@ retry: return r; } -static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, - struct list_head *validated) -{ - struct ttm_operation_ctx ctx = { 
true, false }; - struct amdgpu_bo_list_entry *lobj; - int r; - - list_for_each_entry(lobj, validated, tv.head) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); - struct mm_struct *usermm; - - usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); - if (usermm && usermm != current->mm) - return -EPERM; - - if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) && - lobj->user_invalidated && lobj->user_pages) { - amdgpu_bo_placement_from_domain(bo, - AMDGPU_GEM_DOMAIN_CPU); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) - return r; - - amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, - lobj->user_pages); - } - - r = amdgpu_cs_bo_validate(p, bo); - if (r) - return r; - - kvfree(lobj->user_pages); - lobj->user_pages = NULL; - } - return 0; -} - static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; + struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_list_entry *e; - struct list_head duplicates; + struct drm_gem_object *obj; + unsigned long index; unsigned int i; int r; - INIT_LIST_HEAD(&p->validated); - /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */ if (cs->in.bo_list_handle) { if (p->bo_list) @@ -909,29 +870,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, mutex_lock(&p->bo_list->bo_list_mutex); - /* One for TTM and one for each CS job */ - amdgpu_bo_list_for_each_entry(e, p->bo_list) - e->tv.num_shared = 1 + p->gang_size; - p->uf_entry.tv.num_shared = 1 + p->gang_size; - - amdgpu_bo_list_get_list(p->bo_list, &p->validated); - - INIT_LIST_HEAD(&duplicates); - amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - - /* Two for VM updates, one for TTM and one for each CS job */ - p->vm_pd.tv.num_shared = 3 + p->gang_size; - - if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) - list_add(&p->uf_entry.tv.head, &p->validated); - /* Get userptr backing pages. 
If pages are updated after registered * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do * amdgpu_ttm_backend_bind() to flush and invalidate new pages */ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); bool userpage_invalidated = false; + struct amdgpu_bo *bo = e->bo; int i; e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, @@ -959,18 +904,56 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, e->user_invalidated = userpage_invalidated; } - r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, - &duplicates); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) - DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); - goto out_free_user_pages; + drm_exec_until_all_locked(&p->exec) { + r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size); + drm_exec_retry_on_contention(&p->exec); + if (unlikely(r)) + goto out_free_user_pages; + + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + /* One fence for TTM and one for each CS job */ + r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base, + 1 + p->gang_size); + drm_exec_retry_on_contention(&p->exec); + if (unlikely(r)) + goto out_free_user_pages; + + e->bo_va = amdgpu_vm_bo_find(vm, e->bo); + } + + if (p->uf_bo) { + r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base, + 1 + p->gang_size); + drm_exec_retry_on_contention(&p->exec); + if (unlikely(r)) + goto out_free_user_pages; + } } - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { + struct mm_struct *usermm; - e->bo_va = amdgpu_vm_bo_find(vm, bo); + usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm); + if (usermm && usermm != current->mm) { + r = -EPERM; + goto out_free_user_pages; + } + + if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) && + e->user_invalidated && e->user_pages) { + amdgpu_bo_placement_from_domain(e->bo, + AMDGPU_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement, + &ctx); + if (r) + goto out_free_user_pages; + + amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm, + e->user_pages); + } + + kvfree(e->user_pages); + e->user_pages = NULL; } amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, @@ -982,25 +965,21 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_cs_bo_validate, p); if (r) { DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); - goto error_validate; + goto out_free_user_pages; } - r = amdgpu_cs_list_validate(p, &duplicates); - if (r) - goto error_validate; + drm_exec_for_each_locked_object(&p->exec, index, obj) { + r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj)); + if (unlikely(r)) + goto out_free_user_pages; + } - r = amdgpu_cs_list_validate(p, &p->validated); - if (r) - goto error_validate; + if (p->uf_bo) { + r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo); + if (unlikely(r)) + goto out_free_user_pages; - if (p->uf_entry.tv.bo) { - struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo); - - r = amdgpu_ttm_alloc_gart(&uf->tbo); - if (r) - goto error_validate; - - p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf); + p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo); } amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, @@ -1012,12 +991,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bo_list->oa_obj); return 0; -error_validate: - ttm_eu_backoff_reservation(&p->ticket, &p->validated); - out_free_user_pages: amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = 
ttm_to_amdgpu_bo(e->tv.bo); + struct amdgpu_bo *bo = e->bo; if (!e->user_pages) continue; @@ -1123,7 +1099,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_list_entry *e; struct amdgpu_bo_va *bo_va; - struct amdgpu_bo *bo; unsigned int i; int r; @@ -1152,11 +1127,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) } amdgpu_bo_list_for_each_entry(e, p->bo_list) { - /* ignore duplicates */ - bo = ttm_to_amdgpu_bo(e->tv.bo); - if (!bo) - continue; - bo_va = e->bo_va; if (bo_va == NULL) continue; @@ -1194,7 +1164,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + struct amdgpu_bo *bo = e->bo; /* ignore duplicates */ if (!bo) @@ -1211,8 +1181,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct drm_gpu_scheduler *sched; - struct amdgpu_bo_list_entry *e; + struct drm_gem_object *obj; struct dma_fence *fence; + unsigned long index; unsigned int i; int r; @@ -1223,8 +1194,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) return r; } - list_for_each_entry(e, &p->validated, tv.head) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + drm_exec_for_each_locked_object(&p->exec, index, obj) { + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct dma_resv *resv = bo->tbo.base.resv; enum amdgpu_sync_mode sync_mode; @@ -1288,6 +1260,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_job *leader = p->gang_leader; struct amdgpu_bo_list_entry *e; + struct drm_gem_object *gobj; + unsigned long index; unsigned int i; uint64_t seq; int r; @@ -1326,9 +1300,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, */ r = 0; amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - - r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); + r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm, + e->range); e->range = NULL; } if (r) { @@ -1338,20 +1311,22 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, } p->fence = dma_fence_get(&leader->base.s_fence->finished); - list_for_each_entry(e, &p->validated, tv.head) { + drm_exec_for_each_locked_object(&p->exec, index, gobj) { + + ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo); /* Everybody except for the gang leader uses READ */ for (i = 0; i < p->gang_size; ++i) { if (p->jobs[i] == leader) continue; - dma_resv_add_fence(e->tv.bo->base.resv, + dma_resv_add_fence(gobj->resv, &p->jobs[i]->base.s_fence->finished, DMA_RESV_USAGE_READ); } - /* The gang leader is remembered as writer */ - e->tv.num_shared = 0; + /* The gang leader is remembered as writer */ + dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE); } seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx], @@ -1367,7 +1342,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, cs->out.handle = seq; leader->uf_sequence = seq; - amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); + amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket); for (i = 0; i < p->gang_size; ++i) { amdgpu_job_free_resources(p->jobs[i]); trace_amdgpu_cs_ioctl(p->jobs[i]); @@ -1376,7 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, } amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
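[Aside: the amdgpu_cs hunks above and below are the core of the drm_exec conversion. Every path that previously paired ttm_eu_reserve_buffers() with ttm_eu_backoff_reservation() now drives one drm_exec cursor. A condensed sketch of the pattern, with vm, bo and num_fences standing in for the caller's actual state (illustrative only, not a copy of any one function in this patch):

struct drm_exec exec;
int r;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec) {
	/* lock the VM page directory first */
	r = amdgpu_vm_lock_pd(vm, &exec, num_fences);
	drm_exec_retry_on_contention(&exec);
	if (unlikely(r))
		goto error;

	/* then each BO, reserving num_fences fence slots */
	r = drm_exec_prepare_obj(&exec, &bo->tbo.base, num_fences);
	drm_exec_retry_on_contention(&exec);
	if (unlikely(r))
		goto error;
}

/* all objects are locked here; on contention drm_exec dropped
 * every lock itself and the block simply ran again from the top
 */
error:
	drm_exec_fini(&exec);	/* releases the locks and the ww ticket */

This is what lets the patch delete the kcalloc'ed amdgpu_bo_list_entry arrays and the hand-built validated/duplicates lists that existed only to feed the reservation; the ww ticket stays reachable as exec.ticket, which is exactly what the amdgpu_cs_find_mapping() check further down switches to.]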
- ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); mutex_unlock(&p->bo_list->bo_list_mutex); @@ -1389,6 +1363,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser) unsigned int i; amdgpu_sync_free(&parser->sync); + drm_exec_fini(&parser->exec); + for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); kfree(parser->post_deps[i].chain); @@ -1409,11 +1385,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser) if (parser->jobs[i]) amdgpu_job_free(parser->jobs[i]); } - if (parser->uf_entry.tv.bo) { - struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo); - - amdgpu_bo_unref(&uf); - } + amdgpu_bo_unref(&parser->uf_bo); } int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) @@ -1474,7 +1446,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return 0; error_backoff: - ttm_eu_backoff_reservation(&parser.ticket, &parser.validated); mutex_unlock(&parser.bo_list->bo_list_mutex); error_fini: @@ -1809,7 +1780,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, *map = mapping; /* Double check that the BO is reserved by this CS */ - if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) + if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket) return -EINVAL; if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h index fb3e3d56d427..39c33ad100cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h @@ -24,6 +24,7 @@ #define __AMDGPU_CS_H__ #include +#include #include "amdgpu_job.h" #include "amdgpu_bo_list.h" @@ -62,11 +63,9 @@ struct amdgpu_cs_parser { struct amdgpu_job *gang_leader; /* buffer objects */ - struct ww_acquire_ctx ticket; + struct drm_exec exec; struct amdgpu_bo_list *bo_list; struct amdgpu_mn *mn; - struct amdgpu_bo_list_entry vm_pd; - struct list_head validated; struct dma_fence *fence; uint64_t bytes_moved_threshold; uint64_t bytes_moved_vis_threshold; @@ -74,7 +73,7 @@ struct amdgpu_cs_parser { uint64_t bytes_moved_vis; /* user fence */ - struct amdgpu_bo_list_entry uf_entry; + struct amdgpu_bo *uf_bo; unsigned num_post_deps; struct amdgpu_cs_post_dep *post_deps; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 23d054526e7c..720011019741 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -22,6 +22,8 @@ * * Author: Monk.liu@amd.com */ +#include + #include "amdgpu.h" uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) @@ -65,31 +67,25 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size) { - struct ww_acquire_ctx ticket; - struct list_head list; - struct amdgpu_bo_list_entry pd; - struct ttm_validate_buffer csa_tv; + struct drm_exec exec; int r; - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&csa_tv.head); - csa_tv.bo = &bo->tbo; - csa_tv.num_shared = 1; - - list_add(&csa_tv.head, &list); - amdgpu_vm_get_pd_bo(vm, &list, &pd); - - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); - if (r) { - DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); - return r; + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(vm, &exec, 0); + if (likely(!r)) + 
r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) { + DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); + goto error; + } } *bo_va = amdgpu_vm_bo_add(adev, vm, bo); if (!*bo_va) { - ttm_eu_backoff_reservation(&ticket, &list); - DRM_ERROR("failed to create bo_va for static CSA\n"); - return -ENOMEM; + r = -ENOMEM; + goto error; } r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, @@ -99,48 +95,42 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); amdgpu_vm_bo_del(adev, *bo_va); - ttm_eu_backoff_reservation(&ticket, &list); - return r; + goto error; } - ttm_eu_backoff_reservation(&ticket, &list); - return 0; +error: + drm_exec_fini(&exec); + return r; } int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, uint64_t csa_addr) { - struct ww_acquire_ctx ticket; - struct list_head list; - struct amdgpu_bo_list_entry pd; - struct ttm_validate_buffer csa_tv; + struct drm_exec exec; int r; - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&csa_tv.head); - csa_tv.bo = &bo->tbo; - csa_tv.num_shared = 1; - - list_add(&csa_tv.head, &list); - amdgpu_vm_get_pd_bo(vm, &list, &pd); - - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); - if (r) { - DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); - return r; + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(vm, &exec, 0); + if (likely(!r)) + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) { + DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); + goto error; + } } r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr); if (r) { DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r); - ttm_eu_backoff_reservation(&ticket, &list); - return r; + goto error; } amdgpu_vm_bo_del(adev, bo_va); - ttm_eu_backoff_reservation(&ticket, &list); - - return 0; +error: + drm_exec_fini(&exec); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 56e89e76ff17..a4faea4fa0b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -154,7 +154,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } else { r = get_user(value, (uint32_t *)buf); if (!r) - amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0); } if (r) { result = r; @@ -283,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } else { r = get_user(value, (uint32_t *)buf); if (!r) - amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id); } if (r) { result = r; @@ -375,7 +375,7 @@ static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) { struct amdgpu_debugfs_gprwave_data *rd; - rd = kzalloc(sizeof *rd, GFP_KERNEL); + rd = kzalloc(sizeof(*rd), GFP_KERNEL); if (!rd) return -ENOMEM; rd->adev = file_inode(file)->i_private; @@ -388,6 +388,7 @@ static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file) { struct amdgpu_debugfs_gprwave_data *rd = file->private_data; + mutex_destroy(&rd->lock); kfree(file->private_data); return 0; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6238701cde23..e77f048c99d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -159,76 +159,11 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, return sysfs_emit(buf, "%llu\n", cnt); } -static DEVICE_ATTR(pcie_replay_count, S_IRUGO, +static DEVICE_ATTR(pcie_replay_count, 0444, amdgpu_device_get_pcie_replay_count, NULL); static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); -/** - * DOC: product_name - * - * The amdgpu driver provides a sysfs API for reporting the product name - * for the device - * The file product_name is used for this and returns the product name - * as returned from the FRU. - * NOTE: This is only available for certain server cards - */ - -static ssize_t amdgpu_device_get_product_name(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(ddev); - - return sysfs_emit(buf, "%s\n", adev->product_name); -} - -static DEVICE_ATTR(product_name, S_IRUGO, - amdgpu_device_get_product_name, NULL); - -/** - * DOC: product_number - * - * The amdgpu driver provides a sysfs API for reporting the part number - * for the device - * The file product_number is used for this and returns the part number - * as returned from the FRU. - * NOTE: This is only available for certain server cards - */ - -static ssize_t amdgpu_device_get_product_number(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(ddev); - - return sysfs_emit(buf, "%s\n", adev->product_number); -} - -static DEVICE_ATTR(product_number, S_IRUGO, - amdgpu_device_get_product_number, NULL); - -/** - * DOC: serial_number - * - * The amdgpu driver provides a sysfs API for reporting the serial number - * for the device - * The file serial_number is used for this and returns the serial number - * as returned from the FRU. 
- * NOTE: This is only available for certain server cards - */ - -static ssize_t amdgpu_device_get_serial_number(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(ddev); - - return sysfs_emit(buf, "%s\n", adev->serial); -} - -static DEVICE_ATTR(serial_number, S_IRUGO, - amdgpu_device_get_serial_number, NULL); /** * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control @@ -370,10 +305,16 @@ size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, if (write) { memcpy_toio(addr, buf, count); + /* Make sure HDP write cache flush happens without any reordering + * after the system memory contents are sent over PCIe device + */ mb(); amdgpu_device_flush_hdp(adev, NULL); } else { amdgpu_device_invalidate_hdp(adev, NULL); + /* Make sure HDP read cache is invalidated before issuing a read + * to the PCIe device + */ mb(); memcpy_fromio(buf, addr, count); } @@ -481,8 +422,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, /* * MMIO register read with bytes helper functions * @offset:bytes offset from MMIO start - * -*/ + */ /** * amdgpu_mm_rreg8 - read a memory mapped IO register @@ -506,8 +446,8 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) * MMIO register write with bytes helper functions * @offset:bytes offset from MMIO start * @value: the value want to be written to the register - * -*/ + */ + /** * amdgpu_mm_wreg8 - read a memory mapped IO register * @@ -571,7 +511,8 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, * this function is invoked only for the debugfs register access */ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, - uint32_t reg, uint32_t v) + uint32_t reg, uint32_t v, + uint32_t xcc_id) { if (amdgpu_device_skip_hw_access(adev)) return; @@ -580,7 +521,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->is_rlcg_access_range) { if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) - return amdgpu_sriov_wreg(adev, reg, v, 0, 0); + return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id); } else if ((reg * 4) >= adev->rmmio_size) { adev->pcie_wreg(adev, reg * 4, v); } else { @@ -588,94 +529,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, } } -/** - * amdgpu_mm_rdoorbell - read a doorbell dword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * - * Returns the value in the doorbell aperture at the - * requested doorbell index (CIK). - */ -u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) -{ - if (amdgpu_device_skip_hw_access(adev)) - return 0; - - if (index < adev->doorbell.num_kernel_doorbells) { - return readl(adev->doorbell.ptr + index); - } else { - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); - return 0; - } -} - -/** - * amdgpu_mm_wdoorbell - write a doorbell dword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * @v: value to write - * - * Writes @v to the doorbell aperture at the - * requested doorbell index (CIK). 
- */ -void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) -{ - if (amdgpu_device_skip_hw_access(adev)) - return; - - if (index < adev->doorbell.num_kernel_doorbells) { - writel(v, adev->doorbell.ptr + index); - } else { - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); - } -} - -/** - * amdgpu_mm_rdoorbell64 - read a doorbell Qword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * - * Returns the value in the doorbell aperture at the - * requested doorbell index (VEGA10+). - */ -u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) -{ - if (amdgpu_device_skip_hw_access(adev)) - return 0; - - if (index < adev->doorbell.num_kernel_doorbells) { - return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); - } else { - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); - return 0; - } -} - -/** - * amdgpu_mm_wdoorbell64 - write a doorbell Qword - * - * @adev: amdgpu_device pointer - * @index: doorbell index - * @v: value to write - * - * Writes @v to the doorbell aperture at the - * requested doorbell index (VEGA10+). - */ -void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) -{ - if (amdgpu_device_skip_hw_access(adev)) - return; - - if (index < adev->doorbell.num_kernel_doorbells) { - atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); - } else { - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); - } -} - /** * amdgpu_device_indirect_rreg - read an indirect register * @@ -1078,7 +931,7 @@ static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev) * @registers: pointer to the register array * @array_size: size of the register array * - * Programs an array or registers with and and or masks. + * Programs an array of registers with AND and OR masks. * This is a helper for setting golden registers. */ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, @@ -1135,83 +988,6 @@ int amdgpu_device_pci_reset(struct amdgpu_device *adev) return pci_reset_function(adev->pdev); } -/* - * GPU doorbell aperture helpers function. - */ -/** - * amdgpu_device_doorbell_init - Init doorbell driver information. - * - * @adev: amdgpu_device pointer - * - * Init doorbell driver information (CIK) - * Returns 0 on success, error on failure. - */ -static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) -{ - - /* No doorbell on SI hardware generation */ - if (adev->asic_type < CHIP_BONAIRE) { - adev->doorbell.base = 0; - adev->doorbell.size = 0; - adev->doorbell.num_kernel_doorbells = 0; - adev->doorbell.ptr = NULL; - return 0; - } - - if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) - return -EINVAL; - - amdgpu_asic_init_doorbell_index(adev); - - /* doorbell bar mapping */ - adev->doorbell.base = pci_resource_start(adev->pdev, 2); - adev->doorbell.size = pci_resource_len(adev->pdev, 2); - - if (adev->enable_mes) { - adev->doorbell.num_kernel_doorbells = - adev->doorbell.size / sizeof(u32); - } else { - adev->doorbell.num_kernel_doorbells = - min_t(u32, adev->doorbell.size / sizeof(u32), - adev->doorbell_index.max_assignment+1); - if (adev->doorbell.num_kernel_doorbells == 0) - return -EINVAL; - - /* For Vega, reserve and map two pages on doorbell BAR since SDMA - * paging queue doorbell use the second page. The - * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the - * doorbells are in the first page.
So with paging queue enabled, - * the max num_kernel_doorbells should + 1 page (0x400 in dword) - */ - if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) && - adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) - adev->doorbell.num_kernel_doorbells += 0x400; - } - - adev->doorbell.ptr = ioremap(adev->doorbell.base, - adev->doorbell.num_kernel_doorbells * - sizeof(u32)); - if (adev->doorbell.ptr == NULL) - return -ENOMEM; - - return 0; -} - -/** - * amdgpu_device_doorbell_fini - Tear down doorbell driver information. - * - * @adev: amdgpu_device pointer - * - * Tear down doorbell driver information (CIK) - */ -static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) -{ - iounmap(adev->doorbell.ptr); - adev->doorbell.ptr = NULL; -} - - - /* * amdgpu_device_wb_*() * Writeback is the method by which the GPU updates special pages in memory @@ -1321,10 +1097,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); struct pci_bus *root; struct resource *res; - unsigned i; + unsigned int i; u16 cmd; int r; + if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) + return 0; + /* Bypass for VF */ if (amdgpu_sriov_vf(adev)) return 0; @@ -1359,7 +1138,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) cmd & ~PCI_COMMAND_MEMORY); /* Free the VRAM and doorbell BAR, we most likely need to move both. */ - amdgpu_device_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); if (adev->asic_type >= CHIP_BONAIRE) pci_release_resource(adev->pdev, 2); @@ -1376,7 +1155,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) /* When the doorbell or fb BAR isn't available we have no chance of * using the device. */ - r = amdgpu_device_doorbell_init(adev); + r = amdgpu_doorbell_init(adev); if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) return -ENODEV; @@ -1387,9 +1166,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) static bool amdgpu_device_read_bios(struct amdgpu_device *adev) { - if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) { + if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) return false; - } return true; } @@ -1425,6 +1203,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) if (adev->asic_type == CHIP_FIJI) { int err; uint32_t fw_ver; + err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); /* force vPost if error occured */ if (err) @@ -1553,6 +1332,7 @@ static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev, bool state) { struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); + amdgpu_asic_set_vga_state(adev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -1575,7 +1355,8 @@ static void amdgpu_device_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the - * page table and the remaining bits are in the page directory */ + * page table and the remaining bits are in the page directory + */ if (amdgpu_vm_block_size == -1) return; @@ -1807,7 +1588,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - /* + /* * FIXME: open_count is protected by drm_global_mutex but that would lead to * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
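/*
 * A minimal sketch of the drm_exec locking pattern that the amdgpu_cs.c,
 * amdgpu_csa.c and amdgpu_gem.c hunks in this series convert to. The
 * function below is illustrative only: "vm" and "bo" stand in for whatever
 * objects a caller needs locked, and the real work is elided.
 */
#include <drm/drm_exec.h>

static int example_lock_vm_and_bo(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock the VM page directory first, then the BO itself */
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		if (likely(!r))
			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
		/* On ww-mutex contention this drops every lock taken so far
		 * and restarts the loop body, which replaces the old ttm_eu
		 * reservation-list backoff dance.
		 */
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	/* All objects are locked here; do the real work. */

error:
	drm_exec_fini(&exec);
	return r;
}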
@@ -3452,7 +3233,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) * * Main resume function for hardware IPs. The hardware IPs * are split into two resume functions because they are - * are also used in in recovering from a GPU reset and some additional + * also used in recovering from a GPU reset and some additional * steps need to be take between them. In this case (S3/S4) they are * run sequentially. * Returns 0 on success, negative error code on failure. @@ -3461,12 +3242,6 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; - if (!adev->in_s0ix) { - r = amdgpu_amdkfd_resume_iommu(adev); - if (r) - return r; - } - r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; @@ -3554,8 +3329,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) #else default: if (amdgpu_dc > 0) - DRM_INFO_ONCE("Display Core has been requested via kernel parameter " - "but isn't supported by ASIC, ignoring\n"); + DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); return false; #endif } @@ -3711,9 +3485,6 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) } static const struct attribute *amdgpu_dev_attributes[] = { - &dev_attr_product_name.attr, - &dev_attr_product_number.attr, - &dev_attr_serial_number.attr, &dev_attr_pcie_replay_count.attr, NULL }; @@ -3804,7 +3575,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); /* mutex initialization are all done here so we - * can recall function without having locking issues */ + * can recall function without having locking issues + */ mutex_init(&adev->firmware.mutex); mutex_init(&adev->pm.mutex); mutex_init(&adev->gfx.gpu_clock_mutex); @@ -3881,11 +3653,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); - if (adev->rmmio == NULL) { + if (!adev->rmmio) return -ENOMEM; - } + DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); - DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); + DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); /* * Reset domain needs to be present early, before XGMI hive discovered @@ -3953,7 +3725,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, dev_info(adev->dev, "PCIE atomic ops is not supported\n"); /* doorbell bar mapping and doorbell index init*/ - amdgpu_device_doorbell_init(adev); + amdgpu_doorbell_init(adev); if (amdgpu_emu_mode == 1) { /* post the asic on emulation mode */ @@ -4096,14 +3868,6 @@ fence_driver_init: } else adev->ucode_sysfs_en = true; - r = amdgpu_psp_sysfs_init(adev); - if (r) { - adev->psp_sysfs_en = false; - if (!amdgpu_sriov_vf(adev)) - DRM_ERROR("Creating psp sysfs failed\n"); - } else - adev->psp_sysfs_en = true; - /* * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 
* Otherwise the mgpu fan boost feature will be skipped due to the @@ -4136,6 +3900,8 @@ fence_driver_init: if (r) dev_err(adev->dev, "Could not create amdgpu device attr\n"); + amdgpu_fru_sysfs_init(adev); + if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); if (r) @@ -4147,7 +3913,8 @@ fence_driver_init: /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ /* this will fail for cards that aren't VGA class devices, just - * ignore it */ + * ignore it + */ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); @@ -4199,7 +3966,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); /* Unmap all mapped bars - Doorbell, registers and VRAM */ - amdgpu_device_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); iounmap(adev->rmmio); adev->rmmio = NULL; @@ -4230,7 +3997,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* make sure IB test finished before entering exclusive mode * to avoid preemption on IB test - * */ + */ if (amdgpu_sriov_vf(adev)) { amdgpu_virt_request_full_gpu(adev, false); amdgpu_virt_fini_data_exchange(adev); @@ -4253,9 +4020,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_pm_sysfs_fini(adev); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); - if (adev->psp_sysfs_en) - amdgpu_psp_sysfs_fini(adev); sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); + amdgpu_fru_sysfs_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -4313,7 +4079,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_device_doorbell_fini(adev); + amdgpu_doorbell_fini(adev); drm_dev_exit(idx); } @@ -4773,6 +4539,10 @@ retry: r = amdgpu_virt_reset_gpu(adev); if (r) return r; + amdgpu_irq_gpu_reset_resume_helper(adev); + + /* some sw clean up VF needs to do before recover */ + amdgpu_virt_post_reset(adev); /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); @@ -4799,7 +4569,6 @@ retry: amdgpu_put_xgmi_hive(hive); if (!r) { - amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); amdgpu_amdkfd_post_reset(adev); @@ -4967,8 +4736,9 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!ring || !ring->sched.thread) continue; - /*clear job fence from fence drv to avoid force_completion - *leave NULL and vm flush fence in fence drv */ + /* Clear job fence from fence drv to avoid force_completion + * leave NULL and vm flush fence in fence drv + */ amdgpu_fence_driver_clear_job_fences(ring); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ @@ -4982,7 +4752,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, r = amdgpu_reset_prepare_hwcontext(adev, reset_context); /* If reset handler not implemented, continue; otherwise return */ - if (r == -ENOSYS) + if (r == -EOPNOTSUPP) r = 0; else return r; @@ -5100,7 +4870,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, reset_context->reset_device_list = device_list_handle; r = amdgpu_reset_perform_reset(tmp_adev, reset_context); /* If reset handler not implemented, continue; otherwise return */ - if (r == -ENOSYS) + if (r == -EOPNOTSUPP) r = 0; else return r; @@ -5178,9 +4948,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, dev_warn(tmp_adev->dev, "asic atom init failed!"); } else { dev_info(tmp_adev->dev, "GPU reset succeeded, trying to 
resume\n"); - r = amdgpu_amdkfd_resume_iommu(tmp_adev); - if (r) - goto out; r = amdgpu_device_ip_resume_phase1(tmp_adev); if (r) @@ -5589,9 +5356,8 @@ skip_hw_reset: if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) amdgpu_mes_self_test(tmp_adev); - if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); - } if (tmp_adev->asic_reset_res) r = tmp_adev->asic_reset_res; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 8e1cfc87122d..74ffe6581c85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -65,6 +65,7 @@ #include "soc21.h" #include "navi10_ih.h" #include "ih_v6_0.h" +#include "ih_v6_1.h" #include "gfx_v10_0.h" #include "gfx_v11_0.h" #include "sdma_v5_0.h" @@ -1702,6 +1703,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(6, 0, 2): amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block); break; + case IP_VERSION(6, 1, 0): + amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block); + break; default: dev_err(adev->dev, "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", @@ -1750,6 +1754,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; case IP_VERSION(13, 0, 4): @@ -1968,6 +1973,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(6, 0, 1): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): + case IP_VERSION(6, 1, 0): amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block); break; default: @@ -2447,6 +2453,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(6, 0, 0): case IP_VERSION(6, 0, 1): + case IP_VERSION(6, 1, 0): adev->hdp.funcs = &hdp_v6_0_funcs; break; default: @@ -2509,6 +2516,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 8): + case IP_VERSION(14, 0, 0): adev->smuio.funcs = &smuio_v13_0_6_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b702f499f5fb..d20dd3f852fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -124,7 +124,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) struct drm_crtc *crtc = &amdgpu_crtc->base; unsigned long flags; - unsigned i; + unsigned int i; int vpos, hpos; for (i = 0; i < work->shared_count; ++i) @@ -201,7 +201,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, u64 tiling_flags; int i, r; - work = kzalloc(sizeof *work, GFP_KERNEL); + work = kzalloc(sizeof(*work), GFP_KERNEL); if (work == NULL) return -ENOMEM; @@ -332,13 +332,15 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, - take the current one */ + * take the current one + */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; return ret; } /* if we have no active crtcs, then drop the power ref - we got before */ + * we got before + */ if (!active && adev->have_disp_power_ref) { 
pm_runtime_put_autosuspend(dev->dev); adev->have_disp_power_ref = false; @@ -507,11 +509,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, if (amdgpu_connector->router.ddc_valid) amdgpu_i2c_router_select_ddc_port(amdgpu_connector); - if (use_aux) { + if (use_aux) ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2); - } else { + else ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2); - } if (ret != 2) /* Couldn't find an accessible DDC on this connector */ @@ -520,10 +521,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, * EDID header starts with: * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. * Only the first 6 bytes must be valid as - * drm_edid_block_valid() can fix the last 2 bytes */ + * drm_edid_block_valid() can fix the last 2 bytes + */ if (drm_edid_header_is_valid(buf) < 6) { /* Couldn't find an accessible EDID on this - * connector */ + * connector + */ return false; } return true; @@ -1216,8 +1219,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { - drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, " - "can't create framebuffer\n", mode_cmd->handles[0]); + drm_dbg_kms(dev, + "No GEM object associated to handle 0x%08X, can't create framebuffer\n", + mode_cmd->handles[0]); + return ERR_PTR(-ENOENT); } @@ -1410,6 +1415,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, } if (amdgpu_crtc->rmx_type != RMX_OFF) { fixed20_12 a, b; + a.full = dfixed_const(src_v); b.full = dfixed_const(dst_v); amdgpu_crtc->vsc.full = dfixed_div(a, b); @@ -1429,7 +1435,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * * \param dev Device to query. * \param pipe Crtc to query. - * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). + * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). * For driver internal use only also supports these flags: * * USE_REAL_VBLANKSTART to use the real start of vblank instead @@ -1504,8 +1510,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, /* Called from driver internal vblank counter query code? */ if (flags & GET_DISTANCE_TO_VBLANKSTART) { - /* Caller wants distance from real vbl_start in *hpos */ - *hpos = *vpos - vbl_start; + /* Caller wants distance from real vbl_start in *hpos */ + *hpos = *vpos - vbl_start; } /* Fudge vblank to start a few scanlines earlier to handle the @@ -1527,7 +1533,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, /* In vblank? */ if (in_vbl) - ret |= DRM_SCANOUTPOS_IN_VBLANK; + ret |= DRM_SCANOUTPOS_IN_VBLANK; /* Called from driver internal vblank counter query code? 
*/ if (flags & GET_DISTANCE_TO_VBLANKSTART) { @@ -1635,6 +1641,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); if (r == 0) { amdgpu_bo_unpin(aobj); @@ -1642,9 +1649,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) } } - if (fb == NULL || fb->obj[0] == NULL) { + if (!fb || !fb->obj[0]) continue; - } + robj = gem_to_amdgpu_bo(fb->obj[0]); if (!amdgpu_display_robj_is_fb(adev, robj)) { r = amdgpu_bo_reserve(robj, true); @@ -1671,6 +1678,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev) if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); if (r == 0) { r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index f637574644c0..09f6727e7c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -31,10 +31,15 @@ struct amdgpu_doorbell { /* doorbell mmio */ resource_size_t base; resource_size_t size; - u32 __iomem *ptr; /* Number of doorbells reserved for amdgpu kernel driver */ u32 num_kernel_doorbells; + + /* Kernel doorbells */ + struct amdgpu_bo *kernel_doorbells; + + /* For CPU access of doorbells */ + uint32_t *cpu_addr; }; /* Reserved doorbells for amdgpu (including multimedia). @@ -90,8 +95,7 @@ struct amdgpu_doorbell_index { uint32_t xcc_doorbell_range; }; -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT -{ +enum AMDGPU_DOORBELL_ASSIGNMENT { AMDGPU_DOORBELL_KIQ = 0x000, AMDGPU_DOORBELL_HIQ = 0x001, AMDGPU_DOORBELL_DIQ = 0x002, @@ -109,10 +113,10 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT AMDGPU_DOORBELL_IH = 0x1E8, AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, AMDGPU_DOORBELL_INVALID = 0xFFFF -} AMDGPU_DOORBELL_ASSIGNMENT; +}; + +enum AMDGPU_VEGA20_DOORBELL_ASSIGNMENT { -typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT -{ /* Compute + GFX: 0~255 */ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000, AMDGPU_VEGA20_DOORBELL_HIQ = 0x001, @@ -176,10 +180,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1F7, AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF -} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; +}; + +enum AMDGPU_NAVI10_DOORBELL_ASSIGNMENT { -typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT -{ /* Compute + GFX: 0~255 */ AMDGPU_NAVI10_DOORBELL_KIQ = 0x000, AMDGPU_NAVI10_DOORBELL_HIQ = 0x001, @@ -227,13 +231,12 @@ typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = 0x18F, AMDGPU_NAVI10_DOORBELL_INVALID = 0xFFFF -} AMDGPU_NAVI10_DOORBELL_ASSIGNMENT; +}; /* * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space */ -typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT -{ +enum AMDGPU_DOORBELL64_ASSIGNMENT { /* * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range. @@ -309,9 +312,10 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, AMDGPU_DOORBELL64_INVALID = 0xFFFF -} AMDGPU_DOORBELL64_ASSIGNMENT; +}; + +enum AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { /* XCC0: 0x00 ~20, XCC1: 20 ~ 2F ... 
*/ /* KIQ/HIQ/DIQ */ @@ -330,22 +334,32 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, /* IH: 0x1A0 ~ 0x1AF */ AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0, - /* VCN: 0x1B0 ~ 0x1D4 */ + /* VCN: 0x1B0 ~ 0x1E8 */ AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0, - AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1D4, + AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1E8, AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, - AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1D4, + AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1E8, AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF -} AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1; +}; u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); +/* + * GPU doorbell aperture helpers function. + */ +int amdgpu_doorbell_init(struct amdgpu_device *adev); +void amdgpu_doorbell_fini(struct amdgpu_device *adev); +int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev); +uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev, + struct amdgpu_bo *db_bo, + uint32_t doorbell_index); + #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) #define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c new file mode 100644 index 000000000000..da4be0bbb446 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" + +/** + * amdgpu_mm_rdoorbell - read a doorbell dword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * + * Returns the value in the doorbell aperture at the + * requested doorbell index (CIK). 
+ */ +u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) +{ + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (index < adev->doorbell.num_kernel_doorbells) + return readl(adev->doorbell.cpu_addr + index); + + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + return 0; +} + +/** + * amdgpu_mm_wdoorbell - write a doorbell dword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * @v: value to write + * + * Writes @v to the doorbell aperture at the + * requested doorbell index (CIK). + */ +void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) +{ + if (amdgpu_device_skip_hw_access(adev)) + return; + + if (index < adev->doorbell.num_kernel_doorbells) + writel(v, adev->doorbell.cpu_addr + index); + else + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); +} + +/** + * amdgpu_mm_rdoorbell64 - read a doorbell Qword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * + * Returns the value in the doorbell aperture at the + * requested doorbell index (VEGA10+). + */ +u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) +{ + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (index < adev->doorbell.num_kernel_doorbells) + return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index)); + + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + return 0; +} + +/** + * amdgpu_mm_wdoorbell64 - write a doorbell Qword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * @v: value to write + * + * Writes @v to the doorbell aperture at the + * requested doorbell index (VEGA10+). + */ +void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) +{ + if (amdgpu_device_skip_hw_access(adev)) + return; + + if (index < adev->doorbell.num_kernel_doorbells) + atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v); + else + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); +} + +/** + * amdgpu_doorbell_index_on_bar - Find doorbell's absolute offset in BAR + * + * @adev: amdgpu_device pointer + * @db_bo: doorbell object's bo + * @doorbell_index: doorbell relative index in this doorbell object + * + * Returns the doorbell's absolute index in the BAR + */ +uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev, + struct amdgpu_bo *db_bo, + uint32_t doorbell_index) +{ + int db_bo_offset; + + db_bo_offset = amdgpu_bo_gpu_offset_no_check(db_bo); + + /* doorbell index is 32 bit but doorbell's size is 64-bit, so *2 */ + return db_bo_offset / sizeof(u32) + doorbell_index * 2; +} + +/** + * amdgpu_doorbell_create_kernel_doorbells - Create kernel doorbells for graphics + * + * @adev: amdgpu_device pointer + * + * Creates doorbells for graphics driver usage. + * Returns 0 on success, error otherwise.
+ */ +int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev) +{ + int r; + int size; + + /* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */ + size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE); + + /* Allocate an extra page for MES kernel usages (ring test) */ + adev->mes.db_start_dw_offset = size / sizeof(u32); + size += PAGE_SIZE; + + r = amdgpu_bo_create_kernel(adev, + size, + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_DOORBELL, + &adev->doorbell.kernel_doorbells, + NULL, + (void **)&adev->doorbell.cpu_addr); + if (r) { + DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r); + return r; + } + + adev->doorbell.num_kernel_doorbells = size / sizeof(u32); + return 0; +} + +/* + * GPU doorbell aperture helper functions. + */ +/** + * amdgpu_doorbell_init - Init doorbell driver information. + * + * @adev: amdgpu_device pointer + * + * Init doorbell driver information (CIK) + * Returns 0 on success, error on failure. + */ +int amdgpu_doorbell_init(struct amdgpu_device *adev) +{ + + /* No doorbell on SI hardware generation */ + if (adev->asic_type < CHIP_BONAIRE) { + adev->doorbell.base = 0; + adev->doorbell.size = 0; + adev->doorbell.num_kernel_doorbells = 0; + return 0; + } + + if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) + return -EINVAL; + + amdgpu_asic_init_doorbell_index(adev); + + /* doorbell bar mapping */ + adev->doorbell.base = pci_resource_start(adev->pdev, 2); + adev->doorbell.size = pci_resource_len(adev->pdev, 2); + + adev->doorbell.num_kernel_doorbells = + min_t(u32, adev->doorbell.size / sizeof(u32), + adev->doorbell_index.max_assignment + 1); + if (adev->doorbell.num_kernel_doorbells == 0) + return -EINVAL; + + /* + * For Vega, reserve and map two pages on the doorbell BAR since the + * SDMA paging queue doorbell uses the second page. The + * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the + * doorbells are in the first page. So with the paging queue enabled, + * the max num_kernel_doorbells needs one extra page (0x400 in dwords). + */ + if (adev->asic_type >= CHIP_VEGA10) + adev->doorbell.num_kernel_doorbells += 0x400; + + return 0; +} + +/** + * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ * + * @adev: amdgpu_device pointer + * + * Tear down doorbell driver information (CIK) + */ +void amdgpu_doorbell_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->doorbell.kernel_doorbells, + NULL, + (void **)&adev->doorbell.cpu_addr); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0593ef8fe0a6..81edf66dbea8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -26,30 +26,30 @@ #include #include #include -#include #include -#include "amdgpu_drv.h" - #include -#include -#include -#include #include -#include -#include +#include + #include #include +#include +#include +#include +#include +#include #include "amdgpu.h" -#include "amdgpu_irq.h" -#include "amdgpu_dma_buf.h" -#include "amdgpu_sched.h" -#include "amdgpu_fdinfo.h" #include "amdgpu_amdkfd.h" - +#include "amdgpu_dma_buf.h" +#include "amdgpu_drv.h" +#include "amdgpu_fdinfo.h" +#include "amdgpu_irq.h" +#include "amdgpu_psp.h" #include "amdgpu_ras.h" -#include "amdgpu_xgmi.h" #include "amdgpu_reset.h" +#include "amdgpu_sched.h" +#include "amdgpu_xgmi.h" #include "../amdxcp/amdgpu_xcp_drv.h" /* @@ -187,7 +187,6 @@ int amdgpu_mes_kiq; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; int amdgpu_tmz = -1; /* auto */ -uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ int amdgpu_num_kcq = -1; int amdgpu_smartshift_bias; @@ -313,9 +312,7 @@ module_param_named(msi, amdgpu_msi, int, 0444); * jobs is 10000. The timeout for compute is 60000. */ MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " - "for passthrough or sriov, 10000 for all jobs." - " 0: keep default value. negative: infinity timeout), " - "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); @@ -350,8 +347,9 @@ module_param_named(aspm, amdgpu_aspm, int, 0444); * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down * the dGPUs when they are idle if supported. The default is -1 (auto enable). * Setting the value to 0 disables this functionality. + * Setting the value to -2 is auto enabled with power down when displays are attached. 
*/ -MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)"); +MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)"); module_param_named(runpm, amdgpu_runtime_pm, int, 0444); /** @@ -584,7 +582,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); */ #ifdef CONFIG_DRM_AMDGPU_SI -#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) +#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) int amdgpu_si_support = 0; MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))"); #else @@ -603,7 +601,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444); */ #ifdef CONFIG_DRM_AMDGPU_CIK -#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) +#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE) int amdgpu_cik_support = 0; MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))"); #else @@ -620,8 +618,7 @@ module_param_named(cik_support, amdgpu_cik_support, int, 0444); * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled). */ MODULE_PARM_DESC(smu_memory_pool_size, - "reserve gtt for smu debug usage, 0 = disable," - "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); + "reserve gtt for smu debug usage, 0 = disable, 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444); /** @@ -758,20 +755,6 @@ module_param(debug_largebar, int, 0444); MODULE_PARM_DESC(debug_largebar, "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)"); -/** - * DOC: ignore_crat (int) - * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT - * table to get information about AMD APUs. This option can serve as a workaround on - * systems with a broken CRAT table. - * - * Default is auto (according to asic type, iommu_v2, and crat table, to decide - * whether use CRAT) - */ -int ignore_crat; -module_param(ignore_crat, int, 0444); -MODULE_PARM_DESC(ignore_crat, - "Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)"); - /** * DOC: halt_if_hws_hang (int) * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
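/*
 * For reference, a minimal sketch of the module-parameter pattern used
 * throughout this file. The knob name "example_knob" and its semantics are
 * invented for illustration and are not a real amdgpu parameter; mode 0444
 * exposes it read-only as /sys/module/amdgpu/parameters/example_knob.
 */
static int amdgpu_example_knob = -1;	/* hypothetical knob, -1 = auto */
module_param_named(example_knob, amdgpu_example_knob, int, 0444);
MODULE_PARM_DESC(example_knob, "example knob (-1 = auto (default), 0 = off, 1 = on)");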
@@ -791,9 +774,9 @@ module_param(hws_gws_support, bool, 0444); MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)"); /** - * DOC: queue_preemption_timeout_ms (int) - * queue preemption timeout in ms (1 = Minimum, 9000 = default) - */ + * DOC: queue_preemption_timeout_ms (int) + * queue preemption timeout in ms (1 = Minimum, 9000 = default) + */ int queue_preemption_timeout_ms = 9000; module_param(queue_preemption_timeout_ms, int, 0644); MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)"); @@ -888,32 +871,6 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444); MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)"); module_param_named(tmz, amdgpu_tmz, int, 0444); -/** - * DOC: freesync_video (uint) - * Enable the optimization to adjust front porch timing to achieve seamless - * mode change experience when setting a freesync supported mode for which full - * modeset is not needed. - * - * The Display Core will add a set of modes derived from the base FreeSync - * video mode into the corresponding connector's mode list based on commonly - * used refresh rates and VRR range of the connected display, when users enable - * this feature. From the userspace perspective, they can see a seamless mode - * change experience when the change between different refresh rates under the - * same resolution. Additionally, userspace applications such as Video playback - * can read this modeset list and change the refresh rate based on the video - * frame rate. Finally, the userspace can also derive an appropriate mode for a - * particular refresh rate based on the FreeSync Mode and add it to the - * connector's mode list. - * - * Note: This is an experimental feature. - * - * The default value: 0 (off). - */ -MODULE_PARM_DESC( - freesync_video, - "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); -module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); - /** * DOC: reset_method (int) * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco) @@ -2417,7 +2374,6 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) amdgpu_amdkfd_device_init(adev); amdgpu_ttm_set_buffer_funcs_status(adev, true); } - return; } static int amdgpu_pmops_prepare(struct device *dev) @@ -2541,24 +2497,26 @@ static int amdgpu_runtime_idle_check_display(struct device *dev) struct drm_connector_list_iter iter; int ret = 0; - /* XXX: Return busy if any displays are connected to avoid - * possible display wakeups after runtime resume due to - * hotplug events in case any displays were connected while - * the GPU was in suspend. Remove this once that is fixed. - */ - mutex_lock(&drm_dev->mode_config.mutex); - drm_connector_list_iter_begin(drm_dev, &iter); - drm_for_each_connector_iter(list_connector, &iter) { - if (list_connector->status == connector_status_connected) { - ret = -EBUSY; - break; + if (amdgpu_runtime_pm != -2) { + /* XXX: Return busy if any displays are connected to avoid + * possible display wakeups after runtime resume due to + * hotplug events in case any displays were connected while + * the GPU was in suspend. Remove this once that is fixed. 
+ */ + mutex_lock(&drm_dev->mode_config.mutex); + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->status == connector_status_connected) { + ret = -EBUSY; + break; + } } - } - drm_connector_list_iter_end(&iter); - mutex_unlock(&drm_dev->mode_config.mutex); + drm_connector_list_iter_end(&iter); + mutex_unlock(&drm_dev->mode_config.mutex); - if (ret) - return ret; + if (ret) + return ret; + } if (adev->dc_enabled) { struct drm_crtc *crtc; @@ -2614,6 +2572,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) /* wait for all rings to drain before suspending */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) { ret = amdgpu_fence_wait_empty(ring); if (ret) @@ -2738,6 +2697,7 @@ long amdgpu_drm_ioctl(struct file *filp, struct drm_file *file_priv = filp->private_data; struct drm_device *dev; long ret; + dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) @@ -2802,9 +2762,8 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) if (!filp) return -EINVAL; - if (filp->f_op != &amdgpu_driver_kms_fops) { + if (filp->f_op != &amdgpu_driver_kms_fops) return -EINVAL; - } file = filp->private_data; *fpriv = file->driver_priv; @@ -2850,10 +2809,7 @@ static const struct drm_driver amdgpu_kms_driver = { .show_fdinfo = amdgpu_show_fdinfo, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, - .gem_prime_mmap = drm_gem_prime_mmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -2877,10 +2833,7 @@ const struct drm_driver amdgpu_partition_driver = { .fops = &amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, - .gem_prime_mmap = drm_gem_prime_mmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -2897,16 +2850,13 @@ static struct pci_error_handlers amdgpu_pci_err_handler = { .resume = amdgpu_pci_resume, }; -extern const struct attribute_group amdgpu_vram_mgr_attr_group; -extern const struct attribute_group amdgpu_gtt_mgr_attr_group; - static const struct attribute_group *amdgpu_sysfs_groups[] = { &amdgpu_vram_mgr_attr_group, &amdgpu_gtt_mgr_attr_group, + &amdgpu_flash_attr_group, NULL, }; - static struct pci_driver amdgpu_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c index 7d2a908438e9..e71768661ca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c @@ -183,6 +183,8 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, { const struct i2c_adapter_quirks *quirks = i2c_adap->quirks; u16 limit; + u16 ps; /* Partial size */ + int res = 0, r; if (!quirks) limit = 0; @@ -200,28 +202,25 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, eeprom_addr, buf_size, read ? "read" : "write", EEPROM_OFFSET_SIZE); return -EINVAL; - } else { - u16 ps; /* Partial size */ - int res = 0, r; - - /* The "limit" includes all data bytes sent/received, - * which would include the EEPROM_OFFSET_SIZE bytes. - * Account for them here. 
- */ - limit -= EEPROM_OFFSET_SIZE; - for ( ; buf_size > 0; - buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { - ps = min(limit, buf_size); - - r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, - eeprom_buf, ps, read); - if (r < 0) - return r; - res += r; - } - - return res; } + + /* The "limit" includes all data bytes sent/received, + * which would include the EEPROM_OFFSET_SIZE bytes. + * Account for them here. + */ + limit -= EEPROM_OFFSET_SIZE; + for ( ; buf_size > 0; + buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { + ps = min(limit, buf_size); + + r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, + eeprom_buf, ps, read); + if (r < 0) + return r; + res += r; + } + + return res; } int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 13d7413d4ca3..6038b5021b27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -89,7 +89,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name); drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); - drm_printf(p, "drm-client-id:\t%Lu\n", vm->immediate.fence_context); + drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context); drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); @@ -109,7 +109,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) if (!usage[hw_ip]) continue; - drm_printf(p, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip], + drm_printf(p, "drm-engine-%s:\t%lld ns\n", amdgpu_ip_name[hw_ip], ktime_to_ns(usage[hw_ip])); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 4620c4712ce3..9c66d98af6d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -60,10 +60,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) switch (adev->asic_type) { case CHIP_VEGA20: /* D161 and D163 are the VG20 server SKUs */ - if (strnstr(atom_ctx->vbios_version, "D161", - sizeof(atom_ctx->vbios_version)) || - strnstr(atom_ctx->vbios_version, "D163", - sizeof(atom_ctx->vbios_version))) { + if (strnstr(atom_ctx->vbios_pn, "D161", + sizeof(atom_ctx->vbios_pn)) || + strnstr(atom_ctx->vbios_pn, "D163", + sizeof(atom_ctx->vbios_pn))) { if (fru_addr) *fru_addr = FRU_EEPROM_MADDR_6; return true; @@ -72,22 +72,23 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr) } case CHIP_ALDEBARAN: /* All Aldebaran SKUs have an FRU */ - if (!strnstr(atom_ctx->vbios_version, "D673", - sizeof(atom_ctx->vbios_version))) + if (!strnstr(atom_ctx->vbios_pn, "D673", + sizeof(atom_ctx->vbios_pn))) if (fru_addr) *fru_addr = FRU_EEPROM_MADDR_6; return true; case CHIP_SIENNA_CICHLID: - if (strnstr(atom_ctx->vbios_version, "D603", - sizeof(atom_ctx->vbios_version))) { - if (strnstr(atom_ctx->vbios_version, "D603GLXE", - sizeof(atom_ctx->vbios_version))) { + if (strnstr(atom_ctx->vbios_pn, "D603", + sizeof(atom_ctx->vbios_pn))) { + if (strnstr(atom_ctx->vbios_pn, "D603GLXE", + sizeof(atom_ctx->vbios_pn))) { return false; - } else { - if (fru_addr) - *fru_addr = FRU_EEPROM_MADDR_6; - return true; } + + if (fru_addr) + 
*fru_addr = FRU_EEPROM_MADDR_6; + return true; + } else { return false; } @@ -211,3 +212,92 @@ Out: kfree(pia); return 0; } + +/** + * DOC: product_name + * + * The amdgpu driver provides a sysfs API for reporting the product name + * for the device + * The file product_name is used for this and returns the product name + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_fru_product_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->product_name); +} + +static DEVICE_ATTR(product_name, 0444, amdgpu_fru_product_name_show, NULL); + +/** + * DOC: product_number + * + * The amdgpu driver provides a sysfs API for reporting the part number + * for the device + * The file product_number is used for this and returns the part number + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_fru_product_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->product_number); +} + +static DEVICE_ATTR(product_number, 0444, amdgpu_fru_product_number_show, NULL); + +/** + * DOC: serial_number + * + * The amdgpu driver provides a sysfs API for reporting the serial number + * for the device + * The file serial_number is used for this and returns the serial number + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_fru_serial_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->serial); +} + +static DEVICE_ATTR(serial_number, 0444, amdgpu_fru_serial_number_show, NULL); + +static const struct attribute *amdgpu_fru_attributes[] = { + &dev_attr_product_name.attr, + &dev_attr_product_number.attr, + &dev_attr_serial_number.attr, + NULL +}; + +int amdgpu_fru_sysfs_init(struct amdgpu_device *adev) +{ + if (!is_fru_eeprom_supported(adev, NULL)) + return 0; + + return sysfs_create_files(&adev->dev->kobj, amdgpu_fru_attributes); +} + +void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev) +{ + if (!is_fru_eeprom_supported(adev, NULL)) + return; + + sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h index 1308d976d60e..c817db17cfa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h @@ -25,5 +25,7 @@ #define __AMDGPU_FRU_EEPROM_H__ int amdgpu_fru_get_product_info(struct amdgpu_device *adev); +int amdgpu_fru_sysfs_init(struct amdgpu_device *adev); +void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev); #endif // __AMDGPU_FRU_EEPROM_H__ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c index 2ca3c329de6d..2d4b67175b55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c @@ -32,17 +32,15 @@ #include "soc15_common.h" #define FW_ATTESTATION_DB_COOKIE 0x143b6a37 -#define FW_ATTESTATION_RECORD_VALID 1 +#define 
FW_ATTESTATION_RECORD_VALID 1 #define FW_ATTESTATION_MAX_SIZE 4096 -typedef struct FW_ATT_DB_HEADER -{ +struct FW_ATT_DB_HEADER { uint32_t AttDbVersion; /* version of the fwar feature */ uint32_t AttDbCookie; /* cookie as an extra check for corrupt data */ -} FW_ATT_DB_HEADER; +}; -typedef struct FW_ATT_RECORD -{ +struct FW_ATT_RECORD { uint16_t AttFwIdV1; /* Legacy FW Type field */ uint16_t AttFwIdV2; /* V2 FW ID field */ uint32_t AttFWVersion; /* FW Version */ @@ -50,7 +48,7 @@ typedef struct FW_ATT_RECORD uint8_t AttSource; /* FW source indicator */ uint8_t RecordValid; /* Indicates whether the record is a valid entry */ uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */ -} FW_ATT_RECORD; +}; static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, char __user *buf, @@ -60,15 +58,15 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; uint64_t records_addr = 0; uint64_t vram_pos = 0; - FW_ATT_DB_HEADER fw_att_hdr = {0}; - FW_ATT_RECORD fw_att_record = {0}; + struct FW_ATT_DB_HEADER fw_att_hdr = {0}; + struct FW_ATT_RECORD fw_att_record = {0}; - if (size < sizeof(FW_ATT_RECORD)) { + if (size < sizeof(struct FW_ATT_RECORD)) { DRM_WARN("FW attestation input buffer not enough memory"); return -EINVAL; } - if ((*pos + sizeof(FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) { + if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) { DRM_WARN("FW attestation out of bounds"); return 0; } @@ -83,8 +81,8 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, if (*pos == 0) { amdgpu_device_vram_access(adev, vram_pos, - (uint32_t*)&fw_att_hdr, - sizeof(FW_ATT_DB_HEADER), + (uint32_t *)&fw_att_hdr, + sizeof(struct FW_ATT_DB_HEADER), false); if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) { @@ -96,20 +94,20 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, } amdgpu_device_vram_access(adev, - vram_pos + sizeof(FW_ATT_DB_HEADER) + *pos, - (uint32_t*)&fw_att_record, - sizeof(FW_ATT_RECORD), + vram_pos + sizeof(struct FW_ATT_DB_HEADER) + *pos, + (uint32_t *)&fw_att_record, + sizeof(struct FW_ATT_RECORD), false); if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID) return 0; - if (copy_to_user(buf, (void*)&fw_att_record, sizeof(FW_ATT_RECORD))) + if (copy_to_user(buf, (void *)&fw_att_record, sizeof(struct FW_ATT_RECORD))) return -EINVAL; - *pos += sizeof(FW_ATT_RECORD); + *pos += sizeof(struct FW_ATT_RECORD); - return sizeof(FW_ATT_RECORD); + return sizeof(struct FW_ATT_RECORD); } static const struct file_operations amdgpu_fw_attestation_debugfs_ops = { @@ -136,7 +134,7 @@ void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev) return; debugfs_create_file("amdgpu_fw_attestation", - S_IRUSR, + 0400, adev_to_drm(adev)->primary->debugfs_root, adev, &amdgpu_fw_attestation_debugfs_ops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 74055cba3dc9..ca4d2d430e28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -33,6 +33,7 @@ #include #include +#include #include #include @@ -181,11 +182,10 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, return r; bo_va = amdgpu_vm_bo_find(vm, abo); - if (!bo_va) { + if (!bo_va) bo_va = amdgpu_vm_bo_add(adev, vm, abo); - } else { + else ++bo_va->ref_count; - } amdgpu_bo_unreserve(abo); return 0; } @@ -198,29 +198,24 @@ static void amdgpu_gem_object_close(struct 
drm_gem_object *obj, struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_bo_list_entry vm_pd; - struct list_head list, duplicates; struct dma_fence *fence = NULL; - struct ttm_validate_buffer tv; - struct ww_acquire_ctx ticket; struct amdgpu_bo_va *bo_va; + struct drm_exec exec; long r; - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES); + drm_exec_until_all_locked(&exec) { + r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; - tv.bo = &bo->tbo; - tv.num_shared = 2; - list_add(&tv.head, &list); - - amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); - - r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); - if (r) { - dev_err(adev->dev, "leaking bo va because " - "we fail to reserve bo (%ld)\n", r); - return; + r = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; } + bo_va = amdgpu_vm_bo_find(vm, bo); if (!bo_va || --bo_va->ref_count) goto out_unlock; @@ -230,6 +225,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj, goto out_unlock; r = amdgpu_vm_clear_freed(adev, vm, &fence); + if (unlikely(r < 0)) + dev_err(adev->dev, "failed to clear page " + "tables on GEM object close (%ld)\n", r); if (r || !fence) goto out_unlock; @@ -237,10 +235,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj, dma_fence_put(fence); out_unlock: - if (unlikely(r < 0)) - dev_err(adev->dev, "failed to clear page " - "tables on GEM object close (%ld)\n", r); - ttm_eu_backoff_reservation(&ticket, &list); + if (r) + dev_err(adev->dev, "leaking bo va (%ld)\n", r); + drm_exec_fini(&exec); } static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) @@ -292,6 +289,10 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, uint32_t handle, initial_domain; int r; + /* reject DOORBELLs until userspace code to use it is available */ + if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL) + return -EINVAL; + /* reject invalid gem flags */ if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | @@ -463,9 +464,9 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, struct amdgpu_bo *robj; gobj = drm_gem_object_lookup(filp, handle); - if (gobj == NULL) { + if (!gobj) return -ENOENT; - } + robj = gem_to_amdgpu_bo(gobj); if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { @@ -482,6 +483,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_gem_mmap *args = data; uint32_t handle = args->in.handle; + memset(args, 0, sizeof(*args)); return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr); } @@ -508,7 +510,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns) timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout)); /* clamp timeout to avoid unsigned-> signed overflow */ - if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT ) + if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT) return MAX_SCHEDULE_TIMEOUT - 1; return timeout_jiffies; @@ -526,9 +528,9 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, long ret; gobj = drm_gem_object_lookup(filp, handle); - if (gobj == NULL) { + if (!gobj) return -ENOENT; - } + robj = gem_to_amdgpu_bo(gobj); ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, true, timeout); @@ -555,7 +557,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, 
void *data, struct amdgpu_bo *robj; int r = -1; - DRM_DEBUG("%d \n", args->handle); + DRM_DEBUG("%d\n", args->handle); gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; @@ -675,17 +677,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_bo *abo; struct amdgpu_bo_va *bo_va; - struct amdgpu_bo_list_entry vm_pd; - struct ttm_validate_buffer tv; - struct ww_acquire_ctx ticket; - struct list_head list, duplicates; + struct drm_exec exec; uint64_t va_flags; uint64_t vm_size; int r = 0; if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { dev_dbg(dev->dev, - "va_address 0x%LX is in reserved area 0x%LX\n", + "va_address 0x%llx is in reserved area 0x%llx\n", args->va_address, AMDGPU_VA_RESERVED_SIZE); return -EINVAL; } @@ -693,7 +692,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address >= AMDGPU_GMC_HOLE_START && args->va_address < AMDGPU_GMC_HOLE_END) { dev_dbg(dev->dev, - "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n", + "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n", args->va_address, AMDGPU_GMC_HOLE_START, AMDGPU_GMC_HOLE_END); return -EINVAL; @@ -728,36 +727,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); if ((args->operation != AMDGPU_VA_OP_CLEAR) && !(args->flags & AMDGPU_VM_PAGE_PRT)) { gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; abo = gem_to_amdgpu_bo(gobj); - tv.bo = &abo->tbo; - if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) - tv.num_shared = 1; - else - tv.num_shared = 0; - list_add(&tv.head, &list); } else { gobj = NULL; abo = NULL; } - amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES); + drm_exec_until_all_locked(&exec) { + if (gobj) { + r = drm_exec_lock_obj(&exec, gobj); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error; + } - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); - if (r) - goto error_unref; + r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error; + } if (abo) { bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); if (!bo_va) { r = -ENOENT; - goto error_backoff; + goto error; } } else if (args->operation != AMDGPU_VA_OP_CLEAR) { bo_va = fpriv->prt_va; @@ -794,10 +795,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, args->operation); -error_backoff: - ttm_eu_backoff_reservation(&ticket, &list); - -error_unref: +error: + drm_exec_fini(&exec); drm_gem_object_put(gobj); return r; } @@ -813,9 +812,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, int r; gobj = drm_gem_object_lookup(filp, args->handle); - if (gobj == NULL) { + if (!gobj) return -ENOENT; - } + robj = gem_to_amdgpu_bo(gobj); r = amdgpu_bo_reserve(robj, false); @@ -941,9 +940,9 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_put(gobj); - if (r) { + if (r) return r; - } + args->handle = handle; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index fd81b04559d4..2382921710ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -110,9 +110,9 @@ bool 
amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, * The bitmask of CUs to be disabled in the shader array determined by se and * sh is stored in mask[se * max_sh + sh]. */ -void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh) +void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh) { - unsigned se, sh, cu; + unsigned int se, sh, cu; const char *p; memset(mask, 0, sizeof(*mask) * max_se * max_sh); @@ -124,6 +124,7 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s for (;;) { char *next; int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); + if (ret < 3) { DRM_ERROR("amdgpu: could not parse disable_cu\n"); return; @@ -349,7 +350,7 @@ void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id) } int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size, int xcc_id) + unsigned int hpd_size, int xcc_id) { int r; u32 *hpd; @@ -376,7 +377,7 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, /* create MQD for each compute/gfx queue */ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size, int xcc_id) + unsigned int mqd_size, int xcc_id) { int r, i, j; struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; @@ -407,8 +408,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, /* prepare MQD backup */ kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); - if (!kiq->mqd_backup) - dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + if (!kiq->mqd_backup) { + dev_warn(adev->dev, + "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } } if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { @@ -427,8 +431,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.me.mqd_backup[i]) + if (!adev->gfx.me.mqd_backup[i]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } } } } @@ -449,8 +455,10 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, ring->mqd_size = mqd_size; /* prepare MQD backup */ adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[j]) + if (!adev->gfx.mec.mqd_backup[j]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } } } @@ -1274,11 +1282,11 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", supported_partition); } -static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, +static DEVICE_ATTR(current_compute_partition, 0644, amdgpu_gfx_get_current_compute_partition, amdgpu_gfx_set_compute_partition); -static DEVICE_ATTR(available_compute_partition, S_IRUGO, +static DEVICE_ATTR(available_compute_partition, 0444, amdgpu_gfx_get_available_compute_partition, NULL); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 56d73fade568..fdc25cd559b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -331,6 +331,8 @@ struct amdgpu_gmc { u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16]; u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16]; u64 MC_VM_MX_L1_TLB_CNTL; + + u64 noretry_flags; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), 
(type))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index ebeddc9a37e9..6aa3b1d845ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -62,7 +62,7 @@ * Returns 0 on success, error on failure. */ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, enum amdgpu_ib_pool_type pool_type, + unsigned int size, enum amdgpu_ib_pool_type pool_type, struct amdgpu_ib *ib) { int r; @@ -123,7 +123,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, * a CONST_IB), it will be put on the ring prior to the DE IB. Prior * to SI there was just a DE IB. */ -int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, +int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_ib *ibs, struct amdgpu_job *job, struct dma_fence **f) { @@ -131,16 +131,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; bool need_ctx_switch; - unsigned patch_offset = ~0; + unsigned int patch_offset = ~0; struct amdgpu_vm *vm; uint64_t fence_ctx; uint32_t status = 0, alloc_size; - unsigned fence_flags = 0; + unsigned int fence_flags = 0; bool secure, init_shadow; u64 shadow_va, csa_va, gds_va; int vmid = AMDGPU_JOB_GET_VMID(job); - unsigned i; + unsigned int i; int r = 0; bool need_pipe_sync = false; @@ -282,7 +282,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0); if (ring->funcs->init_cond_exec) { - unsigned ce_offset = ~0; + unsigned int ce_offset = ~0; ce_offset = amdgpu_ring_init_cond_exec(ring); if (ce_offset != ~0 && ring->funcs->patch_cond_exec) @@ -385,7 +385,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) { long tmo_gfx, tmo_mm; int r, ret = 0; - unsigned i; + unsigned int i; tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT; if (amdgpu_sriov_vf(adev)) { @@ -402,7 +402,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) /* for CP & SDMA engines since they are scheduled together so * need to make the timeout width enough to cover the time * cost waiting for it coming back under RUNTIME only - */ + */ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; } else if (adev->gmc.xgmi.hive_id) { tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT; @@ -465,13 +465,13 @@ static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = m->private; - seq_printf(m, "--------------------- DELAYED --------------------- \n"); + seq_puts(m, "--------------------- DELAYED ---------------------\n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED], m); - seq_printf(m, "-------------------- IMMEDIATE -------------------- \n"); + seq_puts(m, "-------------------- IMMEDIATE --------------------\n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE], m); - seq_printf(m, "--------------------- DIRECT ---------------------- \n"); + seq_puts(m, "--------------------- DIRECT ----------------------\n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index fceb3b384955..f3b0aaf3ebc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -138,6 +138,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) /** * amdgpu_ih_ring_write - write IV to the ring 
buffer * + * @adev: amdgpu_device pointer * @ih: ih ring to write to * @iv: the iv to write * @num_dw: size of the iv in dw @@ -145,8 +146,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) * Writes an IV to the ring buffer using the CPU and increment the wptr. * Used for testing and delegating IVs to a software ring. */ -void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, - unsigned int num_dw) +void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, + const uint32_t *iv, unsigned int num_dw) { uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2; unsigned int i; @@ -161,6 +162,9 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, if (wptr != READ_ONCE(ih->rptr)) { wmb(); WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr)); + } else if (adev->irq.retry_cam_enabled) { + dev_warn_once(adev->dev, "IH soft ring buffer overflow 0x%X, 0x%X\n", + wptr, ih->rptr); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index dd1c2eded6b9..6c6184f0dbc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -27,6 +27,9 @@ /* Maximum number of IVs processed at once */ #define AMDGPU_IH_MAX_NUM_IVS 32 +#define IH_RING_SIZE (256 * 1024) +#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */ + struct amdgpu_device; struct amdgpu_iv_entry; @@ -97,8 +100,8 @@ struct amdgpu_ih_funcs { int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); -void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, - unsigned int num_dw); +void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, + const uint32_t *iv, unsigned int num_dw); int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 5273decc5753..fa6d0adcec20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -493,7 +493,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry, unsigned int num_dw) { - amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw); + amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw); schedule_work(&adev->irq.ih_soft_work); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 12414a713256..99f4df133ed3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -557,6 +557,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) crtc = (struct drm_crtc *)minfo->crtcs[i]; if (crtc && crtc->base.id == info->mode_crtc.id) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + ui32 = amdgpu_crtc->crtc_id; found = 1; break; @@ -575,7 +576,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (ret) return ret; - ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip))); + ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip))); return ret ? 
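/* editor's note: copy_to_user() returns the number of bytes left uncopied, so any nonzero result maps to -EFAULT */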
-EFAULT : 0; } case AMDGPU_INFO_HW_IP_COUNT: { @@ -721,17 +722,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ? -EFAULT : 0; } case AMDGPU_INFO_READ_MMR_REG: { - unsigned n, alloc_size; + unsigned int n, alloc_size; uint32_t *regs; - unsigned se_num = (info->read_mmr_reg.instance >> + unsigned int se_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & AMDGPU_INFO_MMR_SE_INDEX_MASK; - unsigned sh_num = (info->read_mmr_reg.instance >> + unsigned int sh_num = (info->read_mmr_reg.instance >> AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & AMDGPU_INFO_MMR_SH_INDEX_MASK; /* set full masks if the userspace set all bits - * in the bitfields */ + * in the bitfields + */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; else if (se_num >= AMDGPU_GFX_MAX_SE) @@ -896,7 +898,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return ret; } case AMDGPU_INFO_VCE_CLOCK_TABLE: { - unsigned i; + unsigned int i; struct drm_amdgpu_info_vce_clock_table vce_clk_table = {}; struct amd_vce_state *vce_state; @@ -1017,7 +1019,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case AMDGPU_INFO_SENSOR_GPU_AVG_POWER: /* get average GPU power */ if (amdgpu_dpm_read_sensor(adev, - AMDGPU_PP_SENSOR_GPU_POWER, + AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&ui32, &ui32_size)) { return -EINVAL; } @@ -1102,6 +1104,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct drm_amdgpu_info_video_caps *caps; int r; + if (!adev->asic_funcs->query_video_codecs) + return -EINVAL; + switch (info->video_cap.type) { case AMDGPU_INFO_VIDEO_CAPS_DECODE: r = amdgpu_asic_query_video_codecs(adev, false, &codecs); @@ -1719,7 +1724,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); - seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index f808841310fd..b6015157763a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -22,6 +22,7 @@ */ #include +#include #include "amdgpu_mes.h" #include "amdgpu.h" @@ -38,120 +39,70 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) PAGE_SIZE); } -int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev, - unsigned int *doorbell_index) -{ - int r = ida_simple_get(&adev->mes.doorbell_ida, 2, - adev->mes.max_doorbell_slices, - GFP_KERNEL); - if (r > 0) - *doorbell_index = r; - - return r; -} - -void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev, - unsigned int doorbell_index) -{ - if (doorbell_index) - ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index); -} - -unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar( - struct amdgpu_device *adev, - uint32_t doorbell_index, - unsigned int doorbell_id) -{ - return ((doorbell_index * - amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) + - doorbell_id * 2); -} - -static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev, +static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, struct amdgpu_mes_process *process, int ip_type, uint64_t *doorbell_index) { unsigned int offset, found; + struct amdgpu_mes *mes = &adev->mes; - if (ip_type == AMDGPU_RING_TYPE_SDMA) { + if (ip_type == 
AMDGPU_RING_TYPE_SDMA) offset = adev->doorbell_index.sdma_engine[0]; - found = find_next_zero_bit(process->doorbell_bitmap, - AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS, - offset); - } else { - found = find_first_zero_bit(process->doorbell_bitmap, - AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS); - } + else + offset = 0; - if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) { + found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset); + if (found >= mes->num_mes_dbs) { DRM_WARN("No doorbell available\n"); return -ENOSPC; } - set_bit(found, process->doorbell_bitmap); - - *doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev, - process->doorbell_index, found); + set_bit(found, mes->doorbell_bitmap); + /* Get the absolute doorbell index on BAR */ + *doorbell_index = mes->db_start_dw_offset + found * 2; return 0; } -static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev, +static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev, struct amdgpu_mes_process *process, uint32_t doorbell_index) { - unsigned int old, doorbell_id; + unsigned int old, rel_index; + struct amdgpu_mes *mes = &adev->mes; - doorbell_id = doorbell_index - - (process->doorbell_index * - amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32); - doorbell_id /= 2; - - old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap); + /* Find the relative index of the doorbell in this object */ + rel_index = (doorbell_index - mes->db_start_dw_offset) / 2; + old = test_and_clear_bit(rel_index, mes->doorbell_bitmap); WARN_ON(!old); } static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) { - size_t doorbell_start_offset; - size_t doorbell_aperture_size; - size_t doorbell_process_limit; - size_t aggregated_doorbell_start; int i; + struct amdgpu_mes *mes = &adev->mes; - aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32); - aggregated_doorbell_start = - roundup(aggregated_doorbell_start, PAGE_SIZE); + /* Bitmap for dynamic allocation of kernel doorbells */ + mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); + if (!mes->doorbell_bitmap) { + DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); + return -ENOMEM; + } - doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE; - doorbell_start_offset = - roundup(doorbell_start_offset, - amdgpu_mes_doorbell_process_slice(adev)); + mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE; + for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) { + adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2; + set_bit(i, mes->doorbell_bitmap); + } - doorbell_aperture_size = adev->doorbell.size; - doorbell_aperture_size = - rounddown(doorbell_aperture_size, - amdgpu_mes_doorbell_process_slice(adev)); - - if (doorbell_aperture_size > doorbell_start_offset) - doorbell_process_limit = - (doorbell_aperture_size - doorbell_start_offset) / - amdgpu_mes_doorbell_process_slice(adev); - else - return -ENOSPC; - - adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32); - adev->mes.max_doorbell_slices = doorbell_process_limit; - - /* allocate Qword range for aggregated doorbell */ - for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) - adev->mes.aggregated_doorbells[i] = - aggregated_doorbell_start / sizeof(u32) + i * 2; - - DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit); return 0; } +static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev) +{ + bitmap_free(adev->mes.doorbell_bitmap); +} + int amdgpu_mes_init(struct amdgpu_device *adev) { 
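/* Editor's note (not part of this patch): the doorbell rework above replaces the old per-process slice bookkeeping with a single page-sized bitmap in struct amdgpu_mes. A doorbell is 64 bits wide (two dwords), so bitmap slot i maps to the absolute dword offset mes->db_start_dw_offset + i * 2 in the doorbell BAR, which is what amdgpu_mes_kernel_doorbell_get() computes; amdgpu_mes_doorbell_init() pre-claims the first AMDGPU_MES_PRIORITY_NUM_LEVELS slots for the aggregated doorbells. */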
int i, r; @@ -250,6 +201,7 @@ void amdgpu_mes_fini(struct amdgpu_device *adev) amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs); amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs); amdgpu_device_wb_free(adev, adev->mes.read_val_offs); + amdgpu_mes_doorbell_free(adev); idr_destroy(&adev->mes.pasid_idr); idr_destroy(&adev->mes.gang_id_idr); @@ -278,15 +230,6 @@ int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, return -ENOMEM; } - process->doorbell_bitmap = - kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS, - BITS_PER_BYTE), GFP_KERNEL); - if (!process->doorbell_bitmap) { - DRM_ERROR("failed to allocate doorbell bitmap\n"); - kfree(process); - return -ENOMEM; - } - /* allocate the process context bo and map it */ r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, @@ -313,15 +256,6 @@ int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, goto clean_up_ctx; } - /* allocate the starting doorbell index of the process */ - r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index); - if (r < 0) { - DRM_ERROR("failed to allocate doorbell for process\n"); - goto clean_up_pasid; - } - - DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index); - INIT_LIST_HEAD(&process->gang_list); process->vm = vm; process->pasid = pasid; @@ -331,15 +265,12 @@ int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, amdgpu_mes_unlock(&adev->mes); return 0; -clean_up_pasid: - idr_remove(&adev->mes.pasid_idr, pasid); - amdgpu_mes_unlock(&adev->mes); clean_up_ctx: + amdgpu_mes_unlock(&adev->mes); amdgpu_bo_free_kernel(&process->proc_ctx_bo, &process->proc_ctx_gpu_addr, &process->proc_ctx_cpu_ptr); clean_up_memory: - kfree(process->doorbell_bitmap); kfree(process); return r; } @@ -385,7 +316,6 @@ void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid) idr_remove(&adev->mes.gang_id_idr, gang->gang_id); } - amdgpu_mes_free_process_doorbells(adev, process->doorbell_index); idr_remove(&adev->mes.pasid_idr, pasid); amdgpu_mes_unlock(&adev->mes); @@ -407,7 +337,6 @@ void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid) amdgpu_bo_free_kernel(&process->proc_ctx_bo, &process->proc_ctx_gpu_addr, &process->proc_ctx_cpu_ptr); - kfree(process->doorbell_bitmap); kfree(process); } @@ -642,6 +571,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, unsigned long flags; int r; + memset(&queue_input, 0, sizeof(struct mes_add_queue_input)); + /* allocate the mes queue buffer */ queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL); if (!queue) { @@ -679,7 +610,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, *queue_id = queue->queue_id = r; /* allocate a doorbell index for the queue */ - r = amdgpu_mes_queue_doorbell_get(adev, gang->process, + r = amdgpu_mes_kernel_doorbell_get(adev, gang->process, qprops->queue_type, &qprops->doorbell_off); if (r) @@ -737,7 +668,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, return 0; clean_up_doorbell: - amdgpu_mes_queue_doorbell_free(adev, gang->process, + amdgpu_mes_kernel_doorbell_free(adev, gang->process, qprops->doorbell_off); clean_up_queue_id: spin_lock_irqsave(&adev->mes.queue_id_lock, flags); @@ -792,7 +723,7 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) queue_id); list_del(&queue->list); - amdgpu_mes_queue_doorbell_free(adev, gang->process, + amdgpu_mes_kernel_doorbell_free(adev, gang->process, queue->doorbell_off); 
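/* Editor's note: amdgpu_mes_kernel_doorbell_free() above is the exact inverse of the allocator; it recovers the bitmap slot as (doorbell_index - mes->db_start_dw_offset) / 2, then clears the bit and warns if it was not actually set. */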
amdgpu_mes_unlock(&adev->mes); @@ -1168,34 +1099,31 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, struct amdgpu_mes_ctx_data *ctx_data) { struct amdgpu_bo_va *bo_va; - struct ww_acquire_ctx ticket; - struct list_head list; - struct amdgpu_bo_list_entry pd; - struct ttm_validate_buffer csa_tv; struct amdgpu_sync sync; + struct drm_exec exec; int r; amdgpu_sync_create(&sync); - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&csa_tv.head); - csa_tv.bo = &ctx_data->meta_data_obj->tbo; - csa_tv.num_shared = 1; + drm_exec_init(&exec, 0); + drm_exec_until_all_locked(&exec) { + r = drm_exec_lock_obj(&exec, + &ctx_data->meta_data_obj->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_fini_exec; - list_add(&csa_tv.head, &list); - amdgpu_vm_get_pd_bo(vm, &list, &pd); - - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); - if (r) { - DRM_ERROR("failed to reserve meta data BO: err=%d\n", r); - return r; + r = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error_fini_exec; } bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj); if (!bo_va) { - ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("failed to create bo_va for meta data BO\n"); - return -ENOMEM; + r = -ENOMEM; + goto error_fini_exec; } r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0, @@ -1205,33 +1133,35 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, if (r) { DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r); - goto error; + goto error_del_bo_va; } r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) { DRM_ERROR("failed to do vm_bo_update on meta data\n"); - goto error; + goto error_del_bo_va; } amdgpu_sync_fence(&sync, bo_va->last_pt_update); r = amdgpu_vm_update_pdes(adev, vm, false); if (r) { DRM_ERROR("failed to update pdes on meta data\n"); - goto error; + goto error_del_bo_va; } amdgpu_sync_fence(&sync, vm->last_update); amdgpu_sync_wait(&sync, false); - ttm_eu_backoff_reservation(&ticket, &list); + drm_exec_fini(&exec); amdgpu_sync_free(&sync); ctx_data->meta_data_va = bo_va; return 0; -error: +error_del_bo_va: amdgpu_vm_bo_del(adev, bo_va); - ttm_eu_backoff_reservation(&ticket, &list); + +error_fini_exec: + drm_exec_fini(&exec); amdgpu_sync_free(&sync); return r; } @@ -1242,34 +1172,30 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va; struct amdgpu_bo *bo = ctx_data->meta_data_obj; struct amdgpu_vm *vm = bo_va->base.vm; - struct amdgpu_bo_list_entry vm_pd; - struct list_head list, duplicates; - struct dma_fence *fence = NULL; - struct ttm_validate_buffer tv; - struct ww_acquire_ctx ticket; - long r = 0; + struct dma_fence *fence; + struct drm_exec exec; + long r; - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); + drm_exec_init(&exec, 0); + drm_exec_until_all_locked(&exec) { + r = drm_exec_lock_obj(&exec, + &ctx_data->meta_data_obj->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; - tv.bo = &bo->tbo; - tv.num_shared = 2; - list_add(&tv.head, &list); - - amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); - - r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); - if (r) { - dev_err(adev->dev, "leaking bo va because " - "we fail to reserve bo (%ld)\n", r); - return r; + r = amdgpu_vm_lock_pd(vm, &exec, 0); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto out_unlock; } amdgpu_vm_bo_del(adev, bo_va); if (!amdgpu_vm_ready(vm)) goto out_unlock; - r = 
dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence); + r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, + &fence); if (r) goto out_unlock; if (fence) { @@ -1288,7 +1214,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, out_unlock: if (unlikely(r < 0)) dev_err(adev->dev, "failed to clear page tables (%ld)\n", r); - ttm_eu_backoff_reservation(&ticket, &list); + drm_exec_fini(&exec); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 2d6ac30b7135..a27b424ffe00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -27,6 +27,7 @@ #include "amdgpu_irq.h" #include "kgd_kfd_interface.h" #include "amdgpu_gfx.h" +#include "amdgpu_doorbell.h" #include #define AMDGPU_MES_MAX_COMPUTE_PIPES 8 @@ -76,7 +77,6 @@ struct amdgpu_mes { uint32_t kiq_version; uint32_t total_max_queue; - uint32_t doorbell_id_offset; uint32_t max_doorbell_slices; uint64_t default_process_quantum; @@ -128,6 +128,11 @@ struct amdgpu_mes { int (*kiq_hw_init)(struct amdgpu_device *adev); int (*kiq_hw_fini)(struct amdgpu_device *adev); + /* MES doorbells */ + uint32_t db_start_dw_offset; + uint32_t num_mes_dbs; + unsigned long *doorbell_bitmap; + /* ip specific functions */ const struct amdgpu_mes_funcs *funcs; }; @@ -142,7 +147,6 @@ struct amdgpu_mes_process { uint64_t process_quantum; struct list_head gang_list; uint32_t doorbell_index; - unsigned long *doorbell_bitmap; struct mutex doorbell_lock; }; @@ -224,6 +228,7 @@ struct mes_add_queue_input { uint32_t is_kfd_process; uint32_t is_aql_queue; uint32_t queue_size; + uint32_t exclusively_scheduled; }; struct mes_remove_queue_input { @@ -386,14 +391,6 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, int amdgpu_mes_self_test(struct amdgpu_device *adev); -int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev, - unsigned int *doorbell_index); -void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev, - unsigned int doorbell_index); -unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar( - struct amdgpu_device *adev, - uint32_t doorbell_index, - unsigned int doorbell_id); int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index a3bc00577a7c..51ca544a7094 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -45,6 +45,22 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev) return 0; } +u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev) +{ + if (adev->nbio.funcs && adev->nbio.funcs->get_pcie_replay_count) + return adev->nbio.funcs->get_pcie_replay_count(adev); + + return 0; +} + +void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1) +{ + if (adev->nbio.funcs->get_pcie_usage) + adev->nbio.funcs->get_pcie_usage(adev, count0, count1); + +} + int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 8ab8ae01f87c..6cf7a8829a52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -99,6 +99,9 @@ struct amdgpu_nbio_funcs { int (*get_compute_partition_mode)(struct amdgpu_device *adev); u32 (*get_memory_partition_mode)(struct amdgpu_device *adev, u32 *supp_modes); + u64 
(*get_pcie_replay_count)(struct amdgpu_device *adev); + void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1); }; struct amdgpu_nbio { @@ -111,5 +114,8 @@ struct amdgpu_nbio { }; int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev); +void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1); int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); +u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index f7905bce0de1..ace837cfa0a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -158,6 +158,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) c++; } + if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) { + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].mem_type = AMDGPU_PL_DOORBELL; + places[c].flags = 0; + c++; + } + if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; places[c].lpfn = 0; @@ -477,7 +485,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, goto fail; } - /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */ + /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */ return true; fail: @@ -1029,6 +1037,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) } else if (bo->tbo.resource->mem_type == TTM_PL_TT) { atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); } + } static const char * const amdgpu_vram_names[] = { @@ -1575,23 +1584,31 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) { struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; - unsigned int domain; const char *placement; unsigned int pin_count; u64 size; - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); - switch (domain) { - case AMDGPU_GEM_DOMAIN_VRAM: - placement = "VRAM"; - break; - case AMDGPU_GEM_DOMAIN_GTT: - placement = " GTT"; - break; - case AMDGPU_GEM_DOMAIN_CPU: - default: - placement = " CPU"; - break; + if (dma_resv_trylock(bo->tbo.base.resv)) { + unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + if (amdgpu_bo_in_cpu_visible_vram(bo)) + placement = "VRAM VISIBLE"; + else + placement = "VRAM"; + break; + case AMDGPU_GEM_DOMAIN_GTT: + placement = "GTT"; + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + placement = "CPU"; + break; + } + dma_resv_unlock(bo->tbo.base.resv); + } else { + placement = "UNKNOWN"; } size = amdgpu_bo_size(bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 05496b97ef93..f3ee83cdf97e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -182,6 +182,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) return AMDGPU_GEM_DOMAIN_GWS; case AMDGPU_PL_OA: return AMDGPU_GEM_DOMAIN_OA; + case AMDGPU_PL_DOORBELL: + return AMDGPU_GEM_DOMAIN_DOORBELL; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c index 71ee361d0972..6e91ea1de5aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c @@ -276,9 +276,8 @@ static void amdgpu_perf_read(struct perf_event *event) (!pe->adev->df.funcs->pmc_get_count)) return; + prev = local64_read(&hwc->prev_count); 
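/* Editor's note: local64_try_cmpxchg() writes the value it observed back into 'prev' whenever the exchange fails, so a single read before the loop replaces the old per-iteration local64_read(). */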
do { - prev = local64_read(&hwc->prev_count); - switch (hwc->config_base) { case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: @@ -289,7 +288,7 @@ static void amdgpu_perf_read(struct perf_event *event) count = 0; break; } - } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev); + } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, count)); local64_add(count - prev, &event->count); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 78d1ee71f3f4..8fdca54bb8a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -45,9 +45,6 @@ #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) -static int psp_sysfs_init(struct amdgpu_device *adev); -static void psp_sysfs_fini(struct amdgpu_device *adev); - static int psp_load_smu_fw(struct psp_context *psp); static int psp_rap_terminate(struct psp_context *psp); static int psp_securedisplay_terminate(struct psp_context *psp); @@ -148,6 +145,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp) break; case IP_VERSION(13, 0, 6): ret = psp_init_cap_microcode(psp, ucode_prefix); + ret &= psp_init_ta_microcode(psp, ucode_prefix); break; case IP_VERSION(13, 0, 10): adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; @@ -180,9 +178,11 @@ static int psp_early_init(void *handle) psp->autoload_supported = false; break; case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 7): + adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); + fallthrough; case IP_VERSION(11, 0, 5): case IP_VERSION(11, 0, 9): - case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 5, 0): case IP_VERSION(11, 0, 12): @@ -202,8 +202,8 @@ static int psp_early_init(void *handle) case IP_VERSION(13, 0, 3): case IP_VERSION(13, 0, 5): case IP_VERSION(13, 0, 8): - case IP_VERSION(13, 0, 10): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): psp_v13_0_set_psp_funcs(psp); psp->autoload_supported = true; break; @@ -215,8 +215,10 @@ static int psp_early_init(void *handle) break; case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): + case IP_VERSION(13, 0, 10): psp_v13_0_set_psp_funcs(psp); psp->autoload_supported = true; + adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); break; case IP_VERSION(13, 0, 4): psp_v13_0_4_set_psp_funcs(psp); @@ -437,14 +439,15 @@ static int psp_sw_init(void *handle) /* If psp runtime database exists, then * only enable two stage memory training * when TWO_STAGE_DRAM_TRAINING bit is set - * in runtime database */ + * in runtime database + */ mem_training_ctx->enable_mem_training = true; } } else { - /* If psp runtime database doesn't exist or - * is invalid, force enable two stage memory - * training */ + /* If psp runtime database doesn't exist or is + * invalid, force enable two stage memory training + */ mem_training_ctx->enable_mem_training = true; } @@ -462,13 +465,6 @@ static int psp_sw_init(void *handle) } } - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) { - ret = psp_sysfs_init(adev); - if (ret) - return ret; - } - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, amdgpu_sriov_vf(adev) ? 
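/* editor's note: under SR-IOV the PSP firmware private buffer is kept in VRAM; bare metal can fall back to GTT */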
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, @@ -520,10 +516,6 @@ static int psp_sw_fini(void *handle) amdgpu_ucode_release(&psp->cap_fw); amdgpu_ucode_release(&psp->toc_fw); - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) - psp_sysfs_fini(adev); - kfree(cmd); cmd = NULL; @@ -807,7 +799,8 @@ static int psp_tmr_init(struct psp_context *psp) tmr_size = PSP_TMR_SIZE(psp->adev); /* For ASICs support RLC autoload, psp will parse the toc - * and calculate the total size of TMR needed */ + * and calculate the total size of TMR needed + */ if (!amdgpu_sriov_vf(psp->adev) && psp->toc.start_addr && psp->toc.size_bytes && @@ -1147,9 +1140,9 @@ int psp_ta_init_shared_buf(struct psp_context *psp, struct ta_mem_context *mem_ctx) { /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for ta to host memory - */ + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for ta to host memory + */ return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, @@ -1738,7 +1731,8 @@ int psp_ras_trigger_error(struct psp_context *psp, return -EINVAL; /* If err_event_athub occurs error inject was successful, however - return status from TA is no long reliable */ + * return status from TA is no longer reliable + */ if (amdgpu_ras_intr_triggered()) return 0; @@ -2459,8 +2453,8 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, return ret; } -static int psp_execute_non_psp_fw_load(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) +int psp_execute_ip_fw_load(struct psp_context *psp, + struct amdgpu_firmware_info *ucode) { int ret = 0; struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); @@ -2503,7 +2497,7 @@ static int psp_load_smu_fw(struct psp_context *psp) DRM_WARN("Failed to set MP1 state prepare for reload\n"); } - ret = psp_execute_non_psp_fw_load(psp, ucode); + ret = psp_execute_ip_fw_load(psp, ucode); if (ret) DRM_ERROR("PSP load smu failed!\n"); @@ -2545,7 +2539,7 @@ int psp_load_fw_list(struct psp_context *psp, for (i = 0; i < ucode_count; ++i) { ucode = ucode_list[i]; psp_print_fw_hdr(psp, ucode); - ret = psp_execute_non_psp_fw_load(psp, ucode); + ret = psp_execute_ip_fw_load(psp, ucode); if (ret) return ret; } @@ -2587,12 +2581,13 @@ static int psp_load_non_psp_fw(struct psp_context *psp) ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) /* PSP only receive one SDMA fw for sienna_cichlid, - * as all four sdma fw are same */ + * as all four sdma fw are same + */ continue; psp_print_fw_hdr(psp, ucode); - ret = psp_execute_non_psp_fw_load(psp, ucode); + ret = psp_execute_ip_fw_load(psp, ucode); if (ret) return ret; @@ -2652,8 +2647,8 @@ static int psp_load_fw(struct amdgpu_device *adev) if (adev->gmc.xgmi.num_physical_nodes > 1) { ret = psp_xgmi_initialize(psp, false, true); /* Warning the XGMI seesion initialize failure - * Instead of stop driver initialization - */ + * Instead of stopping driver initialization + */ if (ret) dev_err(psp->adev->dev, "XGMI: Failed to initialize XGMI session\n"); @@ -2931,19 +2926,6 @@ int psp_rlc_autoload_start(struct psp_context *psp) return ret; } -int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, - uint64_t cmd_gpu_addr, int cmd_size) -{ - struct amdgpu_firmware_info ucode = {0}; - - ucode.ucode_id = inst_idx ?
AMDGPU_UCODE_ID_VCN1_RAM : - AMDGPU_UCODE_ID_VCN0_RAM; - ucode.mc_addr = cmd_gpu_addr; - ucode.ucode_size = cmd_size; - - return psp_execute_non_psp_fw_load(&adev->psp, &ucode); -} - int psp_ring_cmd_submit(struct psp_context *psp, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, @@ -3584,6 +3566,11 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size drm_dev_exit(idx); } +/** + * DOC: usbc_pd_fw + * Reading from this file will retrieve the USB-C PD firmware version. Writing to + * this file will trigger the update process. + */ static DEVICE_ATTR(usbc_pd_fw, 0644, psp_usbc_pd_fw_sysfs_read, psp_usbc_pd_fw_sysfs_write); @@ -3624,7 +3611,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, adev->psp.vbflash_image_size += count; mutex_unlock(&adev->psp.mutex); - dev_info(adev->dev, "VBIOS flash write PSP done"); + dev_dbg(adev->dev, "IFWI staged for update"); return count; } @@ -3644,7 +3631,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, if (adev->psp.vbflash_image_size == 0) return -EINVAL; - dev_info(adev->dev, "VBIOS flash to PSP started"); + dev_dbg(adev->dev, "PSP IFWI flash process initiated"); ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, AMDGPU_GPU_PAGE_SIZE, @@ -3669,14 +3656,32 @@ rel_buf: adev->psp.vbflash_image_size = 0; if (ret) { - dev_err(adev->dev, "Failed to load VBIOS FW, err = %d", ret); + dev_err(adev->dev, "Failed to load IFWI, err = %d", ret); return ret; } - dev_info(adev->dev, "VBIOS flash to PSP done"); + dev_dbg(adev->dev, "PSP IFWI flash process done"); return 0; } +/** + * DOC: psp_vbflash + * Writing to this file will stage an IFWI for update. Reading from this file + * will trigger the update process. + */ +static struct bin_attribute psp_vbflash_bin_attr = { + .attr = {.name = "psp_vbflash", .mode = 0660}, + .size = 0, + .write = amdgpu_psp_vbflash_write, + .read = amdgpu_psp_vbflash_read, +}; + +/** + * DOC: psp_vbflash_status + * The status of the flash process. + * 0: IFWI flash not complete. + * 1: IFWI flash complete. + */ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, struct device_attribute *attr, char *buf) @@ -3693,39 +3698,49 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, return sysfs_emit(buf, "0x%x\n", vbflash_status); } - -static const struct bin_attribute psp_vbflash_bin_attr = { - .attr = {.name = "psp_vbflash", .mode = 0660}, - .size = 0, - .write = amdgpu_psp_vbflash_write, - .read = amdgpu_psp_vbflash_read, -}; - static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); -int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) +static struct bin_attribute *bin_flash_attrs[] = { + &psp_vbflash_bin_attr, + NULL +}; + +static struct attribute *flash_attrs[] = { + &dev_attr_psp_vbflash_status.attr, + &dev_attr_usbc_pd_fw.attr, + NULL +}; + +static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { - int ret = 0; + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + if (attr == &dev_attr_usbc_pd_fw.attr) + return adev->psp.sup_pd_fw_up ? 
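/* editor's note: returning 0 from an .is_visible callback hides the attribute entirely */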
0660 : 0; - switch (adev->ip_versions[MP0_HWIP][0]) { - case IP_VERSION(13, 0, 0): - case IP_VERSION(13, 0, 7): - case IP_VERSION(13, 0, 10): - ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); - if (ret) - dev_err(adev->dev, "Failed to create device file psp_vbflash"); - ret = device_create_file(adev->dev, &dev_attr_psp_vbflash_status); - if (ret) - dev_err(adev->dev, "Failed to create device file psp_vbflash_status"); - return ret; - default: - return 0; - } + return adev->psp.sup_ifwi_up ? 0440 : 0; } +static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, + struct bin_attribute *attr, + int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return adev->psp.sup_ifwi_up ? 0660 : 0; +} + +const struct attribute_group amdgpu_flash_attr_group = { + .attrs = flash_attrs, + .bin_attrs = bin_flash_attrs, + .is_bin_visible = amdgpu_bin_flash_attr_is_visible, + .is_visible = amdgpu_flash_attr_is_visible, +}; + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, @@ -3744,27 +3759,6 @@ const struct amd_ip_funcs psp_ip_funcs = { .set_powergating_state = psp_set_powergating_state, }; -static int psp_sysfs_init(struct amdgpu_device *adev) -{ - int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); - - if (ret) - DRM_ERROR("Failed to create USBC PD FW control file!"); - - return ret; -} - -void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev) -{ - sysfs_remove_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); - device_remove_file(adev->dev, &dev_attr_psp_vbflash_status); -} - -static void psp_sysfs_fini(struct amdgpu_device *adev) -{ - device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); -} - const struct amdgpu_ip_block_version psp_v3_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, .major = 3, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 2cae0b1a0b8a..3384eb94fde0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -39,6 +39,8 @@ #define PSP_TMR_ALIGNMENT 0x100000 #define PSP_FW_NAME_LEN 0x24 +extern const struct attribute_group amdgpu_flash_attr_group; + enum psp_shared_mem_size { PSP_ASD_SHARED_MEM_SIZE = 0x0, PSP_XGMI_SHARED_MEM_SIZE = 0x4000, @@ -78,8 +80,7 @@ enum psp_bootloader_cmd { PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000, }; -enum psp_ring_type -{ +enum psp_ring_type { PSP_RING_TYPE__INVALID = 0, /* * These values map to the way the PSP kernel identifies the @@ -89,8 +90,7 @@ enum psp_ring_type PSP_RING_TYPE__KM = 2 /* Kernel mode ring (formerly called GPCOM) */ }; -struct psp_ring -{ +struct psp_ring { enum psp_ring_type ring_type; struct psp_gfx_rb_frame *ring_mem; uint64_t ring_mem_mc_addr; @@ -107,8 +107,7 @@ enum psp_reg_prog_id { PSP_REG_LAST }; -struct psp_funcs -{ +struct psp_funcs { int (*init_microcode)(struct psp_context *psp); int (*bootloader_load_kdb)(struct psp_context *psp); int (*bootloader_load_spl)(struct psp_context *psp); @@ -133,6 +132,7 @@ struct psp_funcs int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver); int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); int (*vbflash_stat)(struct psp_context *psp); + int (*fatal_error_recovery_quirk)(struct psp_context *psp); }; struct ta_funcs { @@ -307,10 +307,9 @@ struct psp_runtime_scpm_entry { enum psp_runtime_scpm_authentication scpm_status; }; -struct psp_context -{ - struct amdgpu_device *adev; - struct 
psp_ring km_ring; +struct psp_context { + struct amdgpu_device *adev; + struct psp_ring km_ring; struct psp_gfx_cmd_resp *cmd; const struct psp_funcs *funcs; @@ -339,7 +338,7 @@ struct psp_context uint64_t tmr_mc_addr; /* asd firmware */ - const struct firmware *asd_fw; + const struct firmware *asd_fw; /* toc firmware */ const struct firmware *toc_fw; @@ -384,9 +383,13 @@ struct psp_context uint32_t boot_cfg_bitmask; - char *vbflash_tmp_buf; - size_t vbflash_image_size; - bool vbflash_done; + /* firmware upgrades supported */ + bool sup_pd_fw_up; + bool sup_ifwi_up; + + char *vbflash_tmp_buf; + size_t vbflash_image_size; + bool vbflash_done; }; struct amdgpu_psp_funcs { @@ -443,6 +446,10 @@ struct amdgpu_psp_funcs { ((psp)->funcs->vbflash_stat ? \ (psp)->funcs->vbflash_stat((psp)) : -EINVAL) +#define psp_fatal_error_recovery_quirk(psp) \ + ((psp)->funcs->fatal_error_recovery_quirk ? \ + (psp)->funcs->fatal_error_recovery_quirk((psp)) : 0) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; @@ -458,9 +465,10 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, uint32_t field_val, uint32_t mask, uint32_t msec_timeout); +int psp_execute_ip_fw_load(struct psp_context *psp, + struct amdgpu_firmware_info *ucode); + int psp_gpu_reset(struct amdgpu_device *adev); -int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, - uint64_t cmd_gpu_addr, int cmd_size); int psp_ta_init_shared_buf(struct psp_context *psp, struct ta_mem_context *mem_ctx); @@ -525,6 +533,4 @@ int psp_spatial_partition(struct psp_context *psp, int mode); int is_psp_fw_valid(struct psp_bin_desc bin); -int amdgpu_psp_sysfs_init(struct amdgpu_device *adev); -void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 8aaa427f8c0f..7689395e44fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -35,6 +35,7 @@ #include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include "nbio_v4_3.h" +#include "nbio_v7_9.h" #include "atom.h" #include "amdgpu_reset.h" @@ -757,16 +758,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, return 0; } -static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev, - struct ras_common_if *head) -{ - if (amdgpu_ras_is_feature_allowed(adev, head) || - amdgpu_ras_is_poison_mode_supported(adev)) - return 1; - else - return 0; -} - /* wrapper of psp_ras_enable_features */ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable) @@ -778,7 +769,16 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!con) return -EINVAL; - if (head->block == AMDGPU_RAS_BLOCK__GFX) { + /* Do not enable ras feature if it is not allowed */ + if (enable && + head->block != AMDGPU_RAS_BLOCK__GFX && + !amdgpu_ras_is_feature_allowed(adev, head)) + goto out; + + /* Only enable gfx ras feature from host side */ + if (head->block == AMDGPU_RAS_BLOCK__GFX && + !amdgpu_sriov_vf(adev) && + !amdgpu_ras_intr_triggered()) { info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); if (!info) return -ENOMEM; @@ -794,16 +794,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, .error_type = amdgpu_ras_error_to_ta(head->type), }; } - } - /* Do not enable if it is not allowed. 
*/ - if (enable && !amdgpu_ras_check_feature_allowed(adev, head)) - goto out; - - /* Only enable ras feature operation handle on host side */ - if (head->block == AMDGPU_RAS_BLOCK__GFX && - !amdgpu_sriov_vf(adev) && - !amdgpu_ras_intr_triggered()) { ret = psp_ras_enable_features(&adev->psp, info, enable); if (ret) { dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n", @@ -1159,7 +1150,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, } /* Calculate XGMI relative offset */ - if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (adev->gmc.xgmi.num_physical_nodes > 1 && + info->head.block != AMDGPU_RAS_BLOCK__GFX) { block_info.address = amdgpu_xgmi_get_relative_phy_addr(adev, block_info.address); @@ -2072,6 +2064,8 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) { ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + + psp_fatal_error_recovery_quirk(&adev->psp); } } @@ -2414,6 +2408,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) if (adev->asic_type == CHIP_IP_DISCOVERY) { switch (adev->ip_versions[MP0_HWIP][0]) { case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): return true; default: @@ -2440,10 +2435,10 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) if (!ctx) return; - if (strnstr(ctx->vbios_version, "D16406", - sizeof(ctx->vbios_version)) || - strnstr(ctx->vbios_version, "D36002", - sizeof(ctx->vbios_version))) + if (strnstr(ctx->vbios_pn, "D16406", + sizeof(ctx->vbios_pn)) || + strnstr(ctx->vbios_pn, "D36002", + sizeof(ctx->vbios_pn))) adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); } @@ -2515,8 +2510,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) /* hw_supported needs to be aligned with RAS block mask. */ adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; - adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : - adev->ras_hw_enabled & amdgpu_ras_mask; + + /* + * Disable ras feature for aqua vanjaram + * by default on apu platform. + */ + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) && + adev->gmc.is_app_apu) + adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 : + adev->ras_hw_enabled & amdgpu_ras_mask; + else + adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : + adev->ras_hw_enabled & amdgpu_ras_mask; } static void amdgpu_ras_counte_dw(struct work_struct *work) @@ -2642,6 +2647,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev) * check DF RAS */ adev->nbio.ras = &nbio_v4_3_ras; break; + case IP_VERSION(7, 9, 0): + if (!adev->gmc.is_app_apu) + adev->nbio.ras = &nbio_v7_9_ras; + break; default: /* nbio ras is not available */ break; @@ -2765,23 +2774,28 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev, goto cleanup; } - r = amdgpu_ras_sysfs_create(adev, ras_block); - if (r) - goto interrupt; + if (ras_obj->hw_ops && + (ras_obj->hw_ops->query_ras_error_count || + ras_obj->hw_ops->query_ras_error_status)) { + r = amdgpu_ras_sysfs_create(adev, ras_block); + if (r) + goto interrupt; - /* Those are the cached values at init. - */ - query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL); - if (!query_info) - return -ENOMEM; - memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if)); + /* Those are the cached values at init. 
+ */ + query_info = kzalloc(sizeof(*query_info), GFP_KERNEL); + if (!query_info) + return -ENOMEM; + memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if)); - if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) { - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } + + kfree(query_info); } - kfree(query_info); return 0; interrupt: @@ -2958,10 +2972,6 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { - amdgpu_ras_check_supported(adev); - if (!adev->ras_hw_enabled) - return; - if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -3136,6 +3146,10 @@ int amdgpu_ras_is_supported(struct amdgpu_device *adev, * that the ras block supports ras function. */ if (!ret && + (block == AMDGPU_RAS_BLOCK__GFX || + block == AMDGPU_RAS_BLOCK__SDMA || + block == AMDGPU_RAS_BLOCK__VCN || + block == AMDGPU_RAS_BLOCK__JPEG) && amdgpu_ras_is_poison_mode_supported(adev) && amdgpu_ras_get_ras_block(adev, block, 0)) ret = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 0648dfe559af..4764d2171f92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -158,6 +158,7 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) case IP_VERSION(11, 0, 7): /* Sienna cichlid */ case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 2): /* Aldebaran */ + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): return true; default: @@ -194,9 +195,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, /* VEGA20 and ARCTURUS */ if (adev->asic_type == CHIP_VEGA20) control->i2c_address = EEPROM_I2C_MADDR_0; - else if (strnstr(atom_ctx->vbios_version, + else if (strnstr(atom_ctx->vbios_pn, "D342", - sizeof(atom_ctx->vbios_version))) + sizeof(atom_ctx->vbios_pn))) control->i2c_address = EEPROM_I2C_MADDR_0; else control->i2c_address = EEPROM_I2C_MADDR_4; @@ -205,13 +206,14 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, control->i2c_address = EEPROM_I2C_MADDR_0; return true; case IP_VERSION(13, 0, 2): - if (strnstr(atom_ctx->vbios_version, "D673", - sizeof(atom_ctx->vbios_version))) + if (strnstr(atom_ctx->vbios_pn, "D673", + sizeof(atom_ctx->vbios_pn))) control->i2c_address = EEPROM_I2C_MADDR_4; else control->i2c_address = EEPROM_I2C_MADDR_0; return true; case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 10): control->i2c_address = EEPROM_I2C_MADDR_4; return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 5c4f93ee0c57..3c988cc406e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -90,6 +90,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, cur->node = block; break; case TTM_PL_TT: + case AMDGPU_PL_DOORBELL: node = to_ttm_range_mgr_node(res)->mm_nodes; while (start >= node->size << PAGE_SHIFT) start -= node++->size << PAGE_SHIFT; @@ -152,6 +153,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining); break; case TTM_PL_TT: + case 
AMDGPU_PL_DOORBELL: node = cur->node; cur->node = ++node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index eec41ad30406..5fed06ffcc6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -87,7 +87,7 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); if (!reset_handler) - return -ENOSYS; + return -EOPNOTSUPP; return reset_handler->prepare_hwcontext(adev->reset_cntl, reset_context); @@ -103,7 +103,7 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); if (!reset_handler) - return -ENOSYS; + return -EOPNOTSUPP; ret = reset_handler->perform_reset(adev->reset_cntl, reset_context); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 028ff075db51..e2ab303ad270 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -389,7 +389,7 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, occupied = ring->wptr & ring->buf_mask; dst = (void *)&ring->ring[occupied]; chunk1 = ring->buf_mask + 1 - occupied; - chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1; + chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1; chunk2 = count_dw - chunk1; chunk1 <<= 2; chunk2 <<= 2; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h index b22d4fb2a847..d3186b570b82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h @@ -56,6 +56,15 @@ enum amdgpu_ring_mux_offset_type { AMDGPU_MUX_OFFSET_TYPE_CE, }; +enum ib_complete_status { + /* IB not started/reset value, default value. */ + IB_COMPLETION_STATUS_DEFAULT = 0, + /* IB preempted, started but not completed. */ + IB_COMPLETION_STATUS_PREEMPTED = 1, + /* IB completed. 
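Unlike a preempted IB, an IB in this state does not need to be replayed when the mux resubmits the software ring. 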
*/ + IB_COMPLETION_STATUS_COMPLETED = 2, +}; + struct amdgpu_ring_mux { struct amdgpu_ring *real_ring; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 80b263646966..b591d33af264 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -26,6 +26,8 @@ #include "clearstate_defs.h" +#define AMDGPU_MAX_RLC_INSTANCES 8 + /* firmware ID used in rlc toc */ typedef enum _FIRMWARE_ID_ { FIRMWARE_ID_INVALID = 0, @@ -201,7 +203,7 @@ struct amdgpu_rlc { u32 cp_table_size; /* safe mode for updating CG/PG state */ - bool in_safe_mode[8]; + bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES]; const struct amdgpu_rlc_funcs *funcs; /* for firmware data */ @@ -257,7 +259,7 @@ struct amdgpu_rlc { bool rlcg_reg_access_supported; /* registers for rlcg indirect reg access */ - struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl; + struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[AMDGPU_MAX_RLC_INSTANCES]; }; void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index dacf281d2b21..e2b9392d7f0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -239,9 +239,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, sizeof(struct amdgpu_sdma_instance)); } - if (amdgpu_sriov_vf(adev)) - return 0; - DRM_DEBUG("psp_load == '%s'\n", adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 525dffbe046a..2fd1bfb35916 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -432,7 +432,7 @@ TRACE_EVENT(amdgpu_vm_flush, ), TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx", __get_str(ring), __entry->vmid, - __entry->vm_hub,__entry->pd_addr) + __entry->vm_hub, __entry->pd_addr) ); DECLARE_EVENT_CLASS(amdgpu_pasid, @@ -494,7 +494,7 @@ TRACE_EVENT(amdgpu_cs_bo_status, ); TRACE_EVENT(amdgpu_bo_move, - TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement), + TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement), TP_ARGS(bo, new_placement, old_placement), TP_STRUCT__entry( __field(struct amdgpu_bo *, bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0534ab716809..4e51dce3aab5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -49,7 +49,6 @@ #include #include -#include #include "amdgpu.h" #include "amdgpu_object.h" @@ -127,6 +126,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, case AMDGPU_PL_GDS: case AMDGPU_PL_GWS: case AMDGPU_PL_OA: + case AMDGPU_PL_DOORBELL: placement->num_placement = 0; placement->num_busy_placement = 0; return; @@ -496,9 +496,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == AMDGPU_PL_GDS || old_mem->mem_type == AMDGPU_PL_GWS || old_mem->mem_type == AMDGPU_PL_OA || + old_mem->mem_type == AMDGPU_PL_DOORBELL || new_mem->mem_type == AMDGPU_PL_GDS || new_mem->mem_type == AMDGPU_PL_GWS || - new_mem->mem_type == AMDGPU_PL_OA) { + new_mem->mem_type == AMDGPU_PL_OA || + new_mem->mem_type == AMDGPU_PL_DOORBELL) { /* Nothing to save here */ ttm_bo_move_null(bo, new_mem); goto out; @@ -582,6 +584,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, 
mem->bus.offset += adev->gmc.aper_base; mem->bus.is_iomem = true; break; + case AMDGPU_PL_DOORBELL: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset += adev->doorbell.base; + mem->bus.is_iomem = true; + mem->bus.caching = ttm_uncached; + break; default: return -EINVAL; } @@ -596,6 +604,10 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); + + if (bo->resource->mem_type == AMDGPU_PL_DOORBELL) + return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT; + return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } @@ -1305,6 +1317,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) flags |= AMDGPU_PTE_VALID; if (mem && (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_DOORBELL || mem->mem_type == AMDGPU_PL_PREEMPT)) { flags |= AMDGPU_PTE_SYSTEM; @@ -1924,6 +1937,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned int)(gtt_size / (1024 * 1024))); + /* Initialize doorbell pool on PCI BAR */ + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); + if (r) { + DRM_ERROR("Failed initializing doorbell heap.\n"); + return r; + } + + /* Create a doorbell page for kernel usage */ + r = amdgpu_doorbell_create_kernel_doorbells(adev); + if (r) { + DRM_ERROR("Failed to initialize kernel doorbells.\n"); + return r; + } + /* Initialize preemptible memory pool */ r = amdgpu_preempt_mgr_init(adev); if (r) { @@ -2392,7 +2419,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, struct page *p; void *ptr; - bytes = bytes < size ? bytes : size; + bytes = min(bytes, size); /* Translate the bus address to a physical address. If * the domain is NULL it means there is no IOMMU active @@ -2447,7 +2474,7 @@ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, struct page *p; void *ptr; - bytes = bytes < size ? bytes : size; + bytes = min(bytes, size); addr = dom ? 
iommu_iova_to_phys(dom, addr) : addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6d0d66e40db9..65ec82141a8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -33,12 +33,16 @@ #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) #define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) +#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 #define AMDGPU_POISON 0xd0bed0be +extern const struct attribute_group amdgpu_vram_mgr_attr_group; +extern const struct attribute_group amdgpu_gtt_mgr_attr_group; + struct hmm_range; struct amdgpu_gtt_mgr { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 16807ff96dc9..8beefc045e14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -703,6 +703,8 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version); FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version); +FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK); +FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK); static struct attribute *fw_attrs[] = { &dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr, @@ -716,6 +718,7 @@ static struct attribute *fw_attrs[] = { &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr, &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr, &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr, + &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr, NULL }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index ae455aab5d29..36b55d2bd51a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1239,3 +1239,18 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) return 0; } + +int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, + enum AMDGPU_UCODE_ID ucode_id) +{ + struct amdgpu_firmware_info ucode = { + .ucode_id = (ucode_id ? ucode_id : + (inst_idx ? 
AMDGPU_UCODE_ID_VCN1_RAM : + AMDGPU_UCODE_ID_VCN0_RAM)), + .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr), + }; + + return psp_execute_ip_fw_load(&adev->psp, &ucode); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 92d5534df5f4..a3eed90b6af0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -161,6 +161,7 @@ } while (0) #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2) +#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4) #define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6) #define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8) #define AMDGPU_VCN_SW_RING_FLAG (1 << 9) @@ -180,6 +181,8 @@ #define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0) #define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1) +#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2 + enum fw_queue_mode { FW_QUEUE_RING_RESET = 1, FW_QUEUE_DPG_HOLD_OFF = 2, @@ -343,6 +346,11 @@ struct amdgpu_fw_shared_rb_setup { uint32_t reserved[6]; }; +struct amdgpu_fw_shared_drm_key_wa { + uint8_t method; + uint8_t reserved[3]; +}; + struct amdgpu_vcn4_fw_shared { uint32_t present_flag_0; uint8_t pad[12]; @@ -352,6 +360,7 @@ struct amdgpu_vcn4_fw_shared { uint8_t pad2[20]; struct amdgpu_fw_shared_rb_setup rb_setup; struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface; + struct amdgpu_fw_shared_drm_key_wa drm_key_wa; }; struct amdgpu_vcn_fwlog { @@ -414,4 +423,7 @@ int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); +int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx, + enum AMDGPU_UCODE_ID ucode_id); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 41aa853a07d2..96857ae7fb5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -520,7 +520,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels; adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels); } - if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0)) + if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0)) adev->virt.is_mm_bw_enabled = true; adev->unique_id = @@ -835,6 +835,16 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad return mode; } +void amdgpu_virt_post_reset(struct amdgpu_device *adev) +{ + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) { + /* Force GFX into the GFXOFF state after reset + * to avoid invalid operations before GC is enabled. + */ + adev->gfx.is_poweron = false; + } +} + bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id) { switch (adev->ip_versions[MP0_HWIP][0]) { @@ -845,6 +855,17 @@ bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_i return false; else return true; + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */ + if (ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode_id == 
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM + || ucode_id == AMDGPU_UCODE_ID_SMC) + return true; + else + return false; case IP_VERSION(13, 0, 10): /* white list */ if (ucode_id == AMDGPU_UCODE_ID_CAP @@ -954,7 +975,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, return ret; } -static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; uint32_t timeout = 50000; @@ -972,7 +993,12 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v return 0; } - reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) { + dev_err(adev->dev, "invalid xcc\n"); + return 0; + } + + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id]; scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0; scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; @@ -1037,13 +1063,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v void amdgpu_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, - u32 acc_flags, u32 hwip) + u32 acc_flags, u32 hwip, u32 xcc_id) { u32 rlcg_flag; if (!amdgpu_sriov_runtime(adev) && amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { - amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag); + amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id); return; } @@ -1054,13 +1080,13 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev, } u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, - u32 offset, u32 acc_flags, u32 hwip) + u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id) { u32 rlcg_flag; if (!amdgpu_sriov_runtime(adev) && amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) - return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag); + return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id); if (acc_flags & AMDGPU_REGS_NO_KIQ) return RREG32_NO_KIQ(offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 4f7bab52282a..fabb83e9d9ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -355,9 +355,10 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); void amdgpu_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, - u32 acc_flags, u32 hwip); + u32 acc_flags, u32 hwip, u32 xcc_id); u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, - u32 offset, u32 acc_flags, u32 hwip); + u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id); bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id); +void amdgpu_virt_post_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d0748bcfad16..7148a216ae2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -501,8 +501,6 @@ static int amdgpu_vkms_sw_init(void *handle) adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; - adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; - r = amdgpu_display_modeset_create_props(adev); if 
(r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ec1ec08d4058..f5daadcec865 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" @@ -111,9 +112,9 @@ struct amdgpu_prt_cb { }; /** - * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence + * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence */ -struct amdgpu_vm_tlb_seq_cb { +struct amdgpu_vm_tlb_seq_struct { /** * @vm: pointer to the amdgpu_vm structure to set the fence sequence on */ @@ -339,25 +340,20 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, } /** - * amdgpu_vm_get_pd_bo - add the VM PD to a validation list + * amdgpu_vm_lock_pd - lock PD in drm_exec * * @vm: vm providing the BOs - * @validated: head of validation list - * @entry: entry to add + * @exec: drm execution context + * @num_fences: number of extra fences to reserve * - * Add the page directory to the list of BOs to - * validate for command submission. + * Lock the VM root PD in the DRM execution context. */ -void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, - struct list_head *validated, - struct amdgpu_bo_list_entry *entry) +int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences) { - entry->priority = 0; - entry->tv.bo = &vm->root.bo->tbo; - /* Two for VM updates, one for TTM and one for the CS job */ - entry->tv.num_shared = 4; - entry->user_pages = NULL; - list_add(&entry->tv.head, validated); + /* We need at least two fences for the VM PD/PT updates */ + return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, + 2 + num_fences); } /** @@ -833,7 +829,7 @@ error: static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { - struct amdgpu_vm_tlb_seq_cb *tlb_cb; + struct amdgpu_vm_tlb_seq_struct *tlb_cb; tlb_cb = container_of(cb, typeof(*tlb_cb), cb); atomic64_inc(&tlb_cb->vm->tlb_seq); @@ -871,7 +867,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { struct amdgpu_vm_update_params params; - struct amdgpu_vm_tlb_seq_cb *tlb_cb; + struct amdgpu_vm_tlb_seq_struct *tlb_cb; struct amdgpu_res_cursor cursor; enum amdgpu_sync_mode sync_mode; int r, idx; @@ -2280,16 +2276,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) goto unreserve_bo; vm->update_funcs = &amdgpu_vm_cpu_funcs; + r = amdgpu_vm_pt_map_tables(adev, vm); + if (r) + goto unreserve_bo; + } else { vm->update_funcs = &amdgpu_vm_sdma_funcs; } - /* - * Make sure root PD gets mapped. As vm_update_mode could be changed - * when turning a GFX VM into a compute VM. 
- */ - r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo)); - if (r) - goto unreserve_bo; dma_fence_put(vm->last_update); vm->last_update = dma_fence_get_stub(); @@ -2605,7 +2598,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, /* Intentionally setting invalid PTE flag * combination to force a no-retry-fault */ - flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT; + flags = AMDGPU_VM_NORETRY_FLAGS; value = 0; } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { /* Redirect the access to the dummy page */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ffac7413c657..204ab13184ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -36,6 +36,8 @@ #include "amdgpu_ring.h" #include "amdgpu_ids.h" +struct drm_exec; + struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; @@ -84,7 +86,13 @@ struct amdgpu_mem_stats; /* PDE Block Fragment Size for VEGA10 */ #define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59) +/* Flag combination to set no-retry with TF disabled */ +#define AMDGPU_VM_NORETRY_FLAGS (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \ + AMDGPU_PTE_TF) +/* Flag combination to set no-retry with TF enabled */ +#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \ + AMDGPU_PTE_PRT) /* For GFX9 */ #define AMDGPU_PTE_MTYPE_VG10(a) ((uint64_t)(a) << 57) #define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10(3ULL) @@ -396,9 +404,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, - struct list_head *validated, - struct amdgpu_bo_list_entry *entry); +int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences); bool amdgpu_vm_ready(struct amdgpu_vm *vm); uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -492,6 +499,8 @@ void amdgpu_vm_pt_free_work(struct work_struct *work); void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); #endif +int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm); + /** * amdgpu_vm_tlb_seq - return tlb flush sequence number * @vm: the amdgpu_vm structure to query diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 31913ae86de6..6e31621452de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -31,6 +31,7 @@ */ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) { + table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; return amdgpu_bo_kmap(&table->bo, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 5431332bbdb8..96d601e209b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -780,6 +780,27 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params, 1, 0, flags); } +/** + * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags + * + * @adev: amdgpu_device pointer + * @flags: pointer to PTE flags + * + * Update PTE no-retry flags when TF is enabled. 
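+ * + * The AMDGPU_VM_NORETRY_FLAGS combination (EXECUTABLE | PDE_PTE | TF) only + * produces a no-retry fault while TF is disabled; when all of its bits are + * set they are replaced with adev->gmc.noretry_flags, typically the TF-safe + * AMDGPU_VM_NORETRY_FLAGS_TF combination (VALID | SYSTEM | PRT).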
+ */ +static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev, + uint64_t *flags) +{ + /* + * Update no-retry flags with the corresponding TF + * no-retry combination. + */ + if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) { + *flags &= ~AMDGPU_VM_NORETRY_FLAGS; + *flags |= adev->gmc.noretry_flags; + } +} + /* * amdgpu_vm_pte_update_flags - figure out flags for PTE updates * @@ -806,6 +827,16 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params, flags |= AMDGPU_PTE_EXECUTABLE; } + /* + * Update no-retry flags to use the no-retry flag combination + * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination + * does not work when TF is enabled. So, replace them with + * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for + * all cases. + */ + if (level == AMDGPU_VM_PTB) + amdgpu_vm_pte_update_noretry_flags(adev, &flags); + /* APUs mapping system memory may need different MTYPEs on different * NUMA nodes. Only do this for contiguous ranges that can be assumed * to be on the same NUMA node. @@ -1046,3 +1077,31 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, return 0; } + +/** + * amdgpu_vm_pt_map_tables - make all page table BOs CPU accessible + * @adev: amdgpu device structure + * @vm: amdgpu vm structure + * + * Make the root page directory and everything below it CPU accessible. + */ +int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_vm_bo_base *entry; + + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) { + + struct amdgpu_bo_vm *bo; + int r; + + if (entry->bo) { + bo = to_amdgpu_bo_vm(entry->bo); + r = vm->update_funcs->map_table(bo); + if (r) + return r; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 03dc59cbe8aa..7e91b24784e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -500,6 +500,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) hive = kzalloc(sizeof(*hive), GFP_KERNEL); if (!hive) { dev_err(adev->dev, "XGMI: allocation failed\n"); + ret = -ENOMEM; hive = NULL; goto pro_end; } diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c similarity index 99% rename from drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c rename to drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 72b629a78c62..d0fc62784e82 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -134,7 +134,7 @@ static int aqua_vanjaram_xcp_sched_list_update( for (i = 0; i < AMDGPU_MAX_RINGS; i++) { ring = adev->rings[i]; - if (!ring || !ring->sched.ready) + if (!ring || !ring->sched.ready || ring->no_scheduler) continue; aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 5f610e9a5f0f..9f63ddb89b75 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1438,6 +1438,8 @@ static void atom_get_vbios_pn(struct atom_context *ctx) ctx->vbios_pn[count] = 0; } + + pr_info("ATOM BIOS: %s\n", ctx->vbios_pn); } static void atom_get_vbios_version(struct atom_context *ctx) @@ -1460,11 +1462,9 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) int base; struct atom_context *ctx = kzalloc(sizeof(struct atom_context), 
GFP_KERNEL); - char *str; struct _ATOM_ROM_HEADER *atom_rom_header; struct _ATOM_MASTER_DATA_TABLE *master_table; struct _ATOM_FIRMWARE_INFO *atom_fw_info; - u16 idx; if (!ctx) return NULL; @@ -1502,16 +1502,6 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) return NULL; } - idx = CU16(ATOM_ROM_PART_NUMBER_PTR); - if (idx == 0) - idx = 0x80; - - str = CSTR(idx); - if (*str != '\0') { - pr_info("ATOM BIOS: %s\n", str); - strscpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); - } - atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base); if (atom_rom_header->usMasterDataTableOffset != 0) { master_table = (struct _ATOM_MASTER_DATA_TABLE *) diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index 0c1839824520..c11cf18a0f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -33,7 +33,6 @@ struct drm_device; #define ATOM_ATI_MAGIC_PTR 0x30 #define ATOM_ATI_MAGIC " 761295520" #define ATOM_ROM_TABLE_PTR 0x48 -#define ATOM_ROM_PART_NUMBER_PTR 0x6E #define ATOM_ROM_MAGIC "ATOM" #define ATOM_ROM_MAGIC_PTR 4 @@ -118,12 +117,15 @@ struct drm_device; struct card_info { struct drm_device *dev; - void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ - uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */ - void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ - uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */ - void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ - uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */ + void (*reg_write)(struct card_info *info, + u32 reg, uint32_t val); /* filled by driver */ + uint32_t (*reg_read)(struct card_info *info, uint32_t reg); /* filled by driver */ + void (*mc_write)(struct card_info *info, + u32 reg, uint32_t val); /* filled by driver */ + uint32_t (*mc_read)(struct card_info *info, uint32_t reg); /* filled by driver */ + void (*pll_write)(struct card_info *info, + u32 reg, uint32_t val); /* filled by driver */ + uint32_t (*pll_read)(struct card_info *info, uint32_t reg); /* filled by driver */ }; struct atom_context { @@ -143,7 +145,6 @@ struct atom_context { int io_mode; uint32_t *scratch; int scratch_size_bytes; - char vbios_version[20]; uint8_t name[STRLEN_LONG]; uint8_t vbios_pn[STRLEN_LONG]; @@ -154,10 +155,10 @@ struct atom_context { extern int amdgpu_atom_debug; -struct atom_context *amdgpu_atom_parse(struct card_info *, void *); -int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *); -int amdgpu_atom_asic_init(struct atom_context *); -void amdgpu_atom_destroy(struct atom_context *); +struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios); +int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); +int amdgpu_atom_asic_init(struct atom_context *ctx); +void amdgpu_atom_destroy(struct atom_context *ctx); bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index df385ffc9768..6f7c031dd197 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -442,8 +442,7 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev) adev->irq.ih_funcs = 
&cik_ih_funcs; } -const struct amdgpu_ip_block_version cik_ih_ip_block = -{ +const struct amdgpu_ip_block_version cik_ih_ip_block = { .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9a24ed463abd..584cd5277f92 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -52,8 +52,7 @@ static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); -static const u32 crtc_offsets[] = -{ +static const u32 crtc_offsets[] = { CRTC0_REGISTER_OFFSET, CRTC1_REGISTER_OFFSET, CRTC2_REGISTER_OFFSET, @@ -63,8 +62,7 @@ static const u32 crtc_offsets[] = CRTC6_REGISTER_OFFSET }; -static const u32 hpd_offsets[] = -{ +static const u32 hpd_offsets[] = { HPD0_REGISTER_OFFSET, HPD1_REGISTER_OFFSET, HPD2_REGISTER_OFFSET, @@ -121,30 +119,26 @@ static const struct { .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK } }; -static const u32 golden_settings_tonga_a11[] = -{ +static const u32 golden_settings_tonga_a11[] = { mmDCI_CLK_CNTL, 0x00000080, 0x00000000, mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, mmFBC_MISC, 0x1f311fff, 0x12300000, mmHDMI_CONTROL, 0x31000111, 0x00000011, }; -static const u32 tonga_mgcg_cgcg_init[] = -{ +static const u32 tonga_mgcg_cgcg_init[] = { mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, }; -static const u32 golden_settings_fiji_a10[] = -{ +static const u32 golden_settings_fiji_a10[] = { mmDCI_CLK_CNTL, 0x00000080, 0x00000000, mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, mmFBC_MISC, 0x1f311fff, 0x12300000, mmHDMI_CONTROL, 0x31000111, 0x00000011, }; -static const u32 fiji_mgcg_cgcg_init[] = -{ +static const u32 fiji_mgcg_cgcg_init[] = { mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, }; @@ -1425,8 +1419,7 @@ static void dce_v10_0_audio_enable(struct amdgpu_device *adev, enable ? 
AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); } -static const u32 pin_offsets[] = -{ +static const u32 pin_offsets[] = { AUD0_REGISTER_OFFSET, AUD1_REGISTER_OFFSET, AUD2_REGISTER_OFFSET, @@ -1811,8 +1804,7 @@ static void dce_v10_0_afmt_fini(struct amdgpu_device *adev) } } -static const u32 vga_control_regs[6] = -{ +static const u32 vga_control_regs[6] = { mmD1VGA_CONTROL, mmD2VGA_CONTROL, mmD3VGA_CONTROL, @@ -3651,8 +3643,7 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev) adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; } -const struct amdgpu_ip_block_version dce_v10_0_ip_block = -{ +const struct amdgpu_ip_block_version dce_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 10, .minor = 0, @@ -3660,8 +3651,7 @@ const struct amdgpu_ip_block_version dce_v10_0_ip_block = .funcs = &dce_v10_0_ip_funcs, }; -const struct amdgpu_ip_block_version dce_v10_1_ip_block = -{ +const struct amdgpu_ip_block_version dce_v10_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 10, .minor = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index d421a268c9ff..f2b3cb5ed6be 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -53,8 +53,7 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev); static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev); -static const u32 crtc_offsets[6] = -{ +static const u32 crtc_offsets[6] = { CRTC0_REGISTER_OFFSET, CRTC1_REGISTER_OFFSET, CRTC2_REGISTER_OFFSET, @@ -63,8 +62,7 @@ static const u32 crtc_offsets[6] = CRTC5_REGISTER_OFFSET }; -static const u32 hpd_offsets[] = -{ +static const u32 hpd_offsets[] = { HPD0_REGISTER_OFFSET, HPD1_REGISTER_OFFSET, HPD2_REGISTER_OFFSET, @@ -1345,9 +1343,9 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) if (sad->channels > max_channels) { value = (sad->channels << AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | - (sad->byte2 << + (sad->byte2 << AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | - (sad->freq << + (sad->freq << AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); max_channels = sad->channels; } @@ -1379,8 +1377,7 @@ static void dce_v8_0_audio_enable(struct amdgpu_device *adev, enable ? 
AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); } -static const u32 pin_offsets[7] = -{ +static const u32 pin_offsets[7] = { (0x1780 - 0x1780), (0x1786 - 0x1780), (0x178c - 0x1780), @@ -1740,8 +1737,7 @@ static void dce_v8_0_afmt_fini(struct amdgpu_device *adev) } } -static const u32 vga_control_regs[6] = -{ +static const u32 vga_control_regs[6] = { mmD1VGA_CONTROL, mmD2VGA_CONTROL, mmD3VGA_CONTROL, @@ -1895,9 +1891,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | - (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) | - (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT)); + (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT)); #ifdef __BIG_ENDIAN fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif @@ -3151,7 +3147,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, @@ -3544,8 +3540,7 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev) adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs; } -const struct amdgpu_ip_block_version dce_v8_0_ip_block = -{ +const struct amdgpu_ip_block_version dce_v8_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 0, @@ -3553,8 +3548,7 @@ const struct amdgpu_ip_block_version dce_v8_0_ip_block = .funcs = &dce_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version dce_v8_1_ip_block = -{ +const struct amdgpu_ip_block_version dce_v8_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 1, @@ -3562,8 +3556,7 @@ const struct amdgpu_ip_block_version dce_v8_1_ip_block = .funcs = &dce_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version dce_v8_2_ip_block = -{ +const struct amdgpu_ip_block_version dce_v8_2_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 2, @@ -3571,8 +3564,7 @@ const struct amdgpu_ip_block_version dce_v8_2_ip_block = .funcs = &dce_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version dce_v8_3_ip_block = -{ +const struct amdgpu_ip_block_version dce_v8_3_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 3, @@ -3580,8 +3572,7 @@ const struct amdgpu_ip_block_version dce_v8_3_ip_block = .funcs = &dce_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version dce_v8_5_ip_block = -{ +const struct amdgpu_ip_block_version dce_v8_5_ip_block = { .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 5, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 44af8022b89f..0aee9c8288a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -271,8 +271,7 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin"); MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin"); -static const struct soc15_reg_golden golden_settings_gc_10_1[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), @@ -315,13 +314,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000) }; -static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = -{ +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), @@ -1376,8 +1373,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) }; -static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), @@ -1418,8 +1414,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000), }; -static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), @@ -1464,13 +1459,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000) }; -static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = -{ +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), @@ -2093,13 +2086,11 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) }; -static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = -{ +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), @@ -3154,8 +3145,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 
0xFFFFFFFF, 0xe0000000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), @@ -3164,7 +3154,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), - SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xff000000, 0xff008080), @@ -3201,13 +3191,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), @@ -3254,8 +3242,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), }; -static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), @@ -3285,8 +3272,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), }; -static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), @@ -3309,8 +3295,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0x30000000, 0x30000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0x7e000000, 0x7e000100), @@ -3380,7 +3365,7 @@ static const struct soc15_reg_golden 
golden_settings_gc_10_3_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) }; @@ -3421,8 +3406,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000) }; -static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = -{ +static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x00000044), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), @@ -3506,6 +3490,8 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev); static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, uint16_t pasid, uint32_t flush_type, bool all_hub, uint8_t dst_sel); +static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { @@ -3714,8 +3700,8 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) break; case IP_VERSION(10, 3, 4): soc15_program_register_sequence(adev, - golden_settings_gc_10_3_4, - (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); + golden_settings_gc_10_3_4, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); break; case IP_VERSION(10, 3, 5): soc15_program_register_sequence(adev, @@ -3782,7 +3768,7 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); uint32_t tmp = 0; - unsigned i; + unsigned int i; int r; WREG32(scratch, 0xCAFEDEAD); @@ -3820,7 +3806,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - unsigned index; + unsigned int index; uint64_t gpu_addr; volatile uint32_t *cpu_ptr; long r; @@ -3951,7 +3937,7 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) break; } - return ret ; + return ret; } static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) @@ -4151,7 +4137,7 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) { struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; - reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0]; reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1); reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2); @@ -4159,14 +4145,14 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL); reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX); switch (adev->ip_versions[GC_HWIP][0]) { - case IP_VERSION(10, 3, 0): - reg_access_ctrl->spare_int = - SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); - break; - default: - reg_access_ctrl->spare_int = - 
SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT); - break; + case IP_VERSION(10, 3, 0): + reg_access_ctrl->spare_int = + SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); + break; + default: + reg_access_ctrl->spare_int = + SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT); + break; } adev->gfx.rlc.rlcg_reg_access_supported = true; } @@ -4187,11 +4173,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return r; } - /* init spm vmid with 0xf */ - if (adev->gfx.rlc.funcs->update_spm_vmid) - adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); - - return 0; } @@ -4213,7 +4194,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) int r; u32 *hpd; const __le32 *fw_data = NULL; - unsigned fw_size; + unsigned int fw_size; u32 *fw = NULL; size_t mec_hpd_size; @@ -4295,7 +4276,8 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id { /* in gfx10 the SIMD_ID is specified as part of the INSTANCE * field when performing a select_se_sh so it should be - * zero here */ + * zero here + */ WARN_ON(simd != 0); /* type 2 wave data */ @@ -4474,7 +4456,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) { - unsigned irq_type; + unsigned int irq_type; struct amdgpu_ring *ring; unsigned int hw_prio; @@ -4795,7 +4777,8 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade uint32_t pa_sc_tile_steering_override; /* for ASICs that integrates GFX v10.3 - * pa_sc_tile_steering_override should be set to 0 */ + * pa_sc_tile_steering_override should be set to 0 + */ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) return 0; @@ -4871,8 +4854,10 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - access. These should be enabled by FW for target VMIDs. */ + /* + * Initialize all compute VMIDs to have no GDS, GWS, or OA + * access. These should be enabled by FW for target VMIDs. 
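+ * A zero base and size leaves a VMID with no GDS memory, GWS slots or + * OA pages until the firmware programs an allocation for it.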
+ */ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); @@ -5108,8 +5093,10 @@ static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, static void gfx_v10_0_rlc_start(struct amdgpu_device *adev) { - /* TODO: enable rlc & smu handshake until smu - * and gfxoff feature works as expected */ + /* + * TODO: enable rlc & smu handshake until smu + * and gfxoff feature works as expected + */ if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) gfx_v10_0_rlc_smu_handshake_cntl(adev, false); @@ -5132,7 +5119,7 @@ static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev) { const struct rlc_firmware_header_v2_0 *hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; if (!adev->gfx.rlc_fw) return -EINVAL; @@ -5169,6 +5156,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) gfx_v10_0_init_csb(adev); + gfx_v10_0_update_spm_vmid_internal(adev, 0xf); + if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); } else { @@ -5199,6 +5188,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) gfx_v10_0_init_csb(adev); + gfx_v10_0_update_spm_vmid_internal(adev, 0xf); + adev->gfx.rlc.funcs->start(adev); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { @@ -5207,6 +5198,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } } + return 0; } @@ -5674,11 +5666,10 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); - } else { + else WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); - } if (adev->job_hang && !enable) return 0; @@ -5700,7 +5691,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) int r; const struct gfx_firmware_header_v1_0 *pfp_hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; uint32_t tmp; uint32_t usec_timeout = 50000; /* wait for 50ms */ @@ -5778,7 +5769,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) int r; const struct gfx_firmware_header_v1_0 *ce_hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; uint32_t tmp; uint32_t usec_timeout = 50000; /* wait for 50ms */ @@ -5855,7 +5846,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) int r; const struct gfx_firmware_header_v1_0 *me_hdr; const __le32 *fw_data; - unsigned i, fw_size; + unsigned int i, fw_size; uint32_t tmp; uint32_t usec_timeout = 50000; /* wait for 50ms */ @@ -6243,7 +6234,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) { const struct gfx_firmware_header_v1_0 *mec_hdr; const __le32 *fw_data; - unsigned i; + unsigned int i; u32 tmp; u32 usec_timeout = 50000; /* Wait for 50 ms */ @@ -6922,8 +6913,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) { uint32_t data, pattern = 0xDEADBEEF; - /* check if mmVGT_ESGS_RING_SIZE_UMD - * has been remapped to mmVGT_ESGS_RING_SIZE */ + /* + * check if mmVGT_ESGS_RING_SIZE_UMD + * has been remapped to mmVGT_ESGS_RING_SIZE + */ switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 0): case IP_VERSION(10, 3, 2): @@ -6934,12 +6927,10 
@@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern); if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) == pattern) { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD , data); + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data); return true; - } else { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data); - return false; } + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data); break; case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): @@ -6954,12 +6945,12 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) { WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data); return true; - } else { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); - return false; } + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); break; } + + return false; } static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) @@ -6969,8 +6960,10 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return; - /* initialize cam_index to 0 - * index will auto-inc after each data writting */ + /* + * Initialize cam_index to 0 + * index will auto-inc after each data writing + */ WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); switch (adev->ip_versions[GC_HWIP][0]) { @@ -7100,6 +7093,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev) { uint32_t data; + data = RREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG); data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; WREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG, data); @@ -7216,7 +7210,7 @@ static bool gfx_v10_0_is_idle(void *handle) static int gfx_v10_0_wait_for_idle(void *handle) { - unsigned i; + unsigned int i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -7471,7 +7465,7 @@ static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev) static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) { uint32_t data; - unsigned i; + unsigned int i; data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); @@ -7900,12 +7894,11 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } -static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, + unsigned int vmid) { u32 reg, data; - amdgpu_gfx_off_ctrl(adev, false); - /* not for *_SOC15 */ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) @@ -7920,6 +7913,13 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid) +{ + amdgpu_gfx_off_ctrl(adev, false); + + gfx_v10_0_update_spm_vmid_internal(adev, vmid); amdgpu_gfx_off_ctrl(adev, true); } @@ -8297,7 +8297,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib, uint32_t flags) { - unsigned vmid = AMDGPU_JOB_GET_VMID(job); + unsigned int vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; if (ib->flags & AMDGPU_IB_FLAG_CE) @@ -8338,7 +8338,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, uint32_t flags) { - unsigned vmid = 
AMDGPU_JOB_GET_VMID(job); + unsigned int vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); if (ring->is_mes_queue) @@ -8373,7 +8373,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, } static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, - u64 seq, unsigned flags) + u64 seq, unsigned int flags) { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; @@ -8429,7 +8429,7 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, } static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { if (ring->is_mes_queue) gfx_v10_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); @@ -8511,9 +8511,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0); } -static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) +static unsigned int gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) { - unsigned ret; + unsigned int ret; amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); @@ -8525,9 +8525,10 @@ static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) return ret; } -static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) +static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset) { - unsigned cur; + unsigned int cur; + BUG_ON(offset > ring->buf_mask); BUG_ON(ring->ring[offset] != 0x55aa55aa); @@ -8750,7 +8751,7 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, } static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) + unsigned int vmid) { struct amdgpu_device *adev = ring->adev; uint32_t value = 0; @@ -8859,7 +8860,7 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { switch (type) { @@ -8956,7 +8957,7 @@ static int gfx_v10_0_eop_irq(struct amdgpu_device *adev, static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { switch (state) { @@ -8975,7 +8976,7 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { switch (state) { @@ -9342,7 +9343,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) { - unsigned total_cu = adev->gfx.config.max_cu_per_sh * + unsigned int total_cu = adev->gfx.config.max_cu_per_sh * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; @@ -9423,7 +9424,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; - unsigned disable_masks[4 * 2]; + unsigned int disable_masks[4 * 2]; if (!adev || !cu_info) return -EINVAL; @@ -9540,8 +9541,7 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev) (0x1 << 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 0451533ddde4..5c3db694afa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -667,7 +667,7 @@ static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
 {
 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

-	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
@@ -4654,26 +4654,6 @@ static int gfx_v11_0_early_init(void *handle)
 	return gfx_v11_0_init_microcode(adev);
 }

-static int gfx_v11_0_ras_late_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct ras_common_if *gfx_common_if;
-	int ret;
-
-	gfx_common_if = kzalloc(sizeof(struct ras_common_if), GFP_KERNEL);
-	if (!gfx_common_if)
-		return -ENOMEM;
-
-	gfx_common_if->block = AMDGPU_RAS_BLOCK__GFX;
-
-	ret = amdgpu_ras_feature_enable(adev, gfx_common_if, true);
-	if (ret)
-		dev_warn(adev->dev, "Failed to enable gfx11 ras feature\n");
-
-	kfree(gfx_common_if);
-	return 0;
-}
-
 static int gfx_v11_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4687,12 +4667,6 @@ static int gfx_v11_0_late_init(void *handle)
 	if (r)
 		return r;

-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
-		r = gfx_v11_0_ras_late_init(handle);
-		if (r)
-			return r;
-	}
-
 	return 0;
 }
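Editor's note: both the gfx_v10_0 and gfx_v11_0 hunks turn adev->gfx.rlc.reg_access_ctrl into an indexed array, with single-GC parts using entry 0; the gfx_v9_4_3 hunks further down fill one entry per XCC via GET_INST(). A hedged sketch of the shape this implies (the array bound is illustrative, not taken from the driver):

	#define MAX_GC_INSTANCES_SKETCH 8	/* illustrative bound */

	struct amdgpu_rlc_sketch {
		/* one RLCG register-access control block per GC instance (XCC);
		 * parts with a single GC use only index 0
		 */
		struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[MAX_GC_INSTANCES_SKETCH];
		bool rlcg_reg_access_supported;
	};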
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8c174c11eaee..90b034b173c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -90,8 +90,7 @@ MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
 MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
 MODULE_FIRMWARE("amdgpu/mullins_mec.bin");

-static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
-{
+static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
 	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
 	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
@@ -110,8 +109,7 @@ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
 };

-static const u32 spectre_rlc_save_restore_register_list[] =
-{
+static const u32 spectre_rlc_save_restore_register_list[] = {
 	(0x0e00 << 16) | (0xc12c >> 2),
 	0x00000000,
 	(0x0e00 << 16) | (0xc140 >> 2),
@@ -557,8 +555,7 @@ static const u32 spectre_rlc_save_restore_register_list[] =
 	(0x0e00 << 16) | (0x9600 >> 2),
 };

-static const u32 kalindi_rlc_save_restore_register_list[] =
-{
+static const u32 kalindi_rlc_save_restore_register_list[] = {
 	(0x0e00 << 16) | (0xc12c >> 2),
 	0x00000000,
 	(0x0e00 << 16) | (0xc140 >> 2),
@@ -933,7 +930,8 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_MULLINS:
 		chip_name = "mullins";
 		break;
-	default: BUG();
+	default:
+		BUG();
 	}

 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
@@ -2759,8 +2757,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	return 0;
 }

-struct hqd_registers
-{
+struct hqd_registers {
 	u32 cp_mqd_base_addr;
 	u32 cp_mqd_base_addr_hi;
 	u32 cp_hqd_active;
@@ -5124,11 +5121,11 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
 			cu_info->bitmap[i][j] = bitmap;

-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
 				if (bitmap & mask) {
 					if (counter < ao_cu_num)
 						ao_bitmap |= mask;
-					counter ++;
+					counter++;
 				}
 				mask <<= 1;
 			}
@@ -5150,8 +5147,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->lds_size = 64;
 }

-const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
 	.major = 7,
 	.minor = 1,
@@ -5159,8 +5155,7 @@ const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
 	.funcs = &gfx_v7_0_ip_funcs,
 };

-const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
 	.major = 7,
 	.minor = 2,
@@ -5168,8 +5163,7 @@ const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
 	.funcs = &gfx_v7_0_ip_funcs,
 };

-const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
 	.major = 7,
 	.minor = 3,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 65577eca58f1..458faf657042 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -762,6 +762,8 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 				     void *inject_if, uint32_t instance_mask);
 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
+static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+					      unsigned int vmid);

 static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
 				uint64_t queue_mask)
@@ -1632,7 +1634,7 @@ static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
 {
 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

-	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
@@ -1667,22 +1669,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 			return r;
 	}

-	switch (adev->ip_versions[GC_HWIP][0]) {
-	case IP_VERSION(9, 2, 2):
-	case IP_VERSION(9, 1, 0):
-		gfx_v9_0_init_lbpw(adev);
-		break;
-	case IP_VERSION(9, 4, 0):
-		gfx_v9_4_init_lbpw(adev);
-		break;
-	default:
-		break;
-	}
-
-	/* init spm vmid with 0xf */
-	if (adev->gfx.rlc.funcs->update_spm_vmid)
-		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
-
 	return 0;
 }

@@ -2942,12 +2928,14 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 	switch (adev->ip_versions[GC_HWIP][0]) {
 	case IP_VERSION(9, 2, 2):
 	case IP_VERSION(9, 1, 0):
+		gfx_v9_0_init_lbpw(adev);
 		if (amdgpu_lbpw == 0)
 			gfx_v9_0_enable_lbpw(adev, false);
 		else
 			gfx_v9_0_enable_lbpw(adev, true);
 		break;
 	case IP_VERSION(9, 4, 0):
+		gfx_v9_4_init_lbpw(adev);
 		if (amdgpu_lbpw > 0)
 			gfx_v9_0_enable_lbpw(adev, true);
 		else
@@ -2957,6 +2945,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 		break;
 	}

+	gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
+
 	adev->gfx.rlc.funcs->start(adev);

 	return 0;
@@ -4881,12 +4871,11 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 	return 0;
 }

-static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+					      unsigned int vmid)
 {
 	u32 reg, data;

-	amdgpu_gfx_off_ctrl(adev, false);
-
 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
 	if (amdgpu_sriov_is_pp_one_vf(adev))
 		data = RREG32_NO_KIQ(reg);
@@ -4900,6 +4889,13 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 	else
 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+{
+	amdgpu_gfx_off_ctrl(adev, false);
+
+	gfx_v9_0_update_spm_vmid_internal(adev, vmid);

 	amdgpu_gfx_off_ctrl(adev, true);
 }
@@ -5230,6 +5226,9 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
 		de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
 	}

+	((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
+		IB_COMPLETION_STATUS_PREEMPTED;
+
 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
 	} else {
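Editor's note: besides the style cleanups, the gfx_v9_0 hunks make two behavioral moves. LBPW init and the 0xf SPM VMID write shift from rlc_init() (software init) into rlc_resume() (hardware init), so they are reapplied on every resume; and the ring-patch path now marks the DE metadata as preempted before copying it back into the ring. The latter is a plain store through the CPU-visible CSA pointer; restated as a tiny helper sketch (names from the hunk above):

	static void mark_de_meta_preempted(void *de_payload_cpu_addr)
	{
		/* de_payload_cpu_addr points at the v9_de_ib_state copy in the CSA */
		struct v9_de_ib_state *de = de_payload_cpu_addr;

		de->ib_completion_status = IB_COMPLETION_STATUS_PREEMPTED;
	}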
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 4f883b94f98e..57ed4e5c294c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -196,14 +196,11 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 	for (i = 0; i < num_xcc; i++) {
 		dev_inst = GET_INST(GC, i);
-		if (dev_inst >= 2)
-			WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4);
+		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
+			     GOLDEN_GB_ADDR_CONFIG);

 		/* Golden settings applied by driver for ASIC with rev_id 0 */
 		if (adev->rev_id == 0) {
-			WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
-				     GOLDEN_GB_ADDR_CONFIG);
-
 			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
 					      REDUCE_FIFO_DEPTH_BY_2, 2);
 		}
@@ -340,13 +337,11 @@ static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
 	uint64_t clock;

-	amdgpu_gfx_off_ctrl(adev, false);
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
 	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
 	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
 		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
-	amdgpu_gfx_off_ctrl(adev, true);

 	return clock;
 }
@@ -625,7 +620,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
 						int num_xccs_per_xcp)
 {
 	int ret, i, num_xcc;
-	u32 tmp = 0;
+	u32 tmp = 0, regval;

 	if (adev->psp.funcs) {
 		ret = psp_spatial_partition(&adev->psp,
@@ -633,23 +628,24 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
 					    num_xccs_per_xcp);
 		if (ret)
 			return ret;
-	} else {
-		num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+	}

-		for (i = 0; i < num_xcc; i++) {
-			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
-					    num_xccs_per_xcp);
-			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
-					    i % num_xccs_per_xcp);
+	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+	for (i = 0; i < num_xcc; i++) {
+		tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
+				    num_xccs_per_xcp);
+		tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
+				    i % num_xccs_per_xcp);
+		regval = RREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL);
+		if (regval != tmp)
 			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp);
-		}
-		ret = 0;
 	}

 	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

-	return ret;
+	return 0;
 }

 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
@@ -901,6 +897,7 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
 	int i;
 	uint32_t sh_mem_config;
 	uint32_t sh_mem_bases;
+	uint32_t data;

 	/*
 	 * Configure apertures:
@@ -920,6 +917,11 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
 		/* CP and shaders */
 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
+
+		/* Enable trap for each kfd vmid. */
+		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
+		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
 	}
 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
 	mutex_unlock(&adev->srbm_mutex);
@@ -1038,32 +1040,6 @@ static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_
 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
 }

-static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev,
-					  int xcc_id)
-{
-	uint32_t tmp = 0;
-	int num_xcc;
-
-	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
-	switch (num_xcc) {
-	/* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */
-	case 1:
-		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8);
-		break;
-	case 2:
-	case 4:
-	case 6:
-	case 8:
-		tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID);
-		tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP));
-		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp);
-
-		break;
-	default:
-		break;
-	}
-}
-
 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
 {
 	uint32_t rlc_setting;
@@ -1102,6 +1078,24 @@ static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
 }

+static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+	int xcc_id, num_xcc;
+	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
+		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
+		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
+		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
+		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
+		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
+		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
+		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
+	}
+}
+
 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
 {
 	/* init spm vmid with 0xf */
@@ -1921,9 +1915,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
 			return r;
 	}

-	/* set the virtual and physical id based on partition_mode */
-	gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id);
-
 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
 	if (r)
 		return r;
@@ -2182,6 +2173,9 @@ static int gfx_v9_4_3_early_init(void *handle)
 	gfx_v9_4_3_set_gds_init(adev);
 	gfx_v9_4_3_set_rlc_funcs(adev);

+	/* init rlcg reg access ctrl */
+	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
+
 	return gfx_v9_4_3_init_microcode(adev);
 }

@@ -2198,6 +2192,10 @@ static int gfx_v9_4_3_late_init(void *handle)
 	if (r)
 		return r;

+	if (adev->gfx.ras &&
+	    adev->gfx.ras->enable_watchdog_timer)
+		adev->gfx.ras->enable_watchdog_timer(adev);
+
 	return 0;
 }

@@ -4044,6 +4042,34 @@ static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
 	gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
 }

+static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
+					void *ras_error_status, int xcc_id)
+{
+	uint32_t i;
+	uint32_t data;
+
+	data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
+
+	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
+	    (amdgpu_watchdog_timer.period < 1 ||
+	     amdgpu_watchdog_timer.period > 0x23)) {
+		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
+		amdgpu_watchdog_timer.period = 0x23;
+	}
+	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
+			     amdgpu_watchdog_timer.period);
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
+		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
+	}
+	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+				    xcc_id);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+
 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
 					void *ras_error_status)
 {
@@ -4066,6 +4092,11 @@ static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
 }

+static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
+{
+	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
+}
+
 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
 	.name = "gfx_v9_4_3",
 	.early_init = gfx_v9_4_3_early_init,
@@ -4394,4 +4425,5 @@ struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
 	.ras_block = {
 		.hw_ops = &gfx_v9_4_3_ras_ops,
 	},
+	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
 };
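Editor's note: the new gfx_v9_4_3 watchdog hook packs two fields into SQ_TIMEOUT_CONFIG with REG_SET_FIELD and clamps the module-parameter period to the documented 1..0x23 range before broadcasting it per shader engine. The clamp-and-pack step can be exercised in isolation; this standalone sketch uses illustrative stand-in shifts and masks (the real layout comes from the register headers):

	#include <stdint.h>
	#include <stdio.h>

	#define PERIOD_SEL_SHIFT		0	/* illustrative */
	#define PERIOD_SEL_MASK			0x3fu	/* illustrative */
	#define TIMEOUT_FATAL_DISABLE_SHIFT	6	/* illustrative */

	static uint32_t pack_sq_timeout(uint32_t period, int fatal_disable)
	{
		if (fatal_disable && (period < 1 || period > 0x23)) {
			fprintf(stderr, "Watchdog period range is 1 to 0x23\n");
			period = 0x23;	/* clamp, as the driver does */
		}
		return ((fatal_disable ? 1u : 0u) << TIMEOUT_FATAL_DISABLE_SHIFT) |
		       ((period & PERIOD_SEL_MASK) << PERIOD_SEL_SHIFT);
	}

	int main(void)
	{
		printf("0x%02x\n", pack_sq_timeout(0x40, 1));	/* clamps, prints 0x63 */
		return 0;
	}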
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d94cc1ec7242..cdc290a474a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -103,7 +103,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-			/* 
+			/*
 			 * Raven2 has a HW issue that it is unable to use the
 			 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
 			 * So here is the workaround that increase system
@@ -248,7 +248,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
 static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-	unsigned num_level, block_size;
+	unsigned int num_level, block_size;
 	uint32_t tmp;
 	int i;

@@ -308,7 +308,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-	unsigned i;
+	unsigned int i;

 	for (i = 0 ; i < 18; ++i) {
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -375,6 +375,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 						 bool value)
 {
 	u32 tmp;
+
 	tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
 			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index d9f14dc55998..0834af771549 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -140,7 +140,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
 				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

 			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-				/* 
+				/*
 				 * Raven2 has a HW issue that it is unable to use the
 				 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
 				 * So here is the workaround that increase system
@@ -315,7 +315,7 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
 					      uint32_t xcc_mask)
 {
 	struct amdgpu_vmhub *hub;
-	unsigned num_level, block_size;
+	unsigned int num_level, block_size;
 	uint32_t tmp;
 	int i, j;

@@ -402,22 +402,6 @@ static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
 static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
 				       uint32_t xcc_mask)
 {
-	int i;
-
-	/*
-	 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
-	 * VF copy registers so vbios post doesn't program them, for
-	 * SRIOV driver need to program them
-	 */
-	if (amdgpu_sriov_vf(adev)) {
-		for_each_inst(i, xcc_mask) {
-			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
-				adev->gmc.vram_start >> 24);
-			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
-				adev->gmc.vram_end >> 24);
-		}
-	}
-
 	/* GART Enable. */
 	gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
 	gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index f173a61c6c15..a041c6c970e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -31,7 +31,7 @@

 #include "soc15_common.h"

-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
 	"CB/DB",
 	"Reserved",
 	"GE1",
@@ -332,7 +332,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-	unsigned i;
+	unsigned int i;

 	for (i = 0 ; i < 18; ++i) {
 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -393,6 +393,7 @@ static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
 						 bool value)
 {
 	u32 tmp;
+
 	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
 	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
 			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index d8fc3e8088cd..7708d5ded7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -34,7 +34,7 @@
 #define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP			0x16f8
 #define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX	0

-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
 	"CB/DB",
 	"Reserved",
 	"GE1",
@@ -341,7 +341,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-	unsigned i;
+	unsigned int i;

 	for (i = 0 ; i < 18; ++i) {
 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -582,6 +582,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
 static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
 {
 	int i;
+
 	adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
 	adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
 	adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL);
@@ -616,6 +617,7 @@ static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
 static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
 {
 	int i;
+
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL);
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2);
 	WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL);
@@ -679,9 +681,8 @@ static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
 		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
 	}

-	if (!time) {
+	if (!time)
 		DRM_WARN("failed to wait for GRBM(EA) idle\n");
-	}
 }

 const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
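Editor's note: several gfxhub tables above change from "const char *ids[]" to "const char * const ids[]". The first const makes the pointed-to strings immutable; the added one makes the pointer array itself immutable, so the whole table can be placed in read-only data. Minimal illustration:

	/* both the strings and the array of pointers are read-only */
	static const char * const client_ids_example[] = {
		"CB/DB",
		"Reserved",
		"GE1",
	};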
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index c53147f9c9fc..e1c76c070ba9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -30,7 +30,7 @@
 #include "navi10_enum.h"
 #include "soc15_common.h"

-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
 	"CB/DB",
 	"Reserved",
 	"GE1",
@@ -340,7 +340,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-	unsigned i;
+	unsigned int i;

 	for (i = 0 ; i < 18; ++i) {
 		WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
index ae777487d72e..07f369c7a1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -33,7 +33,7 @@
 #define regGCVM_L2_CNTL4_DEFAULT	0x000000c1
 #define regGCVM_L2_CNTL5_DEFAULT	0x00003fe0

-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
 	"CB/DB",
 	"Reserved",
 	"GE1",
@@ -345,7 +345,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
 static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
-	unsigned i;
+	unsigned int i;

 	for (i = 0 ; i < 18; ++i) {
 		WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
"mmhub" : "gfxhub", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, @@ -244,7 +238,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); u32 tmp; /* Use register 17 for GART */ - const unsigned eng = 17; + const unsigned int eng = 17; unsigned int i; unsigned char hub_ip = 0; @@ -346,7 +340,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && down_read_trylock(&adev->reset_domain->sem)) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - const unsigned eng = 17; + const unsigned int eng = 17; u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; @@ -477,12 +471,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, } static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); - unsigned eng = ring->vm_inv_eng; + unsigned int eng = ring->vm_inv_eng; /* * It may lose gpuvm invalidate acknowldege state across power-gating @@ -524,8 +518,8 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { struct amdgpu_device *adev = ring->adev; uint32_t reg; @@ -645,10 +639,10 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); } -static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; @@ -751,6 +745,7 @@ static int gmc_v10_0_early_init(void *handle) adev->gmc.private_aperture_start = 0x1000000000000000ULL; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -972,7 +967,7 @@ static int gmc_v10_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); if (r) { - printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); return r; } @@ -1081,7 +1076,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); return 0; @@ -1255,8 +1250,7 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = { .get_clockgating_state = gmc_v10_0_get_clockgating_state, }; -const struct amdgpu_ip_block_version gmc_v10_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, 
.major = 10,
	.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index c571f0d95994..e3b76fd28d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -50,7 +50,7 @@

 static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
 					 struct amdgpu_irq_src *src,
-					 unsigned type,
+					 unsigned int type,
 					 enum amdgpu_interrupt_state state)
 {
 	return 0;
@@ -58,7 +58,7 @@ static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,

 static int
 gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
-				   struct amdgpu_irq_src *src, unsigned type,
+				   struct amdgpu_irq_src *src, unsigned int type,
 				   enum amdgpu_interrupt_state state)
 {
 	switch (state) {
@@ -97,7 +97,9 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
 				       struct amdgpu_irq_src *source,
 				       struct amdgpu_iv_entry *entry)
 {
-	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
+			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
 	uint32_t status = 0;
 	u64 addr;

@@ -124,8 +126,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,

 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 		dev_err(adev->dev,
-			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
-			"for process %s pid %d thread %s pid %d)\n",
+			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
 			entry->vmid_src ? "mmhub" : "gfxhub",
 			entry->src_id, entry->ring_id, entry->vmid, entry->pasid,
 			task_info.process_name, task_info.tgid,
@@ -198,7 +199,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 	u32 tmp;
 	/* Use register 17 for GART */
-	const unsigned eng = 17;
+	const unsigned int eng = 17;
 	unsigned int i;
 	unsigned char hub_ip = 0;

@@ -296,7 +297,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-		const unsigned eng = 17;
+		const unsigned int eng = 17;
 		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
@@ -309,7 +310,6 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	mutex_lock(&adev->mman.gtt_window_lock);
 	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
 	mutex_unlock(&adev->mman.gtt_window_lock);
-	return;
 }

 /**
@@ -379,12 +379,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 }

 static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-					     unsigned vmid, uint64_t pd_addr)
+					     unsigned int vmid, uint64_t pd_addr)
 {
 	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
-	unsigned eng = ring->vm_inv_eng;
+	unsigned int eng = ring->vm_inv_eng;

 	/*
 	 * It may lose gpuvm invalidate acknowldege state across power-gating
@@ -426,8 +426,8 @@ static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	return pd_addr;
 }

-static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-					 unsigned pasid)
+static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+					 unsigned int pasid)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t reg;
@@ -547,10 +547,10 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
 			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 }

-static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
 	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
-	unsigned size;
+	unsigned int size;

 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
@@ -651,6 +651,7 @@ static int gmc_v11_0_early_init(void *handle)
 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 	adev->gmc.private_aperture_end =
 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

 	return 0;
 }
@@ -727,9 +728,9 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
 	adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

 	/* set the gart size */
-	if (amdgpu_gart_size == -1) {
+	if (amdgpu_gart_size == -1)
 		adev->gmc.gart_size = 512ULL << 20;
-	} else
+	else
 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

 	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
@@ -823,7 +824,7 @@ static int gmc_v11_0_sw_init(void *handle)

 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 	if (r) {
-		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
 		return r;
 	}

@@ -926,7 +927,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
 	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned int)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index aa754c95a0b3..5b837a65fad2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -120,7 +120,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_HAINAN:
 		chip_name = "hainan";
 		break;
-	default: BUG();
+	default:
+		BUG();
 	}

 	/* this memory configuration requires special firmware */
@@ -178,9 +179,8 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
 	}
 	/* load the MC ucode */
-	for (i = 0; i < ucode_size; i++) {
+	for (i = 0; i < ucode_size; i++)
 		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
-	}

 	/* put the engine back into the active state */
 	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
@@ -208,6 +208,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 				       struct amdgpu_gmc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+
 	base <<= 24;

 	amdgpu_gmc_vram_location(adev, mc, base);
@@ -228,9 +229,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

-	if (gmc_v6_0_wait_for_idle((void *)adev)) {
+	if (gmc_v6_0_wait_for_idle((void *)adev))
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-	}

 	if (adev->mode_info.num_crtc) {
 		u32 tmp;
@@ -256,9 +256,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

-	if (gmc_v6_0_wait_for_idle((void *)adev)) {
+	if (gmc_v6_0_wait_for_idle((void *)adev))
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-	}
 }

 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -269,13 +268,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	int r;

 	tmp = RREG32(mmMC_ARB_RAMCFG);
-	if (tmp & (1 << 11)) {
+	if (tmp & (1 << 11))
 		chansize = 16;
-	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
+	else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
 		chansize = 64;
-	} else {
+	else
 		chansize = 32;
-	}
+
 	tmp = RREG32(mmMC_SHARED_CHMAP);
 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 	case 0:
@@ -352,7 +351,7 @@ static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 }

 static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-					    unsigned vmid, uint64_t pd_addr)
+					    unsigned int vmid, uint64_t pd_addr)
 {
 	uint32_t reg;

@@ -405,11 +404,11 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 }

 /**
-+ * gmc_v8_0_set_prt - set PRT VM fault
-+ *
-+ * @adev: amdgpu_device pointer
-+ * @enable: enable/disable VM fault handling for PRT
-+*/
+ * gmc_v8_0_set_prt() - set PRT VM fault
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable VM fault handling for PRT
+ */
 static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp;
@@ -547,7 +546,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned int)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
 	return 0;
 }
@@ -787,15 +786,16 @@ static int gmc_v6_0_late_init(void *handle)
 	return 0;
 }

-static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
 	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
-	unsigned size;
+	unsigned int size;

 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
@@ -814,6 +814,7 @@ static int gmc_v6_0_sw_init(void *handle)
 		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
 		tmp &= MC_SEQ_MISC0__MT__MASK;
 		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 	}
@@ -964,7 +965,7 @@ static bool gmc_v6_0_is_idle(void *handle)

 static int gmc_v6_0_wait_for_idle(void *handle)
 {
-	unsigned i;
+	unsigned int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

 	for (i = 0; i < adev->usec_timeout; i++) {
@@ -995,10 +996,8 @@ static int gmc_v6_0_soft_reset(void *handle)

 	if (srbm_soft_reset) {
 		gmc_v6_0_mc_stop(adev);
-		if (gmc_v6_0_wait_for_idle(adev)) {
+		if (gmc_v6_0_wait_for_idle(adev))
 			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-		}
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
@@ -1023,7 +1022,7 @@ static int gmc_v6_0_soft_reset(void *handle)

 static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					     struct amdgpu_irq_src *src,
-					     unsigned type,
+					     unsigned int type,
 					     enum amdgpu_interrupt_state state)
 {
 	u32 tmp;
@@ -1141,8 +1140,7 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
 }

-const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GMC,
 	.major = 6,
 	.minor = 0,
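Editor's note: the gmc_v6_0 comment rewrite above strips stray leading '+' characters left over from an old mis-applied patch and restores proper kernel-doc, while leaving the pre-existing gmc_v8_0_set_prt name in this file untouched. For reference, the kernel-doc shape being restored looks like:

	/**
	 * function_name() - Short description
	 *
	 * @adev: amdgpu_device pointer
	 * @enable: enable/disable the feature
	 */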
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index acd2b407860f..6a6929ac2748 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -58,16 +58,14 @@ MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
 MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

-static const u32 golden_settings_iceland_a11[] =
-{
+static const u32 golden_settings_iceland_a11[] = {
 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
 };

-static const u32 iceland_mgcg_cgcg_init[] =
-{
+static const u32 iceland_mgcg_cgcg_init[] = {
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };

@@ -151,7 +149,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 		return 0;
-	default: BUG();
+	default:
+		return -EINVAL;
 	}

 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
@@ -237,6 +236,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
 				       struct amdgpu_gmc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+
 	base <<= 24;

 	amdgpu_gmc_vram_location(adev, mc, base);
@@ -266,9 +266,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

-	if (gmc_v7_0_wait_for_idle((void *)adev)) {
+	if (gmc_v7_0_wait_for_idle((void *)adev))
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-	}
+
 	if (adev->mode_info.num_crtc) {
 		/* Lockout access through VGA aperture*/
 		tmp = RREG32(mmVGA_HDP_CONTROL);
@@ -290,9 +290,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 	WREG32(mmMC_VM_AGP_BASE, 0);
 	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-	if (gmc_v7_0_wait_for_idle((void *)adev)) {
+	if (gmc_v7_0_wait_for_idle((void *)adev))
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-	}

 	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

@@ -324,11 +323,11 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)

 	/* Get VRAM informations */
 	tmp = RREG32(mmMC_ARB_RAMCFG);
-	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
 		chansize = 64;
-	} else {
+	else
 		chansize = 32;
-	}
+
 	tmp = RREG32(mmMC_SHARED_CHMAP);
 	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
 	case 0:
@@ -472,7 +471,7 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 }

 static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
-					    unsigned vmid, uint64_t pd_addr)
+					    unsigned int vmid, uint64_t pd_addr)
 {
 	uint32_t reg;

@@ -488,8 +487,8 @@ static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	return pd_addr;
 }

-static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
-					unsigned pasid)
+static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+					unsigned int pasid)
 {
 	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 }
@@ -700,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned int)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)table_addr);
 	return 0;
 }
@@ -761,7 +760,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 * Print human readable fault information (CIK).
 */
 static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
-				     u32 addr, u32 mc_client, unsigned pasid)
+				     u32 addr, u32 mc_client, unsigned int pasid)
 {
 	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -942,6 +941,7 @@ static int gmc_v7_0_early_init(void *handle)
 		adev->gmc.shared_aperture_end + 1;
 	adev->gmc.private_aperture_end =
 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

 	return 0;
 }
@@ -956,15 +956,16 @@ static int gmc_v7_0_late_init(void *handle)
 	return 0;
 }

-static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
 	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
-	unsigned size;
+	unsigned int size;

 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
@@ -984,6 +985,7 @@ static int gmc_v7_0_sw_init(void *handle)
 		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
 		tmp &= MC_SEQ_MISC0__MT__MASK;
 		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
 	}
@@ -1152,7 +1154,7 @@ static bool gmc_v7_0_is_idle(void *handle)

 static int gmc_v7_0_wait_for_idle(void *handle)
 {
-	unsigned i;
+	unsigned int i;
 	u32 tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

@@ -1190,10 +1192,8 @@ static int gmc_v7_0_soft_reset(void *handle)

 	if (srbm_soft_reset) {
 		gmc_v7_0_mc_stop(adev);
-		if (gmc_v7_0_wait_for_idle((void *)adev)) {
+		if (gmc_v7_0_wait_for_idle((void *)adev))
 			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
-		}
-
 		tmp = RREG32(mmSRBM_SOFT_RESET);
 		tmp |= srbm_soft_reset;
@@ -1219,7 +1219,7 @@ static int gmc_v7_0_soft_reset(void *handle)

 static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					     struct amdgpu_irq_src *src,
-					     unsigned type,
+					     unsigned int type,
 					     enum amdgpu_interrupt_state state)
 {
 	u32 tmp;
@@ -1383,8 +1383,7 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
 }

-const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GMC,
 	.major = 7,
 	.minor = 0,
@@ -1392,8 +1391,7 @@ const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
 	.funcs = &gmc_v7_0_ip_funcs,
 };

-const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
 	.type = AMD_IP_BLOCK_TYPE_GMC,
 	.major = 7,
 	.minor = 4,
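Editor's note: gmc_v7_0_init_microcode above (and gmc_v8_0 below) stop calling BUG() for an unknown chip and return -EINVAL instead, so an unexpected ASIC fails probe gracefully rather than crashing the kernel; the gfx_v7_0 counterpart earlier keeps BUG(), presumably because reaching it implies an already-inconsistent chip enumeration. The changed shape, as an isolated sketch (helper name is hypothetical):

	static int init_microcode_sketch(struct amdgpu_device *adev,
					 const char **chip_name)
	{
		switch (adev->asic_type) {
		case CHIP_BONAIRE:
			*chip_name = "bonaire";
			return 0;
		/* ... other known chips ... */
		default:
			return -EINVAL;	/* unknown ASIC: fail probe, don't BUG() */
		}
	}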
-static const u32 tonga_mgcg_cgcg_init[] = -{ +static const u32 tonga_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_fiji_a10[] = -{ +static const u32 golden_settings_fiji_a10[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; -static const u32 fiji_mgcg_cgcg_init[] = -{ +static const u32 fiji_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_polaris11_a11[] = -{ +static const u32 golden_settings_polaris11_a11[] = { mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff }; -static const u32 golden_settings_polaris10_a11[] = -{ +static const u32 golden_settings_polaris10_a11[] = { mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, @@ -110,19 +104,16 @@ static const u32 golden_settings_polaris10_a11[] = mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff }; -static const u32 cz_mgcg_cgcg_init[] = -{ +static const u32 cz_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 stoney_mgcg_cgcg_init[] = -{ +static const u32 stoney_mgcg_cgcg_init[] = { mmATC_MISC_CG, 0xffffffff, 0x000c0200, mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_stoney_common[] = -{ +static const u32 golden_settings_stoney_common[] = { mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004, mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000 }; @@ -260,7 +251,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_STONEY: case CHIP_VEGAM: return 0; - default: BUG(); + default: + return -EINVAL; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); @@ -448,9 +440,9 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (gmc_v8_0_wait_for_idle((void *)adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } + if (adev->mode_info.num_crtc) { /* Lockout access through VGA aperture*/ tmp = RREG32(mmVGA_HDP_CONTROL); @@ -483,9 +475,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (gmc_v8_0_wait_for_idle((void *)adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); - } WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -517,11 +508,11 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) /* Get VRAM informations */ tmp = RREG32(mmMC_ARB_RAMCFG); - if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { + if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) chansize = 64; - } else { + else chansize = 32; - } + tmp = RREG32(mmMC_SHARED_CHMAP); switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { case 0: @@ -671,7 +662,7 @@ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { uint32_t reg; @@ -687,8 +678,8 @@ static 
uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); } @@ -759,11 +750,11 @@ static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev, } /** - * gmc_v8_0_set_prt - set PRT VM fault + * gmc_v8_0_set_prt() - set PRT VM fault * * @adev: amdgpu_device pointer * @enable: enable/disable VM fault handling for PRT -*/ + */ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) { u32 tmp; @@ -940,7 +931,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), + (unsigned int)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); return 0; } @@ -1001,7 +992,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) * Print human readable fault information (VI). */ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, - u32 addr, u32 mc_client, unsigned pasid) + u32 addr, u32 mc_client, unsigned int pasid) { u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1056,6 +1047,7 @@ static int gmc_v8_0_early_init(void *handle) adev->gmc.shared_aperture_end + 1; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -1070,15 +1062,16 @@ static int gmc_v8_0_late_init(void *handle) return 0; } -static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32(mmD1VGA_CONTROL); - unsigned size; + unsigned int size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); @@ -1282,7 +1275,7 @@ static bool gmc_v8_0_is_idle(void *handle) static int gmc_v8_0_wait_for_idle(void *handle) { - unsigned i; + unsigned int i; u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1318,13 +1311,15 @@ static bool gmc_v8_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } + if (srbm_soft_reset) { adev->gmc.srbm_soft_reset = srbm_soft_reset; return true; - } else { - adev->gmc.srbm_soft_reset = 0; - return false; } + + adev->gmc.srbm_soft_reset = 0; + + return false; } static int gmc_v8_0_pre_soft_reset(void *handle) @@ -1335,9 +1330,8 @@ static int gmc_v8_0_pre_soft_reset(void *handle) return 0; gmc_v8_0_mc_stop(adev); - if (gmc_v8_0_wait_for_idle(adev)) { + if (gmc_v8_0_wait_for_idle(adev)) dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } return 0; } @@ -1386,7 +1380,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { u32 tmp; @@ -1747,8 +1741,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) 
adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs; } -const struct amdgpu_ip_block_version gmc_v8_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v8_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 0, @@ -1756,8 +1749,7 @@ const struct amdgpu_ip_block_version gmc_v8_0_ip_block = .funcs = &gmc_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version gmc_v8_1_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v8_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 1, @@ -1765,8 +1757,7 @@ const struct amdgpu_ip_block_version gmc_v8_1_ip_block = .funcs = &gmc_v8_0_ip_funcs, }; -const struct amdgpu_ip_block_version gmc_v8_5_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v8_5_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 5, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 67e669e0141c..f9a5a2c0573e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -81,7 +81,7 @@ #define MAX_MEM_RANGES 8 -static const char *gfxhub_client_ids[] = { +static const char * const gfxhub_client_ids[] = { "CB", "DB", "IA", @@ -332,14 +332,12 @@ static const char *mmhub_client_ids_aldebaran[][2] = { [384+0][1] = "OSS", }; -static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = -{ +static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = { SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa), SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565) }; -static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = -{ +static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = { SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800), SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008) }; @@ -416,13 +414,14 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { u32 bits, i, tmp, reg; /* Devices newer then VEGA10/12 shall have these programming - sequences performed by PSP BL */ + * sequences performed by PSP BL + */ if (adev->asic_type >= CHIP_VEGA20) return 0; @@ -466,7 +465,7 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { struct amdgpu_vmhub *hub; @@ -631,8 +630,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, - "[%s] %s page fault (src_id:%u ring:%u vmid:%u " - "pasid:%u, for process %s pid %d thread %s pid %d)\n", + "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n", hub_name, retry_fault ? 
"retry" : "no-retry", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, @@ -816,7 +814,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); - const unsigned eng = 17; + const unsigned int eng = 17; u32 j, inv_req, inv_req2, tmp; struct amdgpu_vmhub *hub; @@ -1033,13 +1031,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, } static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) + unsigned int vmid, uint64_t pd_addr) { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); - unsigned eng = ring->vm_inv_eng; + unsigned int eng = ring->vm_inv_eng; /* * It may lose gpuvm invalidate acknowldege state across power-gating @@ -1081,8 +1079,8 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, - unsigned pasid) +static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, + unsigned int pasid) { struct amdgpu_device *adev = ring->adev; uint32_t reg; @@ -1373,10 +1371,10 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, } } -static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) +static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) { u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - unsigned size; + unsigned int size; /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ @@ -1622,6 +1620,7 @@ static int gmc_v9_0_early_init(void *handle) adev->gmc.private_aperture_start = 0x1000000000000000ULL; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; return 0; } @@ -1999,6 +1998,19 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) return 0; } +static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) +{ + static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; + u32 vram_info; + + if (!amdgpu_sriov_vf(adev)) { + vram_info = RREG32(regBIF_BIOS_SCRATCH_4); + adev->gmc.vram_vendor = vram_info & 0xF; + } + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; + adev->gmc.vram_width = 128 * 64; +} + static int gmc_v9_0_sw_init(void *handle) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; @@ -2011,15 +2023,12 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - if (!(adev->bios) || adev->gmc.is_app_apu) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + gmc_v9_4_3_init_vram_info(adev); + } else if (!adev->bios) { if (adev->flags & AMD_IS_APU) { - if (adev->gmc.is_app_apu) { - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; - adev->gmc.vram_width = 128 * 64; - } else { - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; - adev->gmc.vram_width = 64 * 64; - } + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; + adev->gmc.vram_width = 64 * 64; } else { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; @@ -2150,7 +2159,7 @@ static int gmc_v9_0_sw_init(void *handle) dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 
48:44; r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); if (r) { - printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); @@ -2304,7 +2313,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return r; DRM_INFO("PCIE GART of %uM enabled.\n", - (unsigned)(adev->gmc.gart_size >> 20)); + (unsigned int)(adev->gmc.gart_size >> 20)); if (adev->gmc.pdb0_bo) DRM_INFO("PDB0 located at 0x%016llX\n", (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); @@ -2490,8 +2499,7 @@ const struct amd_ip_funcs gmc_v9_0_ip_funcs = { .get_clockgating_state = gmc_v9_0_get_clockgating_state, }; -const struct amdgpu_ip_block_version gmc_v9_0_ip_block = -{ +const struct amdgpu_ip_block_version gmc_v9_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GMC, .major = 9, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index b02e1cef78a7..ec0c8f8b465a 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -494,7 +494,8 @@ static int ih_v6_0_self_irq(struct amdgpu_device *adev, *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; - default: break; + default: + break; } return 0; } @@ -535,7 +536,7 @@ static int ih_v6_0_sw_init(void *handle) * use bus address for ih ring by psp bl */ use_bus_addr = (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); if (r) return r; @@ -548,7 +549,7 @@ static int ih_v6_0_sw_init(void *handle) /* initialize ih control register offset */ ih_v6_0_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); if (r) return r; @@ -759,8 +760,7 @@ static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev) adev->irq.ih_funcs = &ih_v6_0_funcs; } -const struct amdgpu_ip_block_version ih_v6_0_ip_block = -{ +const struct amdgpu_ip_block_version ih_v6_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_IH, .major = 6, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c new file mode 100644 index 000000000000..8fb05eae340a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -0,0 +1,769 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include + +#include "amdgpu.h" +#include "amdgpu_ih.h" + +#include "oss/osssys_6_1_0_offset.h" +#include "oss/osssys_6_1_0_sh_mask.h" + +#include "soc15_common.h" +#include "ih_v6_1.h" + +#define MAX_REARM_RETRY 10 + +static void ih_v6_1_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * ih_v6_1_init_register_offset - Initialize register offset for ih rings + * + * @adev: amdgpu_device pointer + * + * Initialize register offset ih rings (IH_V6_0). + */ +static void ih_v6_1_init_register_offset(struct amdgpu_device *adev) +{ + struct amdgpu_ih_regs *ih_regs; + + /* ih ring 2 is removed + * ih ring and ih ring 1 are available */ + if (adev->irq.ih.ring_size) { + ih_regs = &adev->irq.ih.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR); + ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO); + ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL; + } + + if (adev->irq.ih1.ring_size) { + ih_regs = &adev->irq.ih1.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1; + } +} + +/** + * force_update_wptr_for_self_int - Force update the wptr for self interrupt + * + * @adev: amdgpu_device pointer + * @threshold: threshold to trigger the wptr reporting + * @timeout: timeout to trigger the wptr reporting + * @enabled: Enable/disable timeout flush mechanism + * + * threshold input range: 0 ~ 15, default 0, + * real_threshold = 2^threshold + * timeout input range: 0 ~ 20, default 8, + * real_timeout = (2^timeout) * 1024 / (socclk_freq) + * + * Force update wptr for self interrupt ( >= SIENNA_CICHLID). 
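+ *
+ * Worked example (clock value assumed for illustration): the default
+ * timeout field of 8 gives real_timeout = (2^8) * 1024 = 262144 socclk
+ * cycles, roughly 262 us at a 1 GHz socclk, and the default threshold
+ * of 0 gives real_threshold = 2^0 = 1 ring entry.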
+ */ +static void +force_update_wptr_for_self_int(struct amdgpu_device *adev, + u32 threshold, u32 timeout, bool enabled) +{ + u32 ih_cntl, ih_rb_cntl; + + ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2); + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1); + + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2, + SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2, + SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_USED_INT_THRESHOLD, threshold); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) + return; + } else { + WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl); + } + + WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl); +} + +/** + * ih_v6_1_toggle_ring_interrupts - toggle the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * @enable: true - enable the interrupts, false - disable the interrupts + * + * Toggle the interrupt ring buffer (IH_V6_0) + */ +static int ih_v6_1_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (enable) { + ih->enabled = true; + } else { + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; + } + + return 0; +} + +/** + * ih_v6_1_toggle_interrupts - Toggle all the available interrupt ring buffers + * + * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers + * + * Toggle all the available interrupt ring buffers (IH_V6_0). + */ +static int ih_v6_1_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = ih_v6_1_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } + } + + return 0; +} + +static uint32_t ih_v6_1_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) +{ + int rb_bufsz = order_base_2(ih->ring_size / 4); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + MC_SPACE, ih->use_bus_addr ? 2 : 4); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + /* Ring Buffer write pointer writeback. 
If enabled, IH_RB_WPTR register + * value is written to memory + */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_WRITEBACK_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); + + return ih_rb_cntl; +} + +static uint32_t ih_v6_1_doorbell_rptr(struct amdgpu_ih_ring *ih) +{ + u32 ih_doorbell_rtpr = 0; + + if (ih->use_doorbell) { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + ih->doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 1); + } else { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 0); + } + return ih_doorbell_rtpr; +} + +/** + * ih_v6_1_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (IH_V6_1) + */ +static int ih_v6_1_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */ + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = ih_v6_1_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) { + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); + } + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } + + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); + + WREG32(ih_regs->ih_doorbell_rptr, ih_v6_1_doorbell_rptr(ih)); + + return 0; +} + +/** + * ih_v6_1_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, enable the IH + * ring buffer and enable it. + * Called at device load and resume. + * Returns 0 for success, errors for failure.
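+ *
+ * Note: when firmware is loaded directly instead of through the PSP,
+ * ring 0 is bus-addressed, so IH_CHICKEN.MC_SPACE_GPA_ENABLE is
+ * programmed below before the rings are enabled.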
+ */ +static int ih_v6_1_irq_init(struct amdgpu_device *adev) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1}; + u32 ih_chicken; + u32 tmp; + int ret; + int i; + + /* disable irqs */ + ret = ih_v6_1_toggle_interrupts(adev, false); + if (ret) + return ret; + + adev->nbio.funcs->ih_control(adev); + + if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || + (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) { + if (ih[0]->use_bus_addr) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN); + ih_chicken = REG_SET_FIELD(ih_chicken, + IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken); + } + } + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + ret = ih_v6_1_enable_ring(adev, ih[i]); + if (ret) + return ret; + } + } + + /* update doorbell range for ih ring 0 */ + adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, + ih[0]->doorbell_index); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL); + tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, + CLIENT18_IS_STORM_CLIENT, 1); + WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL); + tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp); + + /* GC/MMHUB UTCL2 page fault interrupts are configured as + * MSI storm capable interrupts by default. The delay is + * used to avoid ISR being called too frequently + * when page faults happen on several contiguous pages + * and thus avoid MSI storm */ + tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL); + tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL, + DELAY, 3); + WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + + pci_set_master(adev->pdev); + + /* enable interrupts */ + ret = ih_v6_1_toggle_interrupts(adev, true); + if (ret) + return ret; + /* enable wptr force update for self int */ + force_update_wptr_for_self_int(adev, 0, 8, true); + + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + + return 0; +} + +/** + * ih_v6_1_irq_disable - disable interrupts + * + * @adev: amdgpu_device pointer + * + * Disable interrupts on the hw. + */ +static void ih_v6_1_irq_disable(struct amdgpu_device *adev) +{ + force_update_wptr_for_self_int(adev, 0, 8, false); + ih_v6_1_toggle_interrupts(adev, false); + + /* Wait and acknowledge irq */ + mdelay(1); +} + +/** + * ih_v6_1_get_wptr - get the IH ring buffer wptr + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Get the IH ring buffer wptr from either the register + * or the writeback memory buffer. Also check for + * ring buffer overflow and deal with it. + * Returns the value of the wptr. + */ +static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; + + wptr = le32_to_cpu(*ih->wptr_cpu); + ih_regs = &ih->ih_regs; + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happens start parsing interrupts + * from the last not overwritten vector (wptr + 32). Hopefully + * this should allow us to catch up.
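+ *
+ * IV ring entries are 32 bytes each, so (wptr + 32) & ptr_mask lands
+ * one entry past the write pointer: the oldest vector that the wrapped
+ * write cannot have clobbered.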
+ */ + tmp = (wptr + 32) & ih->ptr_mask; + dev_warn(adev->dev, "IH ring buffer overflow " + "(0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, tmp); + ih->rptr = tmp; + + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); +out: + return (wptr & ih->ptr_mask); +} + +/** + * ih_v6_1_irq_rearm - rearm IRQ if lost + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + */ +static void ih_v6_1_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t v = 0; + uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; + + ih_regs = &ih->ih_regs; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } +} + +/** + * ih_v6_1_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Set the IH ring buffer rptr. + */ +static void ih_v6_1_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + + if (ih->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + *ih->rptr_cpu = ih->rptr; + WDOORBELL32(ih->doorbell_index, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + ih_v6_1_irq_rearm(adev, ih); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); + } +} + +/** + * ih_v6_1_self_irq - dispatch work for ring 1 + * + * @adev: amdgpu_device pointer + * @source: irq source + * @entry: IV with WPTR update + * + * Update the WPTR from the IV and schedule work to handle the entries. + */ +static int ih_v6_1_self_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t wptr = cpu_to_le32(entry->src_data[0]); + + switch (entry->ring_id) { + case 1: + *adev->irq.ih1.wptr_cpu = wptr; + schedule_work(&adev->irq.ih1_work); + break; + default: + break; + } + return 0; +} + +static const struct amdgpu_irq_src_funcs ih_v6_1_self_irq_funcs = { + .process = ih_v6_1_self_irq, +}; + +static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev) +{ + adev->irq.self_irq.num_types = 0; + adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs; +} + +static int ih_v6_1_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ih_v6_1_set_interrupt_funcs(adev); + ih_v6_1_set_self_irq_funcs(adev); + return 0; +} + +static int ih_v6_1_sw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool use_bus_addr; + + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0, + &adev->irq.self_irq); + + if (r) + return r; + + /* use gpu virtual address for ih ring + * until ih_checken is programmed to allow + * use bus address for ih ring by psp bl */ + use_bus_addr = + (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? 
false : true; + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + if (r) + return r; + + adev->irq.ih.use_doorbell = true; + adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; + + adev->irq.ih1.ring_size = 0; + adev->irq.ih2.ring_size = 0; + + /* initialize ih control register offset */ + ih_v6_1_init_register_offset(adev); + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + if (r) + return r; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int ih_v6_1_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_irq_fini_sw(adev); + + return 0; +} + +static int ih_v6_1_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = ih_v6_1_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int ih_v6_1_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ih_v6_1_irq_disable(adev); + + return 0; +} + +static int ih_v6_1_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ih_v6_1_hw_fini(adev); +} + +static int ih_v6_1_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ih_v6_1_hw_init(adev); +} + +static bool ih_v6_1_is_idle(void *handle) +{ + /* todo */ + return true; +} + +static int ih_v6_1_wait_for_idle(void *handle) +{ + /* todo */ + return -ETIMEDOUT; +} + +static int ih_v6_1_soft_reset(void *handle) +{ + /* todo */ + return 0; +} + +static void ih_v6_1_update_clockgating_state(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def, field_val; + + if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) { + def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL); + field_val = enable ? 
0 : 1; + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DYN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + REG_CLK_SOFT_OVERRIDE, field_val); + if (def != data) + WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data); + } + + return; +} + +static int ih_v6_1_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ih_v6_1_update_clockgating_state(adev, + state == AMD_CG_STATE_GATE); + return 0; +} + +static void ih_v6_1_update_ih_mem_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t ih_mem_pwr_cntl; + + /* Disable ih sram power cntl before switch powergating mode */ + ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_CTRL_EN, 0); + WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl); + + /* It is recommended to set mem powergating mode to DS mode */ + if (enable) { + /* mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_DS_EN, 1); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_SD_EN, 0); + /* cam mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0); + /* re-enable power cntl */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_CTRL_EN, 1); + } else { + /* mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_DS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_SD_EN, 0); + /* cam mem power mode */ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0); + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0); + /* re-enable power cntl*/ + ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL, + IH_BUFFER_MEM_POWER_CTRL_EN, 1); + } + + WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl); +} + +static int ih_v6_1_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE); + + if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG) + ih_v6_1_update_ih_mem_power_gating(adev, enable); + + return 0; +} + +static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL)) + *flags |= AMD_CG_SUPPORT_IH_CG; + + 
return; +} + +static const struct amd_ip_funcs ih_v6_1_ip_funcs = { + .name = "ih_v6_1", + .early_init = ih_v6_1_early_init, + .late_init = NULL, + .sw_init = ih_v6_1_sw_init, + .sw_fini = ih_v6_1_sw_fini, + .hw_init = ih_v6_1_hw_init, + .hw_fini = ih_v6_1_hw_fini, + .suspend = ih_v6_1_suspend, + .resume = ih_v6_1_resume, + .is_idle = ih_v6_1_is_idle, + .wait_for_idle = ih_v6_1_wait_for_idle, + .soft_reset = ih_v6_1_soft_reset, + .set_clockgating_state = ih_v6_1_set_clockgating_state, + .set_powergating_state = ih_v6_1_set_powergating_state, + .get_clockgating_state = ih_v6_1_get_clockgating_state, +}; + +static const struct amdgpu_ih_funcs ih_v6_1_funcs = { + .get_wptr = ih_v6_1_get_wptr, + .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, + .set_rptr = ih_v6_1_set_rptr +}; + +static void ih_v6_1_set_interrupt_funcs(struct amdgpu_device *adev) +{ + adev->irq.ih_funcs = &ih_v6_1_funcs; +} + +const struct amdgpu_ip_block_version ih_v6_1_ip_block = { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &ih_v6_1_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.h b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.h new file mode 100644 index 000000000000..2232bc5cbd09 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __IH_V6_1_IH_H__ +#define __IH_V6_1_IH_H__ + +extern const struct amdgpu_ip_block_version ih_v6_1_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index c25d4a07350b..1c8116d75f63 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -807,8 +807,7 @@ static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs; } -const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = -{ +const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_JPEG, .major = 2, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 79791379fc2b..df4440c21bbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -479,7 +479,7 @@ static int jpeg_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = state == AMD_CG_STATE_GATE; if (enable) { if (!jpeg_v3_0_is_idle(handle)) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index a707d407fbd0..3eb3dcd56b57 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -626,7 +626,7 @@ static int jpeg_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = state == AMD_CG_STATE_GATE; if (enable) { if (!jpeg_v4_0_is_idle(handle)) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index ce2b22f7e4e4..15612915bb6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -26,6 +26,7 @@ #include "soc15.h" #include "soc15d.h" #include "jpeg_v4_0_3.h" +#include "mmsch_v4_0_3.h" #include "vcn/vcn_4_0_3_offset.h" #include "vcn/vcn_4_0_3_sh_mask.h" @@ -41,6 +42,7 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v4_0_3_set_powergating_state(void *handle, enum amd_powergating_state state); static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev); +static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring); static int amdgpu_ih_srcid_jpeg[] = { VCN_4_0__SRCID__JPEG_DECODE, @@ -109,9 +111,20 @@ static int jpeg_v4_0_3_sw_init(void *handle) ring = &adev->jpeg.inst[i].ring_dec[j]; ring->use_doorbell = true; ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id); - ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 1 + j + 9 * jpeg_inst; + if (!amdgpu_sriov_vf(adev)) { + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 1 + j + 9 * jpeg_inst; + } else { + if (j < 4) + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 4 + j + 32 * jpeg_inst; + else + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 8 + j + 32 * jpeg_inst; + } sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -160,6 +173,119 @@ static int jpeg_v4_0_3_sw_fini(void *handle) return r; } +static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; 
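+	/*
+	 * This function builds an init table in the shared mm_table buffer:
+	 * a header, one block of direct register writes per JPEG ring (ring
+	 * base low/high and ring size), and a closing END command. The
+	 * table address and size are then handed to the MMSCH firmware
+	 * through the VF_CTX registers and the mailbox response is polled.
+	 */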
+ uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw, item_offset; + uint32_t init_status; + int i, j, jpeg_inst; + + struct mmsch_v4_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v4_0_cmd_end end = { {0} }; + struct mmsch_v4_0_3_init_header header; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + end.cmd_header.command_type = + MMSCH_COMMAND__END; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + jpeg_inst = GET_INST(JPEG, i); + + memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + item_offset = header.total_size; + + for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + table_size = 0; + + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW); + MMSCH_V4_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr)); + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH); + MMSCH_V4_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr)); + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE); + MMSCH_V4_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4); + + if (j <= 3) { + header.mjpegdec0[j].table_offset = item_offset; + header.mjpegdec0[j].init_status = 0; + header.mjpegdec0[j].table_size = table_size; + } else { + header.mjpegdec1[j - 4].table_offset = item_offset; + header.mjpegdec1[j - 4].init_status = 0; + header.mjpegdec1[j - 4].table_size = table_size; + } + header.total_size += table_size; + item_offset += table_size; + } + + MMSCH_V4_0_INSERT_END(); + + /* send init table to MMSCH */ + size = sizeof(struct mmsch_v4_0_3_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + init_status = + ((struct mmsch_v4_0_3_init_header *)(table_loc))->mjpegdec0[i].init_status; + while (resp != expected) { + resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP); + + if (resp != 0) + break; + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && + init_status != MMSCH_VF_ENGINE_STATUS__PASS) + DRM_ERROR("MMSCH init status is incorrect! 
readback=0x%08x, header init status for jpeg: %x\n", + resp, init_status); + + } + return 0; +} + /** * jpeg_v4_0_3_hw_init - start and test JPEG block * @@ -172,31 +298,47 @@ static int jpeg_v4_0_3_hw_init(void *handle) struct amdgpu_ring *ring; int i, j, r, jpeg_inst; - for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - jpeg_inst = GET_INST(JPEG, i); + if (amdgpu_sriov_vf(adev)) { + r = jpeg_v4_0_3_start_sriov(adev); + if (r) + return r; - ring = adev->jpeg.inst[i].ring_dec; + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + ring->wptr = 0; + ring->wptr_old = 0; + jpeg_v4_0_3_dec_ring_set_wptr(ring); + ring->sched.ready = true; + } + } + } else { + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + jpeg_inst = GET_INST(JPEG, i); - if (ring->use_doorbell) - adev->nbio.funcs->vcn_doorbell_range( - adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 9 * jpeg_inst, - adev->jpeg.inst[i].aid_id); + ring = adev->jpeg.inst[i].ring_dec; - for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { - ring = &adev->jpeg.inst[i].ring_dec[j]; if (ring->use_doorbell) - WREG32_SOC15_OFFSET( - VCN, GET_INST(VCN, i), - regVCN_JPEG_DB_CTRL, - (ring->pipe ? (ring->pipe - 0x15) : 0), - ring->doorbell_index + adev->nbio.funcs->vcn_doorbell_range( + adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * jpeg_inst, + adev->jpeg.inst[i].aid_id); + + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + if (ring->use_doorbell) + WREG32_SOC15_OFFSET( + VCN, GET_INST(VCN, i), + regVCN_JPEG_DB_CTRL, + (ring->pipe ? (ring->pipe - 0x15) : 0), + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); - r = amdgpu_ring_test_helper(ring); - if (r) - return r; + VCN_JPEG_DB_CTRL__EN_MASK); + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } } } DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n"); @@ -785,7 +927,7 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 36a123e6c8ee..eb06d749876f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -909,10 +909,12 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev, /* prepare MQD backup */ adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->mes.mqd_backup[pipe]) + if (!adev->mes.mqd_backup[pipe]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 1bdaa00c0b46..6827d547042e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -210,9 +210,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; mes_add_queue_pkt.gds_size = input->queue_size; - /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ - mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; - mes_add_queue_pkt.gds_size = input->queue_size; + mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled; return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_add_queue_pkt, sizeof(mes_add_queue_pkt), @@ -790,8 +788,7 @@ static int mes_v11_0_mqd_init(struct amdgpu_ring *ring) DOORBELL_SOURCE, 0); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0); - } - else + } else tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0); mqd->cp_hqd_pq_doorbell_control = tmp; @@ -1019,10 +1016,12 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev, /* prepare MQD backup */ adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->mes.mqd_backup[pipe]) + if (!adev->mes.mqd_backup[pipe]) { dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); + return -ENOMEM; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 5e8b493f8699..784c4e077470 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -413,18 +413,6 @@ static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. 
*/ mmhub_v1_8_init_gart_aperture_regs(adev); mmhub_v1_8_init_system_aperture_regs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 8bd0fc8d9d25..1dce053a4c4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -331,7 +331,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; - unsigned i; + unsigned int i; for (i = 0; i < 18; ++i) { WREG32_SOC15_OFFSET(MMHUB, 0, @@ -406,6 +406,7 @@ static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -499,11 +500,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); } else { data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; @@ -593,13 +594,13 @@ static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u64 *flags) /* AMD_CG_SUPPORT_MC_MGCG */ if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)) + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)) && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) { - *flags |= AMD_CG_SUPPORT_MC_MGCG; + *flags |= AMD_CG_SUPPORT_MC_MGCG; } /* AMD_CG_SUPPORT_MC_LS */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index e790f890aec6..5718e4d40e66 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -108,7 +108,7 @@ static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi } static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, - int hubid) + int hubid) { uint64_t value; uint32_t tmp; @@ -1568,7 +1568,7 @@ static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev, uint32_t sec_cnt, ded_cnt; for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) { - if(mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset) + if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset) continue; sec_cnt = (value & diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h similarity index 69% rename from drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c rename to drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h index 298c136cefe0..db7eb5260295 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h @@ -1,5 +1,5 @@ /* - * Copyright 2021 Red Hat Inc. + * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -18,23 +18,20 @@ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. + * */ -#include "ram.h" -#include -#include -#include +#ifndef __MMSCH_V4_0_3_H__ +#define __MMSCH_V4_0_3_H__ -static const struct nvkm_ram_func -ga102_ram = { +#include "amdgpu_vcn.h" +#include "mmsch_v4_0.h" + +struct mmsch_v4_0_3_init_header { + uint32_t version; + uint32_t total_size; + struct mmsch_v4_0_table_info vcn0; + struct mmsch_v4_0_table_info mjpegdec0[4]; + struct mmsch_v4_0_table_info mjpegdec1[4]; }; - -int -ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) -{ - struct nvkm_device *device = fb->subdev.device; - enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios); - u32 size = nvkm_rd32(device, 0x1183a4); - - return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram); -} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index cae1aaa4ddb6..6a68ee946f1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -183,12 +183,10 @@ send_request: if (req != IDH_REQ_GPU_INIT_DATA) { pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); return r; - } - else /* host doesn't support REQ_GPU_INIT_DATA handshake */ + } else /* host doesn't support REQ_GPU_INIT_DATA handshake */ adev->virt.req_init_data_ver = 0; } else { - if (req == IDH_REQ_GPU_INIT_DATA) - { + if (req == IDH_REQ_GPU_INIT_DATA) { adev->virt.req_init_data_ver = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 288c414babdf..59f53c743362 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -334,7 +334,7 @@ static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev) break; } mdelay(1); - timeout -=1; + timeout -= 1; reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL); } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index eec13cb5bf75..b6a8478dabf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -565,7 +565,7 @@ static int navi10_ih_sw_init(void *handle) use_bus_addr = false; else use_bus_addr = true; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); if (r) return r; @@ -578,7 +578,7 @@ static int navi10_ih_sw_init(void *handle) /* initialize ih control registers offset */ navi10_ih_init_register_offset(adev); - r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index cd1a02d30420..9ea072374cb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -32,6 +32,18 @@ #define NPS_MODE_MASK 0x000000FFL +/* Core 0 Port 0 counter */ +#define smnPCIEP_NAK_COUNTER 0x1A340218 + +#define smnPCIE_PERF_CNTL_TXCLK3 
0x1A38021c
+#define smnPCIE_PERF_CNTL_TXCLK7		0x1A380888
+#define smnPCIE_PERF_COUNT_CNTL		0x1A380200
+#define smnPCIE_PERF_COUNT0_TXCLK3		0x1A380220
+#define smnPCIE_PERF_COUNT0_TXCLK7		0x1A38088C
+#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK3	0x1A3808F8
+#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK7	0x1A380918
+
+
 static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
 {
 	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -427,6 +439,75 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
 	}
 }
 
+static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+	u32 val, nak_r, nak_g;
+
+	if (adev->flags & AMD_IS_APU)
+		return 0;
+
+	/* Get the number of NAKs received and generated */
+	val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
+	nak_r = val & 0xFFFF;
+	nak_g = val >> 16;
+
+	/* Add the total number of NAKs, i.e. the number of replays */
+	return (nak_r + nak_g);
+}
+
+static void nbio_v7_9_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+				     uint64_t *count1)
+{
+	uint32_t perfctrrx = 0;
+	uint32_t perfctrtx = 0;
+
+	/* This reports 0 on APUs, so return to avoid writing/reading registers
+	 * that may or may not be different from their GPU counterparts
+	 */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	/* Use TXCLK3 counter group for rx event */
+	/* Use TXCLK7 counter group for tx event */
+	/* Set the 2 events that we wish to watch, defined above */
+	/* 40 is event# for received msgs */
+	/* 2 is event# of posted requests sent */
+	perfctrrx = REG_SET_FIELD(perfctrrx, PCIE_PERF_CNTL_TXCLK3, EVENT0_SEL, 40);
+	perfctrtx = REG_SET_FIELD(perfctrtx, PCIE_PERF_CNTL_TXCLK7, EVENT0_SEL, 2);
+
+	/* Write to enable desired perf counters */
+	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctrrx);
+	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK7, perfctrtx);
+
+	/* Zero out and enable SHADOW_WR
+	 * Write 0x6:
+	 * Bit 1 = Global Shadow wr(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
+
+	/* Enable Global Counter
+	 * Write 0x1:
+	 * Bit 0 = Global Counter Enable(1)
+	 */
+	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000001);
+
+	msleep(1000);
+
+	/* Disable Global Counter, Reset and enable SHADOW_WR
+	 * Write 0x6:
+	 * Bit 1 = Global Shadow wr(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
+
+	/* Get the upper and lower count */
+	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) |
+		((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK3) << 32);
+	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK7) |
+		((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK7) << 32);
+}
+
 const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 	.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
 	.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
@@ -450,4 +531,193 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
 	.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
 	.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
 	.init_registers = nbio_v7_9_init_registers,
+	.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
+	.get_pcie_usage = nbio_v7_9_get_pcie_usage,
+};
+
+static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,
+					    void *ras_error_status)
+{
+	return;
+}
+
+static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+	uint32_t bif_doorbell_intr_cntl;
+	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+	struct ras_err_data err_data = {0, 0, 0, NULL};
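The nbio_v7_9_get_pcie_usage() helper above samples two free-running PCIe event counters over a fixed one-second window. A minimal sketch of that window protocol, with rreg()/wreg() as hypothetical stand-ins for RREG32_PCIE()/WREG32_PCIE():

static void pcie_usage_window_sketch(uint64_t *rx_count, uint64_t *tx_count)
{
	wreg(smnPCIE_PERF_COUNT_CNTL, 0x6);	/* bit 1 shadow-wr + bit 2 reset: zero the counters */
	wreg(smnPCIE_PERF_COUNT_CNTL, 0x1);	/* bit 0 global enable: counting starts */
	msleep(1000);				/* fixed 1 s sampling window */
	wreg(smnPCIE_PERF_COUNT_CNTL, 0x6);	/* disable counting and latch the counts via shadow-wr */

	/* each 64-bit count is split across a low register and an UPVAL high register */
	*rx_count = rreg(smnPCIE_PERF_COUNT0_TXCLK3) |
		    ((uint64_t)rreg(smnPCIE_PERF_COUNT0_UPVAL_TXCLK3) << 32);
	*tx_count = rreg(smnPCIE_PERF_COUNT0_TXCLK7) |
		    ((uint64_t)rreg(smnPCIE_PERF_COUNT0_UPVAL_TXCLK7) << 32);
}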
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+			BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+		/* driver has to clear the interrupt status when bif ring is disabled */
+		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+						BIF_BX0_BIF_DOORBELL_INT_CNTL,
+						RAS_CNTLR_INTERRUPT_CLEAR, 1);
+		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+		if (!ras->disable_ras_err_cnt_harvest) {
+			/*
+			 * clear error status after ras_controller_intr
+			 * according to hw team and count ue number
+			 * for query
+			 */
+			nbio_v7_9_query_ras_error_count(adev, &err_data);
+
+			/* logging on error cnt and printing for awareness */
+			obj->err_data.ue_count += err_data.ue_count;
+			obj->err_data.ce_count += err_data.ce_count;
+
+			if (err_data.ce_count)
+				dev_info(adev->dev, "%ld correctable hardware "
+					"errors detected in %s block, "
+					"no user action is needed.\n",
+					obj->err_data.ce_count,
+					get_ras_block_str(adev->nbio.ras_if));
+
+			if (err_data.ue_count)
+				dev_info(adev->dev, "%ld uncorrectable hardware "
+					"errors detected in %s block\n",
+					obj->err_data.ue_count,
+					get_ras_block_str(adev->nbio.ras_if));
+		}
+
+		dev_info(adev->dev, "RAS controller interrupt triggered "
+			"by NBIF error\n");
+
+		/* ras_controller_int is dedicated for nbif ras error,
+		 * not the global interrupt for sync flood
+		 */
+		amdgpu_ras_reset_gpu(adev);
+	}
+}
+
+static void nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+	uint32_t bif_doorbell_intr_cntl;
+
+	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+			BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+		/* driver has to clear the interrupt status when bif ring is disabled */
+		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+						BIF_BX0_BIF_DOORBELL_INT_CNTL,
+						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+
+		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+		amdgpu_ras_global_ras_isr(adev);
+	}
+}
+
+static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
+						  struct amdgpu_irq_src *src,
+						  unsigned type,
+						  enum amdgpu_interrupt_state state)
+{
+	/* Dummy function, there is no initialization operation in driver */
+
+	return 0;
+}
+
+static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
+						struct amdgpu_irq_src *source,
+						struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for ras_controller_irq should be written
+	 * to BIF ring instead of general iv ring. However, due to known bif ring
+	 * hw bug, it has to be disabled. There is no chance the process function
+	 * will be invoked. Just leave it as a dummy one.
+	 */
+	return 0;
+}
+
+static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+						       struct amdgpu_irq_src *src,
+						       unsigned type,
+						       enum amdgpu_interrupt_state state)
+{
+	/* Dummy function, there is no initialization operation in driver */
+
+	return 0;
+}
+
+static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
+						 struct amdgpu_irq_src *source,
+						 struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for err_event_athub_irq should be written
+	 * to BIF ring instead of general iv ring. However, due to known bif ring
+	 * hw bug, it has to be disabled. There is no chance the process function
+	 * will be invoked. Just leave it as a dummy one.
+	 */
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
+	.set = nbio_v7_9_set_ras_controller_irq_state,
+	.process = nbio_v7_9_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
+	.set = nbio_v7_9_set_ras_err_event_athub_irq_state,
+	.process = nbio_v7_9_process_err_event_athub_irq,
+};
+
+static int nbio_v7_9_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* init the irq funcs */
+	adev->nbio.ras_controller_irq.funcs =
+		&nbio_v7_9_ras_controller_irq_funcs;
+	adev->nbio.ras_controller_irq.num_types = 1;
+
+	/* register ras controller interrupt */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+			      &adev->nbio.ras_controller_irq);
+
+	return r;
+}
+
+static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* init the irq funcs */
+	adev->nbio.ras_err_event_athub_irq.funcs =
+		&nbio_v7_9_ras_err_event_athub_irq_funcs;
+	adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+	/* register ras err event athub interrupt */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+			      &adev->nbio.ras_err_event_athub_irq);
+
+	return r;
+}
+
+const struct amdgpu_ras_block_hw_ops nbio_v7_9_ras_hw_ops = {
+	.query_ras_error_count = nbio_v7_9_query_ras_error_count,
+};
+
+struct amdgpu_nbio_ras nbio_v7_9_ras = {
+	.ras_block = {
+		.ras_comm = {
+			.name = "pcie_bif",
+			.block = AMDGPU_RAS_BLOCK__PCIE_BIF,
+			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+		},
+		.hw_ops = &nbio_v7_9_ras_hw_ops,
+		.ras_late_init = amdgpu_nbio_ras_late_init,
+	},
+	.handle_ras_controller_intr_no_bifring = nbio_v7_9_handle_ras_controller_intr_no_bifring,
+	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
+	.init_ras_controller_interrupt = nbio_v7_9_init_ras_controller_interrupt,
+	.init_ras_err_event_athub_interrupt = nbio_v7_9_init_ras_err_event_athub_interrupt,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
index 8e04eb484328..73709771950d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
@@ -28,5 +28,6 @@
 extern const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg;
 
 extern const struct amdgpu_nbio_funcs nbio_v7_9_funcs;
+extern struct amdgpu_nbio_ras nbio_v7_9_ras;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 51523b27a186..13aca808ecab 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -67,21 +67,18 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
 
 /* Navi */
-static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
-{
+static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
-static const struct amdgpu_video_codecs nv_video_codecs_encode =
-{
+static const struct amdgpu_video_codecs nv_video_codecs_encode = {
 	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
 	.codec_array = nv_video_codecs_encode_array,
 };
 
 /* Navi1x */
-static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
-{
+static const struct
amdgpu_video_codec_info nv_video_codecs_decode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, @@ -91,8 +88,7 @@ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs nv_video_codecs_decode = -{ +static const struct amdgpu_video_codecs nv_video_codecs_decode = { .codec_count = ARRAY_SIZE(nv_video_codecs_decode_array), .codec_array = nv_video_codecs_decode_array, }; @@ -108,8 +104,7 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = { .codec_array = sc_video_codecs_encode_array, }; -static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = -{ +static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, @@ -120,8 +115,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = -{ +static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, @@ -131,27 +125,23 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 = -{ +static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 = { .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0), .codec_array = sc_video_codecs_decode_array_vcn0, }; -static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = -{ +static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = { .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1), .codec_array = sc_video_codecs_decode_array_vcn1, }; /* SRIOV Sienna Cichlid, not const since data is controlled by host */ -static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = -{ +static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)}, }; -static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = -{ +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, @@ -162,8 +152,7 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static struct amdgpu_video_codec_info 
sriov_sc_video_codecs_decode_array_vcn1[] = -{ +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, @@ -173,20 +162,17 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; -static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = -{ +static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = { .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array), .codec_array = sriov_sc_video_codecs_encode_array, }; -static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 = -{ +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 = { .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0), .codec_array = sriov_sc_video_codecs_decode_array_vcn0, }; -static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 = -{ +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 = { .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1), .codec_array = sriov_sc_video_codecs_decode_array_vcn1, }; @@ -536,8 +522,7 @@ static void nv_program_aspm(struct amdgpu_device *adev) } -const struct amdgpu_ip_block_version nv_common_ip_block = -{ +const struct amdgpu_ip_block_version nv_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, @@ -572,16 +557,6 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev) return false; } -static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev) -{ - - /* TODO - * dummy implement for pcie_replay_count sysfs interface - * */ - - return 0; -} - static void nv_init_doorbell_index(struct amdgpu_device *adev) { adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; @@ -642,8 +617,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, return 0; } -static const struct amdgpu_asic_funcs nv_asic_funcs = -{ +static const struct amdgpu_asic_funcs nv_asic_funcs = { .read_disabled_bios = &nv_read_disabled_bios, .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, .read_register = &nv_read_register, @@ -656,7 +630,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .init_doorbell_index = &nv_init_doorbell_index, .need_full_reset = &nv_need_full_reset, .need_reset_on_init = &nv_need_reset_on_init, - .get_pcie_replay_count = &nv_get_pcie_replay_count, + .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count, .supports_baco = &amdgpu_dpm_is_baco_supported, .pre_asic_init = &nv_pre_asic_init, .update_umd_stable_pstate = &nv_update_umd_stable_pstate, @@ -889,7 +863,8 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG; adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | @@ -950,7 +925,8 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG | diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 
af5685f4cb34..10b17bd5aebe 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -50,6 +50,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 @@ -94,6 +96,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) case IP_VERSION(13, 0, 5): case IP_VERSION(13, 0, 8): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): err = psp_init_toc_microcode(psp, ucode_prefix); if (err) return err; @@ -688,6 +691,27 @@ static int psp_v13_0_vbflash_status(struct psp_context *psp) return RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_115); } +static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 10)) { + uint32_t reg_data; + /* MP1 fatal error: trigger PSP dram read to unhalt PSP + * during MP1 triggered sync flood. + */ + reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, reg_data + 0x10); + + /* delay 1000ms for the mode1 reset for fatal error + * to be recovered back. + */ + msleep(1000); + } + + return 0; +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .bootloader_load_kdb = psp_v13_0_bootloader_load_kdb, @@ -707,7 +731,8 @@ static const struct psp_funcs psp_v13_0_funcs = { .load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw, .read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw, .update_spirom = psp_v13_0_update_spirom, - .vbflash_stat = psp_v13_0_vbflash_status + .vbflash_stat = psp_v13_0_vbflash_status, + .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 5c4d4df9cf94..1cc34efb455b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -237,17 +237,15 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) // emulation only, won't work on real chip // navi10 real chip need to use PSP to load firmware static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) -{ int ret, i; - - if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5))) - return 0; +{ + int ret, i; for (i = 0; i < adev->sdma.num_instances; i++) { ret = amdgpu_sdma_init_microcode(adev, i, false); if (ret) return ret; } - + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index a7b230e5a26d..2b3ebebc4299 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1507,6 +1507,30 @@ static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev, return 0; } +static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev, + int i) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 2, 1): + if (adev->sdma.instance[i].fw_version < 70) + return false; + break; + case IP_VERSION(5, 2, 3): + if (adev->sdma.instance[i].fw_version < 47) + return false; + break; + case IP_VERSION(5, 2, 7): + if (adev->sdma.instance[i].fw_version < 9) + return false; + break; + 
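+	/* Each case above names the first SDMA firmware revision of that IP
+	 * minor version known to implement MGCG; unknown IP versions fall
+	 * through to the default and are assumed to support it.
+	 */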
default: + return true; + } + + return true; + +} + static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable) { @@ -1515,7 +1539,7 @@ static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *ade for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1)) + if (!sdma_v5_2_firmware_mgcg_support(adev, i)) adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { @@ -1589,6 +1613,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle, case IP_VERSION(5, 2, 5): case IP_VERSION(5, 2, 6): case IP_VERSION(5, 2, 3): + case IP_VERSION(5, 2, 7): sdma_v5_2_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); sdma_v5_2_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 3b03dda854fd..45be0af2570b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin"); +MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin"); #define SDMA1_REG_OFFSET 0x600 #define SDMA0_HYP_DEC_REG_START 0x5880 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index afcaeadda4c7..c45721ca916e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -893,9 +893,9 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .get_config_memsize = &soc15_get_config_memsize, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &aqua_vanjaram_doorbell_index_init, - .get_pcie_usage = &vega20_get_pcie_usage, + .get_pcie_usage = &amdgpu_nbio_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, - .get_pcie_replay_count = &soc15_get_pcie_replay_count, + .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count, .supports_baco = &soc15_supports_baco, .pre_asic_init = &soc15_pre_asic_init, .query_video_codecs = &soc15_query_video_codecs, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 96948a59f8dd..da683afa0222 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -37,65 +37,65 @@ #define SOC15_REG_OFFSET1(ip, inst, reg, offset) \ (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)+(offset)) -#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \ +#define __WREG32_SOC15_RLC__(reg, value, flag, hwip, inst) \ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \ - amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \ + amdgpu_sriov_wreg(adev, reg, value, flag, hwip, inst) : \ WREG32(reg, value)) -#define __RREG32_SOC15_RLC__(reg, flag, hwip) \ +#define __RREG32_SOC15_RLC__(reg, flag, hwip, inst) \ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? 
\ - amdgpu_sriov_rreg(adev, reg, flag, hwip) : \ + amdgpu_sriov_rreg(adev, reg, flag, hwip, inst) : \ RREG32(reg)) #define WREG32_FIELD15(ip, idx, reg, field, val) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ (__RREG32_SOC15_RLC__( \ adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ - 0, ip##_HWIP) & \ + 0, ip##_HWIP, idx) & \ ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ - 0, ip##_HWIP) + 0, ip##_HWIP, idx) #define WREG32_FIELD15_PREREG(ip, idx, reg_name, field, val) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \ (__RREG32_SOC15_RLC__( \ adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \ - 0, ip##_HWIP) & \ + 0, ip##_HWIP, idx) & \ ~REG_FIELD_MASK(reg_name, field)) | (val) << REG_FIELD_SHIFT(reg_name, field), \ - 0, ip##_HWIP) + 0, ip##_HWIP, idx) #define RREG32_SOC15(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ - 0, ip##_HWIP) + 0, ip##_HWIP, inst) -#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP) +#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0) -#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP) +#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ - AMDGPU_REGS_NO_KIQ, ip##_HWIP) + AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)) + \ - (offset), 0, ip##_HWIP) + (offset), 0, ip##_HWIP, inst) #define WREG32_SOC15(ip, inst, reg, value) \ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ - value, 0, ip##_HWIP) + value, 0, ip##_HWIP, inst) #define WREG32_SOC15_IP(ip, reg, value) \ - __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP) + __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0) #define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0) #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ - value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) + value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst) #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \ - value, 0, ip##_HWIP) + value, 0, ip##_HWIP, inst) #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \ amdgpu_device_wait_on_rreg(adev, inst, \ @@ -108,16 +108,16 @@ #reg, expected_value, mask) #define WREG32_RLC(reg, value) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP, 0) -#define WREG32_RLC_EX(prefix, reg, value) \ +#define WREG32_RLC_EX(prefix, reg, value, inst) \ do { \ if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t i = 0; \ uint32_t retries = 50000; \ - uint32_t r0 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \ - uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \ - uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + 
prefix##RLC_SPARE_INT; \ + uint32_t r0 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \ + uint32_t r1 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \ + uint32_t spare_int = adev->reg_offset[GC_HWIP][inst][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \ WREG32(r0, value); \ WREG32(r1, (reg | 0x80000000)); \ WREG32(spare_int, 0x1); \ @@ -136,17 +136,17 @@ /* shadow the registers in the callback function */ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ - __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP, inst) /* for GC only */ #define RREG32_RLC(reg) \ __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP) #define WREG32_RLC_NO_KIQ(reg, value, hwip) \ - __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0) #define RREG32_RLC_NO_KIQ(reg, hwip) \ - __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0) #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \ do { \ @@ -167,32 +167,32 @@ } while (0) #define RREG32_SOC15_RLC(ip, inst, reg) \ - __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP) + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP, inst) #define WREG32_SOC15_RLC(ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ - __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \ + __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP, inst); \ } while (0) #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[GC_HWIP][inst][reg##_BASE_IDX] + reg;\ - WREG32_RLC_EX(prefix, target_reg, value); \ + WREG32_RLC_EX(prefix, target_reg, value, inst); \ } while (0) #define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \ (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ - AMDGPU_REGS_RLC, ip##_HWIP) & \ + AMDGPU_REGS_RLC, ip##_HWIP, idx) & \ ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ - AMDGPU_REGS_RLC, ip##_HWIP) + AMDGPU_REGS_RLC, ip##_HWIP, idx) #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \ - __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP, inst) #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \ - __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP) + __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP, inst) /* inst equals to ext for some IPs */ #define RREG32_SOC15_EXT(ip, inst, reg, ext) \ diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index e5e5d68a4d70..40d23738ee4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ 
b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -48,33 +48,28 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = -{ +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = -{ +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = -{ +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = { .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0), .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = -{ +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = { .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1), .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1, }; -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = -{ +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, @@ -82,22 +77,19 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = -{ +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 = -{ +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 = { .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0), .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = -{ +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = { .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1), .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1, }; @@ -445,8 +437,7 @@ static void soc21_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); } -const struct amdgpu_ip_block_version soc21_common_ip_block = -{ +const struct amdgpu_ip_block_version soc21_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, @@ -484,16 +475,6 @@ static bool soc21_need_reset_on_init(struct amdgpu_device *adev) return false; } -static uint64_t 
soc21_get_pcie_replay_count(struct amdgpu_device *adev) -{ - - /* TODO - * dummy implement for pcie_replay_count sysfs interface - * */ - - return 0; -} - static void soc21_init_doorbell_index(struct amdgpu_device *adev) { adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; @@ -547,8 +528,7 @@ static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev, return 0; } -static const struct amdgpu_asic_funcs soc21_asic_funcs = -{ +static const struct amdgpu_asic_funcs soc21_asic_funcs = { .read_disabled_bios = &soc21_read_disabled_bios, .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, .read_register = &soc21_read_register, @@ -561,7 +541,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = .init_doorbell_index = &soc21_init_doorbell_index, .need_full_reset = &soc21_need_full_reset, .need_reset_on_init = &soc21_need_reset_on_init, - .get_pcie_replay_count = &soc21_get_pcie_replay_count, + .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count, .supports_baco = &amdgpu_dpm_is_baco_supported, .pre_asic_init = &soc21_pre_asic_init, .query_video_codecs = &soc21_query_video_codecs, diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index b08905d1c00f..917707bba7f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -493,8 +493,7 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev) adev->irq.ih_funcs = &tonga_ih_funcs; } -const struct amdgpu_ip_block_version tonga_ih_ip_block = -{ +const struct amdgpu_ip_block_version tonga_ih_ip_block = { .type = AMD_IP_BLOCK_TYPE_IH, .major = 3, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 0fef925b6602..5534c769b655 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -815,8 +815,7 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = { .set_powergating_state = uvd_v3_1_set_powergating_state, }; -const struct amdgpu_ip_block_version uvd_v3_1_ip_block = -{ +const struct amdgpu_ip_block_version uvd_v3_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_UVD, .major = 3, .minor = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index abaa4463e906..86d1d46e1e5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -679,11 +679,11 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, i == 0 ? - adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo: + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo : adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo); WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, i == 0 ? 
- adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi: + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi : adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi); WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; @@ -1908,8 +1908,7 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) } } -const struct amdgpu_ip_block_version uvd_v7_0_ip_block = -{ +const struct amdgpu_ip_block_version uvd_v7_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_UVD, .major = 7, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8def62c83ffd..18f6e62af339 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -998,8 +998,7 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) adev->vce.irq.funcs = &vce_v3_0_irq_funcs; }; -const struct amdgpu_ip_block_version vce_v3_0_ip_block = -{ +const struct amdgpu_ip_block_version vce_v3_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCE, .major = 3, .minor = 0, @@ -1007,8 +1006,7 @@ const struct amdgpu_ip_block_version vce_v3_0_ip_block = .funcs = &vce_v3_0_ip_funcs, }; -const struct amdgpu_ip_block_version vce_v3_1_ip_block = -{ +const struct amdgpu_ip_block_version vce_v3_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCE, .major = 3, .minor = 1, @@ -1016,8 +1014,7 @@ const struct amdgpu_ip_block_version vce_v3_1_ip_block = .funcs = &vce_v3_0_ip_funcs, }; -const struct amdgpu_ip_block_version vce_v3_4_ip_block = -{ +const struct amdgpu_ip_block_version vce_v3_4_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCE, .major = 3, .minor = 4, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 16feb491adf5..25ba27151ac0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -473,7 +473,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev) if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else - data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; @@ -1772,7 +1772,7 @@ static int vcn_v1_0_set_powergating_state(void *handle, int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if(state == adev->vcn.cur_state) + if (state == adev->vcn.cur_state) return 0; if (state == AMD_PG_STATE_GATE) @@ -1780,7 +1780,7 @@ static int vcn_v1_0_set_powergating_state(void *handle, else ret = vcn_v1_0_start(adev); - if(!ret) + if (!ret) adev->vcn.cur_state = state; return ret; } @@ -2065,8 +2065,7 @@ static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs; } -const struct amdgpu_ip_block_version vcn_v1_0_ip_block = -{ +const struct amdgpu_ip_block_version vcn_v1_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCN, .major = 1, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index c975aed2f6c7..18794394c5a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -881,9 +881,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, 0, 
0); /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index bb1875f926f1..6fbea38f4d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -912,9 +912,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); ring = &adev->vcn.inst[inst_idx].ring_dec; /* force RBC into idle state */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index c8f63b3c6f69..a61ecefdafc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1037,9 +1037,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); ring = &adev->vcn.inst[inst_idx].ring_dec; /* force RBC into idle state */ @@ -1107,7 +1105,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG){ + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram); continue; } @@ -1791,7 +1789,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_bo *bo; uint64_t start, end; unsigned int i; - void * ptr; + void *ptr; int r; addr &= AMDGPU_GMC_HOLE_MASK; @@ -2097,7 +2095,7 @@ static int vcn_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -2131,7 +2129,7 @@ static int vcn_v3_0_set_powergating_state(void *handle, return 0; } - if(state == adev->vcn.cur_state) + if (state == adev->vcn.cur_state) return 0; if (state == AMD_PG_STATE_GATE) @@ -2139,7 +2137,7 @@ static int vcn_v3_0_set_powergating_state(void *handle, else ret = vcn_v3_0_start(adev); - if(!ret) + if (!ret) adev->vcn.cur_state = state; return ret; @@ -2230,8 +2228,7 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { .set_powergating_state = vcn_v3_0_set_powergating_state, }; -const struct amdgpu_ip_block_version vcn_v3_0_ip_block = -{ +const struct amdgpu_ip_block_version vcn_v3_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCN, .major = 3, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 259795098173..29164289c5f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -169,6 +169,12 @@ static int vcn_v4_0_sw_init(void *handle) fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 
AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; + if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) { + fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; + fw_shared->drm_key_wa.method = + AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; + } + if (amdgpu_sriov_vf(adev)) fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); @@ -993,9 +999,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo if (indirect) - psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -1135,11 +1139,11 @@ static int vcn_v4_0_start(struct amdgpu_device *adev) if (status & 2) break; mdelay(10); - if (amdgpu_emu_mode==1) + if (amdgpu_emu_mode == 1) msleep(1); } - if (amdgpu_emu_mode==1) { + if (amdgpu_emu_mode == 1) { r = -1; if (status & 2) { r = 0; @@ -1800,7 +1804,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, return 0; } -static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { +static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, @@ -1845,7 +1849,11 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs; + if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) + vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true; + + adev->vcn.inst[i].ring_enc[0].funcs = + (const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs; adev->vcn.inst[i].ring_enc[0].me = i; DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i); @@ -1910,7 +1918,7 @@ static int vcn_v4_0_wait_for_idle(void *handle) static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = state == AMD_CG_STATE_GATE; int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -1951,7 +1959,7 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta return 0; } - if(state == adev->vcn.cur_state) + if (state == adev->vcn.cur_state) return 0; if (state == AMD_PG_STATE_GATE) @@ -1959,7 +1967,7 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta else ret = vcn_v4_0_start(adev); - if(!ret) + if (!ret) adev->vcn.cur_state = state; return ret; @@ -2093,8 +2101,7 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = { .set_powergating_state = vcn_v4_0_set_powergating_state, }; -const struct amdgpu_ip_block_version vcn_v4_0_ip_block = -{ +const struct amdgpu_ip_block_version vcn_v4_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCN, .major = 4, .minor = 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 5d67b8b8a3d6..f85d18cd74ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -31,6 +31,7 @@ #include "soc15d.h" #include "soc15_hw_ip.h" #include "vcn_v2_0.h" +#include "mmsch_v4_0_3.h" #include "vcn/vcn_4_0_3_offset.h" #include "vcn/vcn_4_0_3_sh_mask.h" @@ -44,6 +45,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev); static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev); static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v4_0_3_set_powergating_state(void *handle, @@ -111,9 +113,16 @@ static int vcn_v4_0_3_sw_init(void *handle) ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; - ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 9 * vcn_inst; + + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * vcn_inst; + else + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 32 * vcn_inst; + ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -130,6 +139,12 @@ static int vcn_v4_0_3_sw_init(void *handle) amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); } + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode; @@ -167,6 +182,9 @@ static int vcn_v4_0_3_sw_fini(void *handle) drm_dev_exit(idx); } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + r = amdgpu_vcn_suspend(adev); if (r) return r; @@ -189,33 +207,47 @@ static int vcn_v4_0_3_hw_init(void *handle) struct amdgpu_ring *ring; int i, r, vcn_inst; - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - vcn_inst = GET_INST(VCN, i); - ring = &adev->vcn.inst[i].ring_enc[0]; - - if (ring->use_doorbell) { - adev->nbio.funcs->vcn_doorbell_range( - adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 9 * vcn_inst, - adev->vcn.inst[i].aid_id); - - WREG32_SOC15( - VCN, GET_INST(VCN, ring->me), - regVCN_RB1_DB_CTRL, - ring->doorbell_index - << VCN_RB1_DB_CTRL__OFFSET__SHIFT | - VCN_RB1_DB_CTRL__EN_MASK); - - /* Read DB_CTRL to flush the write DB_CTRL command. 
*/ - RREG32_SOC15( - VCN, GET_INST(VCN, ring->me), - regVCN_RB1_DB_CTRL); - } - - r = amdgpu_ring_test_helper(ring); + if (amdgpu_sriov_vf(adev)) { + r = vcn_v4_0_3_start_sriov(adev); if (r) goto done; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v4_0_3_unified_ring_set_wptr(ring); + ring->sched.ready = true; + } + } else { + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + vcn_inst = GET_INST(VCN, i); + ring = &adev->vcn.inst[i].ring_enc[0]; + + if (ring->use_doorbell) { + adev->nbio.funcs->vcn_doorbell_range( + adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * vcn_inst, + adev->vcn.inst[i].aid_id); + + WREG32_SOC15( + VCN, GET_INST(VCN, ring->me), + regVCN_RB1_DB_CTRL, + ring->doorbell_index + << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + /* Read DB_CTRL to flush the write DB_CTRL command. */ + RREG32_SOC15( + VCN, GET_INST(VCN, ring->me), + regVCN_RB1_DB_CTRL); + } + + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } } done: @@ -778,9 +810,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - - (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); ring = &adev->vcn.inst[inst_idx].ring_enc[0]; @@ -815,6 +845,193 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b return 0; } +static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev) +{ + int i, vcn_inst; + struct amdgpu_ring *ring_enc; + uint64_t cache_addr; + uint64_t rb_enc_addr; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t offset, cache_size; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + uint32_t init_status; + uint32_t enabled_vcn; + + struct mmsch_v4_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v4_0_cmd_direct_read_modify_write + direct_rd_mod_wt = { {0} }; + struct mmsch_v4_0_cmd_end end = { {0} }; + struct mmsch_v4_0_3_init_header header; + + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + table_size = 0; + + MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + 
regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + + offset = 0; + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), 0); + } else { + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = cache_size; + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE0), + cache_size); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset; + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET1), 0); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE; + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET2), 0); + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); + + fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr; + rb_setup = &fw_shared->rb_setup; + + ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0]; + ring_enc->wptr = 0; + rb_enc_addr = ring_enc->gpu_addr; + + rb_setup->is_rb_enabled_flags |= RB_ENABLED; + rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); + rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); + rb_setup->rb_size = ring_enc->ring_size / 4; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); + MMSCH_V4_0_INSERT_END(); + + header.vcn0.init_status = 0; + header.vcn0.table_offset = header.total_size; + header.vcn0.table_size = table_size; + header.total_size += table_size; + + /* Send init table to mmsch */ + size = sizeof(struct mmsch_v4_0_3_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, 
vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec waiting for regMMSCH_VF_MAILBOX_RESP (expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v4_0_3_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for VCN%x: 0x%x\n",
+ resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
+
 /**
 * vcn_v4_0_3_start - VCN start
 *
@@ -1289,7 +1506,7 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle,
 enum amd_clockgating_state state)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
 int i;
 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1319,6 +1536,15 @@ static int vcn_v4_0_3_set_powergating_state(void *handle,
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 int ret;
+ /* For SRIOV, the guest should not control VCN power-gating.
+ * MMSCH FW should control power-gating and clock-gating;
+ * the guest should avoid touching CGC and PG.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
 if (state == adev->vcn.cur_state)
 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1e83db0c5438..d364c6dd152c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -485,7 +485,7 @@ static int vega10_ih_sw_init(void *handle)
 if (r)
 return r;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, true);
 if (r)
 return r;
@@ -510,7 +510,7 @@ static int vega10_ih_sw_init(void *handle)
 /* initialize ih control registers offset */
 vega10_ih_init_register_offset(adev);
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
 if (r)
 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 4d719df376a7..dbc99536440f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -500,7 +500,8 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
 case 2:
 schedule_work(&adev->irq.ih2_work);
 break;
- default: break;
+ default:
+ break;
 }
 return 0;
 }
@@ -539,7 +540,7 @@ static int vega20_ih_sw_init(void *handle)
 (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)))
 use_bus_addr = false;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
 if (r)
 return r;
@@ -565,7 +566,7 @@ static int vega20_ih_sw_init(void *handle)
 /* initialize ih control registers offset */
 vega20_ih_init_register_offset(adev);
- r =
amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, use_bus_addr); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr); if (r) return r; @@ -710,8 +711,7 @@ static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev) adev->irq.ih_funcs = &vega20_ih_funcs; } -const struct amdgpu_ip_block_version vega20_ih_ip_block = -{ +const struct amdgpu_ip_block_version vega20_ih_ip_block = { .type = AMD_IP_BLOCK_TYPE_IH, .major = 4, .minor = 2, diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index 93bd4eda0d94..d3c3d3ab7225 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -6,7 +6,6 @@ config HSA_AMD bool "HSA kernel driver for AMD GPU devices" depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64) - imply AMD_IOMMU_V2 if X86_64 select HMM_MIRROR select MMU_NOTIFIER select DRM_AMDGPU_USERPTR diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 2ec8f27c5366..a5ae7bcf44eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -59,10 +59,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_crat.o \ $(AMDKFD_PATH)/kfd_debug.o -ifneq ($(CONFIG_AMD_IOMMU_V2),) -AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o -endif - ifneq ($(CONFIG_DEBUG_FS),) AMDKFD_FILES += $(AMDKFD_PATH)/kfd_debugfs.o endif diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 73ca9aebf086..d7cd5fa313ff 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -274,16 +274,16 @@ static const uint32_t cwsr_trap_gfx8_hex[] = { static const uint32_t cwsr_trap_gfx9_hex[] = { - 0xbf820001, 0xbf820254, + 0xbf820001, 0xbf820258, 0xb8f8f802, 0x8978ff78, 0x00020006, 0xb8fbf803, 0x866eff78, 0x00002000, 0xbf840009, 0x866eff6d, 0x00ff0000, 0xbf85001e, 0x866eff7b, 0x00000400, - 0xbf850051, 0xbf8e0010, + 0xbf850055, 0xbf8e0010, 0xb8fbf803, 0xbf82fffa, - 0x866eff7b, 0x00000900, + 0x866eff7b, 0x03c00900, 0xbf850015, 0x866eff7b, 0x000071ff, 0xbf840008, 0x866fff7b, 0x00007080, @@ -294,13 +294,15 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0xbf850007, 0xb8eef801, 0x866eff6e, 0x00000800, 0xbf850003, 0x866eff7b, - 0x00000400, 0xbf850036, + 0x00000400, 0xbf85003a, 0xb8faf807, 0x867aff7a, 0x001f8000, 0x8e7a8b7a, 0x8977ff77, 0xfc000000, 0x87777a77, 0xba7ff807, 0x00000000, 0xb8faf812, 0xb8fbf813, 0x8efa887a, + 0xbf0d8f7b, 0xbf840002, + 0x877bff7b, 0xffff0000, 0xc0031bbd, 0x00000010, 0xbf8cc07f, 0x8e6e976e, 0x8977ff77, 0x00800000, @@ -676,14 +678,14 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { }; static const uint32_t cwsr_trap_nv1x_hex[] = { - 0xbf820001, 0xbf8201f1, + 0xbf820001, 0xbf8201f5, 0xb0804004, 0xb978f802, 0x8a78ff78, 0x00020006, 0xb97bf803, 0x876eff78, 0x00002000, 0xbf840009, 0x876eff6d, 0x00ff0000, 0xbf85001e, 0x876eff7b, - 0x00000400, 0xbf850057, + 0x00000400, 0xbf85005b, 0xbf8e0010, 0xb97bf803, 0xbf82fffa, 0x876eff7b, 0x00000900, 0xbf850015, @@ -697,7 +699,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xb96ef801, 0x876eff6e, 0x00000800, 0xbf850003, 0x876eff7b, 0x00000400, - 0xbf85003c, 0x8a77ff77, + 0xbf850040, 0x8a77ff77, 0xff000000, 0xb97af807, 0x877bff7a, 0x02000000, 0x8f7b867b, 0x88777b77, @@ -706,6 +708,8 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0x8a7aff7a, 0x023f8000, 0xb9faf807, 0xb97af812, 0xb97bf813, 0x8ffa887a, + 0xbf0d8f7b, 0xbf840002, + 0x887bff7b, 0xffff0000, 0xf4011bbd, 0xfa000010, 
0xbf8cc07f, 0x8f6e976e, 0x8a77ff77, 0x00800000, @@ -1094,16 +1098,16 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { }; static const uint32_t cwsr_trap_arcturus_hex[] = { - 0xbf820001, 0xbf8202d0, + 0xbf820001, 0xbf8202d4, 0xb8f8f802, 0x8978ff78, 0x00020006, 0xb8fbf803, 0x866eff78, 0x00002000, 0xbf840009, 0x866eff6d, 0x00ff0000, 0xbf85001e, 0x866eff7b, 0x00000400, - 0xbf850051, 0xbf8e0010, + 0xbf850055, 0xbf8e0010, 0xb8fbf803, 0xbf82fffa, - 0x866eff7b, 0x00000900, + 0x866eff7b, 0x03c00900, 0xbf850015, 0x866eff7b, 0x000071ff, 0xbf840008, 0x866fff7b, 0x00007080, @@ -1114,13 +1118,15 @@ static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf850007, 0xb8eef801, 0x866eff6e, 0x00000800, 0xbf850003, 0x866eff7b, - 0x00000400, 0xbf850036, + 0x00000400, 0xbf85003a, 0xb8faf807, 0x867aff7a, 0x001f8000, 0x8e7a8b7a, 0x8977ff77, 0xfc000000, 0x87777a77, 0xba7ff807, 0x00000000, 0xb8faf812, 0xb8fbf813, 0x8efa887a, + 0xbf0d8f7b, 0xbf840002, + 0x877bff7b, 0xffff0000, 0xc0031bbd, 0x00000010, 0xbf8cc07f, 0x8e6e976e, 0x8977ff77, 0x00800000, @@ -1572,16 +1578,16 @@ static const uint32_t cwsr_trap_arcturus_hex[] = { }; static const uint32_t cwsr_trap_aldebaran_hex[] = { - 0xbf820001, 0xbf8202db, + 0xbf820001, 0xbf8202df, 0xb8f8f802, 0x8978ff78, 0x00020006, 0xb8fbf803, 0x866eff78, 0x00002000, 0xbf840009, 0x866eff6d, 0x00ff0000, 0xbf85001e, 0x866eff7b, 0x00000400, - 0xbf850051, 0xbf8e0010, + 0xbf850055, 0xbf8e0010, 0xb8fbf803, 0xbf82fffa, - 0x866eff7b, 0x00000900, + 0x866eff7b, 0x03c00900, 0xbf850015, 0x866eff7b, 0x000071ff, 0xbf840008, 0x866fff7b, 0x00007080, @@ -1592,13 +1598,15 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = { 0xbf850007, 0xb8eef801, 0x866eff6e, 0x00000800, 0xbf850003, 0x866eff7b, - 0x00000400, 0xbf850036, + 0x00000400, 0xbf85003a, 0xb8faf807, 0x867aff7a, 0x001f8000, 0x8e7a8b7a, 0x8977ff77, 0xfc000000, 0x87777a77, 0xba7ff807, 0x00000000, 0xb8faf812, 0xb8fbf813, 0x8efa887a, + 0xbf0d8f7b, 0xbf840002, + 0x877bff7b, 0xffff0000, 0xc0031bbd, 0x00000010, 0xbf8cc07f, 0x8e6e976e, 0x8977ff77, 0x00800000, @@ -2061,14 +2069,14 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = { }; static const uint32_t cwsr_trap_gfx10_hex[] = { - 0xbf820001, 0xbf82021c, + 0xbf820001, 0xbf820220, 0xb0804004, 0xb978f802, 0x8a78ff78, 0x00020006, 0xb97bf803, 0x876eff78, 0x00002000, 0xbf840009, 0x876eff6d, 0x00ff0000, 0xbf85001e, 0x876eff7b, - 0x00000400, 0xbf850041, + 0x00000400, 0xbf850045, 0xbf8e0010, 0xb97bf803, 0xbf82fffa, 0x876eff7b, 0x00000900, 0xbf850015, @@ -2082,8 +2090,10 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xb96ef801, 0x876eff6e, 0x00000800, 0xbf850003, 0x876eff7b, 0x00000400, - 0xbf850026, 0xb97af812, + 0xbf85002a, 0xb97af812, 0xb97bf813, 0x8ffa887a, + 0xbf0d8f7b, 0xbf840002, + 0x887bff7b, 0xffff0000, 0xf4011bbd, 0xfa000010, 0xbf8cc07f, 0x8f6e976e, 0x8a77ff77, 0x00800000, @@ -2494,8 +2504,9 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, }; + static const uint32_t cwsr_trap_gfx11_hex[] = { - 0xbfa00001, 0xbfa00221, + 0xbfa00001, 0xbfa00225, 0xb0804006, 0xb8f8f802, 0x9178ff78, 0x00020006, 0xb8fbf803, 0xbf0d9e6d, @@ -2505,7 +2516,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0xbfa10009, 0x8b6eff6d, 0x00ff0000, 0xbfa2001e, 0x8b6eff7b, 0x00000400, - 0xbfa20041, 0xbf830010, + 0xbfa20045, 0xbf830010, 0xb8fbf803, 0xbfa0fffa, 0x8b6eff7b, 0x00000900, 0xbfa20015, 0x8b6eff7b, @@ -2518,9 +2529,11 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0xbfa20007, 0xb8eef801, 0x8b6eff6e, 0x00000800, 0xbfa20003, 0x8b6eff7b, - 0x00000400, 0xbfa20026, + 
0x00000400, 0xbfa2002a, 0xbefa4d82, 0xbf89fc07, - 0x84fa887a, 0xf4005bbd, + 0x84fa887a, 0xbf0d8f7b, + 0xbfa10002, 0x8c7bff7b, + 0xffff0000, 0xf4005bbd, 0xf8000010, 0xbf89fc07, 0x846e976e, 0x9177ff77, 0x00800000, 0x8c776e77, @@ -2938,211 +2951,151 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { }; static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { - 0xbf820001, 0xbf8202d6, - 0xb8f8f802, 0x89788678, - 0xb8fbf803, 0x866eff78, - 0x00002000, 0xbf840009, - 0x866eff6d, 0x00ff0000, - 0xbf85001a, 0x866eff7b, - 0x00000400, 0xbf85004d, - 0xbf8e0010, 0xb8fbf803, - 0xbf82fffa, 0x866eff7b, - 0x03c00900, 0xbf850011, - 0x866eff7b, 0x000071ff, - 0xbf840008, 0x866fff7b, - 0x00007080, 0xbf840001, - 0xbeee1a87, 0xb8eff801, - 0x8e6e8c6e, 0x866e6f6e, - 0xbf850006, 0x866eff6d, - 0x00ff0000, 0xbf850003, + 0xbf820001, 0xbf8202db, + 0xb8f8f802, 0x8978ff78, + 0x00020006, 0xb8fbf803, + 0x866eff78, 0x00002000, + 0xbf840009, 0x866eff6d, + 0x00ff0000, 0xbf85001a, 0x866eff7b, 0x00000400, - 0xbf850036, 0xb8faf807, - 0x867aff7a, 0x001f8000, - 0x8e7a8b7a, 0x8979ff79, - 0xfc000000, 0x87797a79, - 0xba7ff807, 0x00000000, - 0xb8faf812, 0xb8fbf813, - 0x8efa887a, 0xc0031bbd, - 0x00000010, 0xbf8cc07f, - 0x8e6e976e, 0x8979ff79, - 0x00800000, 0x87796e79, - 0xc0071bbd, 0x00000000, - 0xbf8cc07f, 0xc0071ebd, - 0x00000008, 0xbf8cc07f, - 0x86ee6e6e, 0xbf840001, - 0xbe801d6e, 0x866eff6d, - 0x01ff0000, 0xbf850005, - 0x8778ff78, 0x00002000, - 0x80ec886c, 0x82ed806d, - 0xbf820005, 0x866eff6d, - 0x01000000, 0xbf850002, - 0x806c846c, 0x826d806d, - 0x866dff6d, 0x0000ffff, - 0x8f7a8b79, 0x867aff7a, - 0x001f8000, 0xb97af807, - 0x86fe7e7e, 0x86ea6a6a, - 0x8f6e8378, 0xb96ee0c2, - 0xbf800002, 0xb9780002, - 0xbe801f6c, 0x866dff6d, - 0x0000ffff, 0xbefa0080, - 0xb97a0283, 0xb8faf807, - 0x867aff7a, 0x001f8000, - 0x8e7a8b7a, 0x8979ff79, - 0xfc000000, 0x87797a79, - 0xba7ff807, 0x00000000, - 0xbeee007e, 0xbeef007f, - 0xbefe0180, 0xbf900004, - 0x877a8478, 0xb97af802, - 0xbf8e0002, 0xbf88fffe, - 0xb8fa2985, 0x807a817a, - 0x8e7a8a7a, 0x8e7a817a, - 0xb8fb1605, 0x807b817b, - 0x8e7b867b, 0x807a7b7a, - 0x807a7e7a, 0x827b807f, - 0x867bff7b, 0x0000ffff, - 0xc04b1c3d, 0x00000050, - 0xbf8cc07f, 0xc04b1d3d, - 0x00000060, 0xbf8cc07f, - 0xc0431e7d, 0x00000074, - 0xbf8cc07f, 0xbef4007e, - 0x8675ff7f, 0x0000ffff, - 0x8775ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x00807fac, 0xbef1007c, - 0xbef00080, 0xb8f02985, - 0x80708170, 0x8e708a70, - 0x8e708170, 0xb8fa1605, - 0x807a817a, 0x8e7a867a, - 0x80707a70, 0xbef60084, - 0xbef600ff, 0x01000000, - 0xbefe007c, 0xbefc0070, - 0xc0611c7a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611b3a, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, - 0xbefe007c, 0xbefc0070, - 0xc0611b7a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611bba, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, - 0xbefe007c, 0xbefc0070, - 0xc0611bfa, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611e3a, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, - 0xb8fbf803, 0xbefe007c, - 0xbefc0070, 0xc0611efa, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, - 0xbefe007c, 0xbefc0070, - 0xc0611a3a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611a7a, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, - 0xb8f1f801, 0xbefe007c, - 0xbefc0070, 0xc0611c7a, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, - 0x867aff7f, 0x04000000, - 0xbeef0080, 0x876f6f7a, - 0xb8f02985, 0x80708170, - 0x8e708a70, 0x8e708170, - 
0xb8fb1605, 0x807b817b, - 0x8e7b847b, 0x8e76827b, - 0xbef600ff, 0x01000000, - 0xbef20174, 0x80747074, - 0x82758075, 0xbefc0080, - 0xbf800000, 0xbe802b00, - 0xbe822b02, 0xbe842b04, - 0xbe862b06, 0xbe882b08, - 0xbe8a2b0a, 0xbe8c2b0c, - 0xbe8e2b0e, 0xc06b003a, + 0xbf850051, 0xbf8e0010, + 0xb8fbf803, 0xbf82fffa, + 0x866eff7b, 0x03c00900, + 0xbf850011, 0x866eff7b, + 0x000071ff, 0xbf840008, + 0x866fff7b, 0x00007080, + 0xbf840001, 0xbeee1a87, + 0xb8eff801, 0x8e6e8c6e, + 0x866e6f6e, 0xbf850006, + 0x866eff6d, 0x00ff0000, + 0xbf850003, 0x866eff7b, + 0x00000400, 0xbf85003a, + 0xb8faf807, 0x867aff7a, + 0x001f8000, 0x8e7a8b7a, + 0x8979ff79, 0xfc000000, + 0x87797a79, 0xba7ff807, + 0x00000000, 0xb8faf812, + 0xb8fbf813, 0x8efa887a, + 0xbf0d8f7b, 0xbf840002, + 0x877bff7b, 0xffff0000, + 0xc0031bbd, 0x00000010, + 0xbf8cc07f, 0x8e6e976e, + 0x8979ff79, 0x00800000, + 0x87796e79, 0xc0071bbd, 0x00000000, 0xbf8cc07f, - 0xc06b013a, 0x00000010, - 0xbf8cc07f, 0xc06b023a, - 0x00000020, 0xbf8cc07f, - 0xc06b033a, 0x00000030, - 0xbf8cc07f, 0x8074c074, - 0x82758075, 0x807c907c, - 0xbf0a7b7c, 0xbf85ffe7, - 0xbef40172, 0xbef00080, - 0xbefe00c1, 0xbeff00c1, - 0xbee80080, 0xbee90080, - 0xbef600ff, 0x01000000, - 0x867aff78, 0x00400000, - 0xbf850003, 0xb8faf803, - 0x897a7aff, 0x10000000, - 0xbf85004d, 0xbe840080, - 0xd2890000, 0x00000900, - 0x80048104, 0xd2890001, - 0x00000900, 0x80048104, - 0xd2890002, 0x00000900, - 0x80048104, 0xd2890003, - 0x00000900, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, - 0xbe840080, 0xd2890000, - 0x00000901, 0x80048104, - 0xd2890001, 0x00000901, - 0x80048104, 0xd2890002, - 0x00000901, 0x80048104, - 0xd2890003, 0x00000901, - 0x80048104, 0xc069003a, - 0x00000070, 0xbf8cc07f, - 0x80709070, 0xbf06c004, - 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000902, - 0x80048104, 0xd2890001, - 0x00000902, 0x80048104, - 0xd2890002, 0x00000902, - 0x80048104, 0xd2890003, - 0x00000902, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, - 0xbe840080, 0xd2890000, - 0x00000903, 0x80048104, - 0xd2890001, 0x00000903, - 0x80048104, 0xd2890002, - 0x00000903, 0x80048104, - 0xd2890003, 0x00000903, - 0x80048104, 0xc069003a, - 0x00000070, 0xbf8cc07f, - 0x80709070, 0xbf06c004, - 0xbf84ffee, 0xbf820008, - 0xe0724000, 0x701d0000, - 0xe0724100, 0x701d0100, - 0xe0724200, 0x701d0200, - 0xe0724300, 0x701d0300, - 0xbefe00c1, 0xbeff00c1, - 0xb8fb4306, 0x867bc17b, - 0xbf840064, 0xbf8a0000, - 0x867aff6f, 0x04000000, - 0xbf840060, 0x8e7b867b, - 0x8e7b827b, 0xbef6007b, + 0xc0071ebd, 0x00000008, + 0xbf8cc07f, 0x86ee6e6e, + 0xbf840001, 0xbe801d6e, + 0x866eff6d, 0x01ff0000, + 0xbf850005, 0x8778ff78, + 0x00002000, 0x80ec886c, + 0x82ed806d, 0xbf820005, + 0x866eff6d, 0x01000000, + 0xbf850002, 0x806c846c, + 0x826d806d, 0x866dff6d, + 0x0000ffff, 0x8f7a8b79, + 0x867aff7a, 0x001f8000, + 0xb97af807, 0x86fe7e7e, + 0x86ea6a6a, 0x8f6e8378, + 0xb96ee0c2, 0xbf800002, + 0xb9780002, 0xbe801f6c, + 0x866dff6d, 0x0000ffff, + 0xbefa0080, 0xb97a0283, + 0xb8faf807, 0x867aff7a, + 0x001f8000, 0x8e7a8b7a, + 0x8979ff79, 0xfc000000, + 0x87797a79, 0xba7ff807, + 0x00000000, 0xbeee007e, + 0xbeef007f, 0xbefe0180, + 0xbf900004, 0x877a8478, + 0xb97af802, 0xbf8e0002, + 0xbf88fffe, 0xb8fa2985, + 0x807a817a, 0x8e7a8a7a, + 0x8e7a817a, 0xb8fb1605, + 0x807b817b, 0x8e7b867b, + 0x807a7b7a, 0x807a7e7a, + 0x827b807f, 0x867bff7b, + 0x0000ffff, 0xc04b1c3d, + 0x00000050, 0xbf8cc07f, + 0xc04b1d3d, 0x00000060, + 0xbf8cc07f, 0xc0431e7d, + 0x00000074, 0xbf8cc07f, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 
0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0xbef1007c, 0xbef00080, 0xb8f02985, 0x80708170, 0x8e708a70, 0x8e708170, 0xb8fa1605, 0x807a817a, 0x8e7a867a, 0x80707a70, - 0x8070ff70, 0x00000080, - 0xbef600ff, 0x01000000, - 0xbefc0080, 0xd28c0002, - 0x000100c1, 0xd28d0003, - 0x000204c1, 0x867aff78, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xbefe007c, + 0xbefc0070, 0xc0611c7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611b3a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611b7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611bba, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611bfa, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611e3a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xb8fbf803, + 0xbefe007c, 0xbefc0070, + 0xc0611efa, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611a3a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611a7a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xb8f1f801, + 0xbefe007c, 0xbefc0070, + 0xc0611c7a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0x867aff7f, + 0x04000000, 0xbeef0080, + 0x876f6f7a, 0xb8f02985, + 0x80708170, 0x8e708a70, + 0x8e708170, 0xb8fb1605, + 0x807b817b, 0x8e7b847b, + 0x8e76827b, 0xbef600ff, + 0x01000000, 0xbef20174, + 0x80747074, 0x82758075, + 0xbefc0080, 0xbf800000, + 0xbe802b00, 0xbe822b02, + 0xbe842b04, 0xbe862b06, + 0xbe882b08, 0xbe8a2b0a, + 0xbe8c2b0c, 0xbe8e2b0e, + 0xc06b003a, 0x00000000, + 0xbf8cc07f, 0xc06b013a, + 0x00000010, 0xbf8cc07f, + 0xc06b023a, 0x00000020, + 0xbf8cc07f, 0xc06b033a, + 0x00000030, 0xbf8cc07f, + 0x8074c074, 0x82758075, + 0x807c907c, 0xbf0a7b7c, + 0xbf85ffe7, 0xbef40172, + 0xbef00080, 0xbefe00c1, + 0xbeff00c1, 0xbee80080, + 0xbee90080, 0xbef600ff, + 0x01000000, 0x867aff78, 0x00400000, 0xbf850003, 0xb8faf803, 0x897a7aff, - 0x10000000, 0xbf850030, - 0x24040682, 0xd86e4000, - 0x00000002, 0xbf8cc07f, + 0x10000000, 0xbf85004d, 0xbe840080, 0xd2890000, 0x00000900, 0x80048104, 0xd2890001, 0x00000900, @@ -3162,31 +3115,50 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, - 0x680404ff, 0x00000200, - 0xd0c9006a, 0x0000f702, - 0xbf87ffd2, 0xbf820015, - 0xd1060002, 0x00011103, - 0x7e0602ff, 0x00000200, - 0xbefc00ff, 0x00010000, - 0xbe800077, 0x8677ff77, - 0xff7fffff, 0x8777ff77, - 0x00058000, 0xd8ec0000, - 0x00000002, 0xbf8cc07f, - 0xe0765000, 0x701d0002, - 0x68040702, 0xd0c9006a, - 0x0000f702, 0xbf87fff7, - 0xbef70000, 0xbef000ff, - 0x00000400, 0xbefe00c1, - 0xbeff00c1, 0xb8fb2b05, - 0x807b817b, 0x8e7b827b, - 0xbef600ff, 0x01000000, - 0xbefc0084, 0xbf0a7b7c, - 0xbf84006d, 0xbf11017c, - 0x807bff7b, 0x00001000, + 0xbe840080, 0xd2890000, + 0x00000902, 0x80048104, + 0xd2890001, 0x00000902, + 0x80048104, 0xd2890002, + 0x00000902, 0x80048104, + 0xd2890003, 0x00000902, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000903, + 0x80048104, 0xd2890001, + 0x00000903, 0x80048104, + 0xd2890002, 0x00000903, + 0x80048104, 0xd2890003, + 0x00000903, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbf820008, 0xe0724000, + 0x701d0000, 0xe0724100, + 0x701d0100, 0xe0724200, + 0x701d0200, 0xe0724300, + 0x701d0300, 0xbefe00c1, + 0xbeff00c1, 0xb8fb4306, + 
0x867bc17b, 0xbf840064, + 0xbf8a0000, 0x867aff6f, + 0x04000000, 0xbf840060, + 0x8e7b867b, 0x8e7b827b, + 0xbef6007b, 0xb8f02985, + 0x80708170, 0x8e708a70, + 0x8e708170, 0xb8fa1605, + 0x807a817a, 0x8e7a867a, + 0x80707a70, 0x8070ff70, + 0x00000080, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xd28c0002, 0x000100c1, + 0xd28d0003, 0x000204c1, 0x867aff78, 0x00400000, 0xbf850003, 0xb8faf803, 0x897a7aff, 0x10000000, - 0xbf850051, 0xbe840080, + 0xbf850030, 0x24040682, + 0xd86e4000, 0x00000002, + 0xbf8cc07f, 0xbe840080, 0xd2890000, 0x00000900, 0x80048104, 0xd2890001, 0x00000900, 0x80048104, @@ -3205,51 +3177,31 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, - 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000902, - 0x80048104, 0xd2890001, - 0x00000902, 0x80048104, - 0xd2890002, 0x00000902, - 0x80048104, 0xd2890003, - 0x00000902, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, - 0xbe840080, 0xd2890000, - 0x00000903, 0x80048104, - 0xd2890001, 0x00000903, - 0x80048104, 0xd2890002, - 0x00000903, 0x80048104, - 0xd2890003, 0x00000903, - 0x80048104, 0xc069003a, - 0x00000070, 0xbf8cc07f, - 0x80709070, 0xbf06c004, - 0xbf84ffee, 0x807c847c, - 0xbf0a7b7c, 0xbf85ffb1, - 0xbf9c0000, 0xbf820012, - 0x7e000300, 0x7e020301, - 0x7e040302, 0x7e060303, - 0xe0724000, 0x701d0000, - 0xe0724100, 0x701d0100, - 0xe0724200, 0x701d0200, - 0xe0724300, 0x701d0300, - 0x807c847c, 0x8070ff70, - 0x00000400, 0xbf0a7b7c, - 0xbf85ffef, 0xbf9c0000, - 0xb8fb2985, 0x807b817b, - 0x8e7b837b, 0xb8fa2b05, - 0x807a817a, 0x8e7a827a, - 0x80fb7a7b, 0x867b7b7b, - 0xbf84007a, 0x807bff7b, - 0x00001000, 0xbefc0080, - 0xbf11017c, 0x867aff78, + 0xbf84ffee, 0x680404ff, + 0x00000200, 0xd0c9006a, + 0x0000f702, 0xbf87ffd2, + 0xbf820015, 0xd1060002, + 0x00011103, 0x7e0602ff, + 0x00000200, 0xbefc00ff, + 0x00010000, 0xbe800077, + 0x8677ff77, 0xff7fffff, + 0x8777ff77, 0x00058000, + 0xd8ec0000, 0x00000002, + 0xbf8cc07f, 0xe0765000, + 0x701d0002, 0x68040702, + 0xd0c9006a, 0x0000f702, + 0xbf87fff7, 0xbef70000, + 0xbef000ff, 0x00000400, + 0xbefe00c1, 0xbeff00c1, + 0xb8fb2b05, 0x807b817b, + 0x8e7b827b, 0xbef600ff, + 0x01000000, 0xbefc0084, + 0xbf0a7b7c, 0xbf84006d, + 0xbf11017c, 0x807bff7b, + 0x00001000, 0x867aff78, 0x00400000, 0xbf850003, 0xb8faf803, 0x897a7aff, - 0x10000000, 0xbf850059, - 0xd3d84000, 0x18000100, - 0xd3d84001, 0x18000101, - 0xd3d84002, 0x18000102, - 0xd3d84003, 0x18000103, + 0x10000000, 0xbf850051, 0xbe840080, 0xd2890000, 0x00000900, 0x80048104, 0xd2890001, 0x00000900, @@ -3289,137 +3241,200 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, 0x807c847c, 0xbf0a7b7c, - 0xbf85ffa9, 0xbf9c0000, - 0xbf820016, 0xd3d84000, - 0x18000100, 0xd3d84001, - 0x18000101, 0xd3d84002, - 0x18000102, 0xd3d84003, - 0x18000103, 0xe0724000, + 0xbf85ffb1, 0xbf9c0000, + 0xbf820012, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0xe0724000, 0x701d0000, 0xe0724100, 0x701d0100, 0xe0724200, 0x701d0200, 0xe0724300, 0x701d0300, 0x807c847c, 0x8070ff70, 0x00000400, - 0xbf0a7b7c, 0xbf85ffeb, - 0xbf9c0000, 0xbf8200ee, - 0xbef4007e, 0x8675ff7f, - 0x0000ffff, 0x8775ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x00807fac, - 0x866eff7f, 0x04000000, - 0xbf84001f, 0xbefe00c1, - 0xbeff00c1, 0xb8ef4306, - 0x866fc16f, 0xbf84001a, - 0x8e6f866f, 0x8e6f826f, - 0xbef6006f, 0xb8f82985, - 0x80788178, 0x8e788a78, - 0x8e788178, 0xb8ee1605, - 0x806e816e, 0x8e6e866e, - 0x80786e78, 0x8078ff78, - 0x00000080, 0xbef600ff, - 0x01000000, 
0xbefc0080, - 0xe0510000, 0x781d0000, - 0xe0510100, 0x781d0000, - 0x807cff7c, 0x00000200, - 0x8078ff78, 0x00000200, - 0xbf0a6f7c, 0xbf85fff6, + 0xbf0a7b7c, 0xbf85ffef, + 0xbf9c0000, 0xb8fb2985, + 0x807b817b, 0x8e7b837b, + 0xb8fa2b05, 0x807a817a, + 0x8e7a827a, 0x80fb7a7b, + 0x867b7b7b, 0xbf84007a, + 0x807bff7b, 0x00001000, + 0xbefc0080, 0xbf11017c, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf850059, 0xd3d84000, + 0x18000100, 0xd3d84001, + 0x18000101, 0xd3d84002, + 0x18000102, 0xd3d84003, + 0x18000103, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000902, + 0x80048104, 0xd2890001, + 0x00000902, 0x80048104, + 0xd2890002, 0x00000902, + 0x80048104, 0xd2890003, + 0x00000902, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000903, 0x80048104, + 0xd2890001, 0x00000903, + 0x80048104, 0xd2890002, + 0x00000903, 0x80048104, + 0xd2890003, 0x00000903, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0x807c847c, + 0xbf0a7b7c, 0xbf85ffa9, + 0xbf9c0000, 0xbf820016, + 0xd3d84000, 0x18000100, + 0xd3d84001, 0x18000101, + 0xd3d84002, 0x18000102, + 0xd3d84003, 0x18000103, + 0xe0724000, 0x701d0000, + 0xe0724100, 0x701d0100, + 0xe0724200, 0x701d0200, + 0xe0724300, 0x701d0300, + 0x807c847c, 0x8070ff70, + 0x00000400, 0xbf0a7b7c, + 0xbf85ffeb, 0xbf9c0000, + 0xbf8200ee, 0xbef4007e, + 0x8675ff7f, 0x0000ffff, + 0x8775ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x00807fac, 0x866eff7f, + 0x04000000, 0xbf84001f, 0xbefe00c1, 0xbeff00c1, + 0xb8ef4306, 0x866fc16f, + 0xbf84001a, 0x8e6f866f, + 0x8e6f826f, 0xbef6006f, + 0xb8f82985, 0x80788178, + 0x8e788a78, 0x8e788178, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0x8078ff78, 0x00000080, 0xbef600ff, 0x01000000, - 0xb8ef2b05, 0x806f816f, - 0x8e6f826f, 0x806fff6f, - 0x00008000, 0xbef80080, - 0xbeee0078, 0x8078ff78, - 0x00000400, 0xbefc0084, + 0xbefc0080, 0xe0510000, + 0x781d0000, 0xe0510100, + 0x781d0000, 0x807cff7c, + 0x00000200, 0x8078ff78, + 0x00000200, 0xbf0a6f7c, + 0xbf85fff6, 0xbefe00c1, + 0xbeff00c1, 0xbef600ff, + 0x01000000, 0xb8ef2b05, + 0x806f816f, 0x8e6f826f, + 0x806fff6f, 0x00008000, + 0xbef80080, 0xbeee0078, + 0x8078ff78, 0x00000400, + 0xbefc0084, 0xbf11087c, + 0xe0524000, 0x781d0000, + 0xe0524100, 0x781d0100, + 0xe0524200, 0x781d0200, + 0xe0524300, 0x781d0300, + 0xbf8c0f70, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0x807c847c, + 0x8078ff78, 0x00000400, + 0xbf0a6f7c, 0xbf85ffee, + 0xb8ef2985, 0x806f816f, + 0x8e6f836f, 0xb8f92b05, + 0x80798179, 0x8e798279, + 0x80ef796f, 0x866f6f6f, + 0xbf84001a, 0x806fff6f, + 0x00008000, 0xbefc0080, 0xbf11087c, 0xe0524000, 0x781d0000, 0xe0524100, 0x781d0100, 0xe0524200, 0x781d0200, 0xe0524300, 0x781d0300, 0xbf8c0f70, - 0x7e000300, 0x7e020301, - 0x7e040302, 0x7e060303, + 0xd3d94000, 0x18000100, + 0xd3d94001, 0x18000101, + 0xd3d94002, 0x18000102, + 0xd3d94003, 0x18000103, 0x807c847c, 0x8078ff78, 0x00000400, 0xbf0a6f7c, - 0xbf85ffee, 0xb8ef2985, - 0x806f816f, 0x8e6f836f, - 0xb8f92b05, 
0x80798179, - 0x8e798279, 0x80ef796f, - 0x866f6f6f, 0xbf84001a, - 0x806fff6f, 0x00008000, - 0xbefc0080, 0xbf11087c, - 0xe0524000, 0x781d0000, - 0xe0524100, 0x781d0100, - 0xe0524200, 0x781d0200, - 0xe0524300, 0x781d0300, - 0xbf8c0f70, 0xd3d94000, - 0x18000100, 0xd3d94001, - 0x18000101, 0xd3d94002, - 0x18000102, 0xd3d94003, - 0x18000103, 0x807c847c, - 0x8078ff78, 0x00000400, - 0xbf0a6f7c, 0xbf85ffea, - 0xbf9c0000, 0xe0524000, - 0x6e1d0000, 0xe0524100, - 0x6e1d0100, 0xe0524200, - 0x6e1d0200, 0xe0524300, - 0x6e1d0300, 0xbf8c0f70, - 0xb8f82985, 0x80788178, - 0x8e788a78, 0x8e788178, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0x80f8c078, 0xb8ef1605, - 0x806f816f, 0x8e6f846f, - 0x8e76826f, 0xbef600ff, - 0x01000000, 0xbefc006f, - 0xc031003a, 0x00000078, - 0x80f8c078, 0xbf8cc07f, - 0x80fc907c, 0xbf800000, - 0xbe802d00, 0xbe822d02, - 0xbe842d04, 0xbe862d06, - 0xbe882d08, 0xbe8a2d0a, - 0xbe8c2d0c, 0xbe8e2d0e, - 0xbf06807c, 0xbf84fff0, - 0xb8f82985, 0x80788178, - 0x8e788a78, 0x8e788178, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xc0211bfa, + 0xbf85ffea, 0xbf9c0000, + 0xe0524000, 0x6e1d0000, + 0xe0524100, 0x6e1d0100, + 0xe0524200, 0x6e1d0200, + 0xe0524300, 0x6e1d0300, + 0xbf8c0f70, 0xb8f82985, + 0x80788178, 0x8e788a78, + 0x8e788178, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0x80f8c078, + 0xb8ef1605, 0x806f816f, + 0x8e6f846f, 0x8e76826f, + 0xbef600ff, 0x01000000, + 0xbefc006f, 0xc031003a, + 0x00000078, 0x80f8c078, + 0xbf8cc07f, 0x80fc907c, + 0xbf800000, 0xbe802d00, + 0xbe822d02, 0xbe842d04, + 0xbe862d06, 0xbe882d08, + 0xbe8a2d0a, 0xbe8c2d0c, + 0xbe8e2d0e, 0xbf06807c, + 0xbf84fff0, 0xb8f82985, + 0x80788178, 0x8e788a78, + 0x8e788178, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0xbef60084, + 0xbef600ff, 0x01000000, + 0xc0211bfa, 0x00000078, + 0x80788478, 0xc0211b3a, 0x00000078, 0x80788478, - 0xc0211b3a, 0x00000078, - 0x80788478, 0xc0211b7a, + 0xc0211b7a, 0x00000078, + 0x80788478, 0xc0211c3a, 0x00000078, 0x80788478, - 0xc0211c3a, 0x00000078, - 0x80788478, 0xc0211c7a, + 0xc0211c7a, 0x00000078, + 0x80788478, 0xc0211eba, 0x00000078, 0x80788478, - 0xc0211eba, 0x00000078, - 0x80788478, 0xc0211efa, + 0xc0211efa, 0x00000078, + 0x80788478, 0xc0211a3a, 0x00000078, 0x80788478, - 0xc0211a3a, 0x00000078, - 0x80788478, 0xc0211a7a, + 0xc0211a7a, 0x00000078, + 0x80788478, 0xc0211cfa, 0x00000078, 0x80788478, - 0xc0211cfa, 0x00000078, - 0x80788478, 0xbf8cc07f, - 0xbefc006f, 0xbefe0070, - 0xbeff0071, 0x866f7bff, - 0x000003ff, 0xb96f4803, - 0x866f7bff, 0xfffff800, - 0x8f6f8b6f, 0xb96fa2c3, - 0xb973f801, 0xb8ee2985, - 0x806e816e, 0x8e6e8a6e, - 0x8e6e816e, 0xb8ef1605, - 0x806f816f, 0x8e6f866f, - 0x806e6f6e, 0x806e746e, - 0x826f8075, 0x866fff6f, - 0x0000ffff, 0xc00b1c37, - 0x00000050, 0xc00b1d37, - 0x00000060, 0xc0031e77, - 0x00000074, 0xbf8cc07f, - 0x8f6e8b79, 0x866eff6e, - 0x001f8000, 0xb96ef807, - 0x866dff6d, 0x0000ffff, - 0x86fe7e7e, 0x86ea6a6a, - 0x8f6e837a, 0xb96ee0c2, - 0xbf800002, 0xb97a0002, - 0xbf8a0000, 0xbe801f6c, - 0xbf810000, 0x00000000, + 0xbf8cc07f, 0xbefc006f, + 0xbefe0070, 0xbeff0071, + 0x866f7bff, 0x000003ff, + 0xb96f4803, 0x866f7bff, + 0xfffff800, 0x8f6f8b6f, + 0xb96fa2c3, 0xb973f801, + 0xb8ee2985, 0x806e816e, + 0x8e6e8a6e, 0x8e6e816e, + 0xb8ef1605, 0x806f816f, + 0x8e6f866f, 0x806e6f6e, + 0x806e746e, 0x826f8075, + 0x866fff6f, 0x0000ffff, + 0xc00b1c37, 0x00000050, + 0xc00b1d37, 0x00000060, + 0xc0031e77, 0x00000074, + 0xbf8cc07f, 0x8f6e8b79, + 0x866eff6e, 0x001f8000, + 0xb96ef807, 0x866dff6d, + 0x0000ffff, 0x86fe7e7e, + 0x86ea6a6a, 
0x8f6e837a, + 0xb96ee0c2, 0xbf800002, + 0xb97a0002, 0xbf8a0000, + 0xbe801f6c, 0xbf810000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 8b92c33c2a7c..fdab64624422 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -276,6 +276,11 @@ L_FETCH_2ND_TRAP: #endif s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 + s_bitcmp1_b32 ttmp15, 0xF + s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA + s_or_b32 ttmp15, ttmp15, 0xFFFF0000 +L_NO_SIGN_EXTEND_TMA: + s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag s_waitcnt lgkmcnt(0) s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index f2087cc2e89d..e506411ad28a 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -283,6 +283,11 @@ L_FETCH_2ND_TRAP: s_getreg_b32 ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI) s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 + s_bitcmp1_b32 ttmp15, 0xF + s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA + s_or_b32 ttmp15, ttmp15, 0xFFFF0000 +L_NO_SIGN_EXTEND_TMA: + s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag s_waitcnt lgkmcnt(0) s_lshl_b32 ttmp2, ttmp2, TTMP_DEBUG_TRAP_ENABLED_SHIFT diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 6a27b000a246..c37f1fcd2165 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -333,10 +333,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, goto err_bind_process; } - if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(dev->kfd, &pdd->doorbell_index) < 0) { - err = -ENOMEM; - goto err_alloc_doorbells; + if (!pdd->qpd.proc_doorbells) { + err = kfd_alloc_process_doorbells(dev->kfd, pdd); + if (err) { + pr_debug("failed to allocate process doorbells\n"); + goto err_bind_process; + } } /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work @@ -417,7 +419,6 @@ err_create_queue: if (wptr_bo) amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo); err_wptr_map_gart: -err_alloc_doorbells: err_bind_process: err_pdd: mutex_unlock(&p->mutex); @@ -1025,9 +1026,6 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev) return true; } - if (dev->kfd->use_iommu_v2) - return false; - if (dev->local_mem_info.local_mem_size_private == 0 && dev->local_mem_info.local_mem_size_public > 0) return true; @@ -1487,7 +1485,8 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep, goto out_unlock; } - if (!kfd_dbg_has_gws_support(dev) && p->debug_trap_enabled) { + if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) || + kfd_dbg_has_cwsr_workaround(dev))) { retval = -EBUSY; goto out_unlock; } @@ -1845,22 +1844,21 @@ static uint32_t get_process_num_bos(struct kfd_process *p) idr_for_each_entry(&pdd->alloc_idr, mem, id) { struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; - if ((uint64_t)kgd_mem->va > pdd->gpuvm_base) + if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base) num_of_bos++; } } return num_of_bos; } -static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags, +static int criu_get_prime_handle(struct kgd_mem *mem, int flags, u32 *shared_fd) { struct dma_buf *dmabuf; int ret; - dmabuf = amdgpu_gem_prime_export(gobj, flags); - if (IS_ERR(dmabuf)) { 
- ret = PTR_ERR(dmabuf);
+ ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
+ if (ret) {
 pr_err("dmabuf export failed for the BO\n");
 return ret;
 }
@@ -1918,7 +1916,11 @@ static int criu_checkpoint_bos(struct kfd_process *p,
 kgd_mem = (struct kgd_mem *)mem;
 dumper_bo = kgd_mem->bo;
- if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
+ /* Skip checkpointing BOs that are used for Trap handler
+ * code and state. Currently, these BOs have a VA that
+ * is less than the GPUVM base.
+ */
+ if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
 continue;
 bo_bucket = &bo_buckets[bo_index];
@@ -1940,7 +1942,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
 }
 if (bo_bucket->alloc_flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
- ret = criu_get_prime_handle(&dumper_bo->tbo.base,
+ ret = criu_get_prime_handle(kgd_mem,
 bo_bucket->alloc_flags &
 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
 &bo_bucket->dmabuf_fd);
@@ -2262,10 +2264,10 @@ static int criu_restore_devices(struct kfd_process *p,
 goto exit;
 }
- if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) {
- ret = -ENOMEM;
- goto exit;
+ if (!pdd->qpd.proc_doorbells) {
+ ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
+ if (ret)
+ goto exit;
 }
 }
@@ -2402,7 +2404,7 @@ static int criu_restore_bo(struct kfd_process *p,
 /* create the dmabuf object and export the bo */
 if (bo_bucket->alloc_flags & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
- ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
+ ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
 &bo_bucket->dmabuf_fd);
 if (ret)
 return ret;
@@ -2755,6 +2757,16 @@ static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
 if (pdd->qpd.queue_count)
 return -EEXIST;
+
+ /*
+ * Set up TTMPs by default.
+ * Note that this call must remain here for MES ADD QUEUE to
+ * skip_process_ctx_clear unconditionally, as the first call to
+ * SET_SHADER_DEBUGGER clears any stale process context data
+ * saved in MES.
+ */
+ if (pdd->dev->kfd->shared_resources.enable_mes)
+ kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
 }
 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
@@ -2848,7 +2860,8 @@ static int runtime_disable(struct kfd_process *p)
 if (!pdd->dev->kfd->shared_resources.enable_mes)
 debug_refresh_runlist(pdd->dev->dqm);
 else
- kfd_dbg_set_mes_debug_mode(pdd);
+ kfd_dbg_set_mes_debug_mode(pdd,
+ !kfd_dbg_has_cwsr_workaround(pdd->dev));
 }
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index f5a6f562e2a8..86fb7ac7982a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -26,7 +26,6 @@
 #include "kfd_crat.h"
 #include "kfd_priv.h"
 #include "kfd_topology.h"
-#include "kfd_iommu.h"
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
@@ -1536,72 +1535,6 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
 return num_of_cache_types;
 }
-static bool kfd_ignore_crat(void)
-{
- bool ret;
-
- if (ignore_crat)
- return true;
-
- ret = true;
-
- return ret;
-}
-
-/*
- * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
- * copies CRAT from ACPI (if available).
- * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
- *
- * @crat_image: CRAT read from ACPI.
If no CRAT in ACPI then - * crat_image will be NULL - * @size: [OUT] size of crat_image - * - * Return 0 if successful else return error code - */ -int kfd_create_crat_image_acpi(void **crat_image, size_t *size) -{ - struct acpi_table_header *crat_table; - acpi_status status; - void *pcrat_image; - int rc = 0; - - if (!crat_image) - return -EINVAL; - - *crat_image = NULL; - - if (kfd_ignore_crat()) { - pr_info("CRAT table disabled by module option\n"); - return -ENODATA; - } - - /* Fetch the CRAT table from ACPI */ - status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); - if (status == AE_NOT_FOUND) { - pr_info("CRAT table not found\n"); - return -ENODATA; - } else if (ACPI_FAILURE(status)) { - const char *err = acpi_format_exception(status); - - pr_err("CRAT table error: %s\n", err); - return -EINVAL; - } - - pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL); - if (!pcrat_image) { - rc = -ENOMEM; - goto out; - } - - memcpy(pcrat_image, crat_table, crat_table->length); - *crat_image = pcrat_image; - *size = crat_table->length; -out: - acpi_put_table(crat_table); - return rc; -} - /* Memory required to create Virtual CRAT. * Since there is no easy way to predict the amount of memory required, the * following amount is allocated for GPU Virtual CRAT. This is @@ -2169,12 +2102,6 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, cu->hsa_capability = 0; - /* Check if this node supports IOMMU. During parsing this flag will - * translate to HSA_CAP_ATS_PRESENT - */ - if (!kfd_iommu_check_device(kdev->kfd)) - cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; - crat_table->length += sub_type_hdr->length; crat_table->total_entries++; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index fc719389b5d6..387a8ef49385 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -307,7 +307,6 @@ struct kfd_gpu_cache_info { }; int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info); -int kfd_create_crat_image_acpi(void **crat_image, size_t *size); void kfd_destroy_crat_image(void *crat_image); int kfd_parse_crat_table(void *crat_image, struct list_head *device_list, uint32_t proximity_domain); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 9766076e9ec4..9ec750666382 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -344,11 +344,10 @@ unwind: return r; } -int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) +int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; uint32_t flags = pdd->process->dbg_flags; - bool sq_trap_en = !!spi_dbg_cntl || !kfd_dbg_has_cwsr_workaround(pdd->dev); if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) return 0; @@ -432,7 +431,7 @@ int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_map_and_unlock(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); kfd_dbg_clear_dev_watch_id(pdd, watch_id); @@ -445,7 +444,8 @@ int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, uint32_t *watch_id, uint32_t watch_mode) { - int r = kfd_dbg_get_dev_watch_id(pdd, watch_id); + int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id); + uint32_t xcc_mask = pdd->dev->xcc_mask; if (r) return r; @@ -459,19 +459,21 @@ int 
kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, } amdgpu_gfx_off_ctrl(pdd->dev->adev, false); - pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch( + for_each_inst(xcc_id, xcc_mask) + pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch( pdd->dev->adev, watch_address, watch_address_mask, *watch_id, watch_mode, - pdd->dev->vm_info.last_vmid_kfd); + pdd->dev->vm_info.last_vmid_kfd, + xcc_id); amdgpu_gfx_off_ctrl(pdd->dev->adev, true); if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_map_and_unlock(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); /* HWS is broken so no point in HW rollback but release the watchpoint anyways */ if (r) @@ -513,7 +515,7 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) { target->dbg_flags = prev_flags; @@ -536,7 +538,7 @@ int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags) if (!pdd->dev->kfd->shared_resources.enable_mes) debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd); + kfd_dbg_set_mes_debug_mode(pdd, true); } } @@ -598,7 +600,7 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (!pdd->dev->kfd->shared_resources.enable_mes) debug_refresh_runlist(pdd->dev->dqm); else - kfd_dbg_set_mes_debug_mode(pdd); + kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); } kfd_dbg_set_workaround(target, false); @@ -714,7 +716,7 @@ int kfd_dbg_trap_activate(struct kfd_process *target) if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) { target->runtime_info.runtime_state = @@ -750,7 +752,8 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, if (!KFD_IS_SOC15(pdd->dev)) return -ENODEV; - if (!kfd_dbg_has_gws_support(pdd->dev) && pdd->qpd.num_gws) + if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) || + kfd_dbg_has_cwsr_workaround(pdd->dev))) return -EBUSY; } @@ -847,7 +850,7 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) break; @@ -879,7 +882,7 @@ int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, if (!pdd->dev->kfd->shared_resources.enable_mes) r = debug_refresh_runlist(pdd->dev->dqm); else - r = kfd_dbg_set_mes_debug_mode(pdd); + r = kfd_dbg_set_mes_debug_mode(pdd, true); if (r) break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 662a13a0d582..fd0ff64d4184 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -76,8 +76,9 @@ int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) { - return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || - KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0); + return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || + KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || + KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0)); } void debug_event_write_work_handler(struct work_struct *work); @@ 
-125,5 +126,14 @@ static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev) return true; } -int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd); +int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en); + +static inline bool kfd_dbg_has_ttmps_always_setup(struct kfd_node *dev) +{ + return (KFD_GC_VERSION(dev) < IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 2)) || + (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0) && + (dev->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 70); +} #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index a53e0757fe64..93ce181eb3ba 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -29,7 +29,6 @@ #include "kfd_pm4_headers_vi.h" #include "kfd_pm4_headers_aldebaran.h" #include "cwsr_trap_handler.h" -#include "kfd_iommu.h" #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" #include "kfd_svm.h" @@ -62,7 +61,6 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size); static void kfd_gtt_sa_fini(struct kfd_dev *kfd); -static int kfd_resume_iommu(struct kfd_dev *kfd); static int kfd_resume(struct kfd_node *kfd); static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) @@ -442,8 +440,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) atomic_set(&kfd->compute_profile, 0); mutex_init(&kfd->doorbell_mutex); - memset(&kfd->doorbell_available_index, 0, - sizeof(kfd->doorbell_available_index)); ida_init(&kfd->doorbell_ida); @@ -495,6 +491,7 @@ static int kfd_gws_init(struct kfd_node *node) { int ret = 0; struct kfd_dev *kfd = node->kfd; + uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; @@ -511,7 +508,10 @@ static int kfd_gws_init(struct kfd_node *node) (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) || (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) - && kfd->mec2_fw_version >= 0x6b)))) + && kfd->mec2_fw_version >= 0x6b) || + (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0) + && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0) + && mes_rev >= 68)))) ret = amdgpu_amdkfd_alloc_gws(node->adev, node->adev->gds.gws_size, &node->gws); @@ -753,15 +753,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->noretry = kfd->adev->gmc.noretry; - /* If CRAT is broken, won't set iommu enabled */ - kfd_double_confirm_iommu_support(kfd); - - if (kfd_iommu_device_init(kfd)) { - kfd->use_iommu_v2 = false; - dev_err(kfd_device, "Error initializing iommuv2\n"); - goto device_iommu_error; - } - kfd_cwsr_init(kfd); dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", @@ -836,9 +827,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_range_set_max_pages(kfd->adev); - if (kfd_resume_iommu(kfd)) - goto kfd_resume_iommu_error; - spin_lock_init(&kfd->watch_points_lock); kfd->init_complete = true; @@ -850,11 +838,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto out; -kfd_resume_iommu_error: node_init_error: node_alloc_error: kfd_cleanup_nodes(kfd, i); -device_iommu_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: kfd_gtt_sa_fini(kfd); @@ -969,7 +955,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) node = kfd->nodes[i]; node->dqm->ops.stop(node->dqm); } - kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) @@ -999,26 +984,6 @@ 
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } -int kgd2kfd_resume_iommu(struct kfd_dev *kfd) -{ - if (!kfd->init_complete) - return 0; - - return kfd_resume_iommu(kfd); -} - -static int kfd_resume_iommu(struct kfd_dev *kfd) -{ - int err = 0; - - err = kfd_iommu_resume(kfd); - if (err) - dev_err(kfd_device, - "Failed to resume IOMMU for device %x:%x\n", - kfd->adev->pdev->vendor, kfd->adev->pdev->device); - return err; -} - static int kfd_resume(struct kfd_node *node) { int err = 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 01192f5abe46..b166f30f083e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -227,7 +227,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.tba_addr = qpd->tba_addr; queue_input.tma_addr = qpd->tma_addr; queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device); - queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; + queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled || + kfd_dbg_has_ttmps_always_setup(q->device); queue_type = convert_to_mes_queue_type(q->properties.type); if (queue_type < 0) { @@ -237,10 +238,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, } queue_input.queue_type = (uint32_t)queue_type; - if (q->gws) { - queue_input.gws_base = 0; - queue_input.gws_size = qpd->num_gws; - } + queue_input.exclusively_scheduled = q->properties.is_gws; amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input); @@ -250,7 +248,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, q->properties.doorbell_off); pr_err("MES might be in unrecoverable state, issue a GPU reset\n"); kfd_hws_hang(dqm); -} + } return r; } @@ -397,7 +395,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, unsigned int found; found = find_first_zero_bit(qpd->doorbell_bitmap, - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { pr_debug("No doorbells available"); return -EBUSY; @@ -407,9 +405,9 @@ static int allocate_doorbell(struct qcm_process_device *qpd, } } - q->properties.doorbell_off = - kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd), - q->doorbell_id); + q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev, + qpd->proc_doorbells, + q->doorbell_id); return 0; } @@ -1620,7 +1618,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm) if (dqm->dev->kfd2kgd->get_iq_wait_times) dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev, - &dqm->wait_times); + &dqm->wait_times, + ffs(dqm->dev->xcc_mask) - 1); return 0; } @@ -1662,6 +1661,26 @@ static int start_cpsch(struct device_queue_manager *dqm) if (!dqm->dev->kfd->shared_resources.enable_mes) execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + + /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */ + if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu && + (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) { + uint32_t reg_offset = 0; + uint32_t grace_period = 1; + + retval = pm_update_grace_period(&dqm->packet_mgr, + grace_period); + if (retval) + pr_err("Setting grace timeout failed\n"); + else if (dqm->dev->kfd2kgd->build_grace_period_packet_info) + /* Update dqm->wait_times maintained in software */ + 
dqm->dev->kfd2kgd->build_grace_period_packet_info(
+ dqm->dev->adev, dqm->wait_times,
+ grace_period, &reg_offset,
+ &dqm->wait_times,
+ ffs(dqm->dev->xcc_mask) - 1);
+ }
+
 dqm_unlock(dqm);
 return 0;
@@ -2540,7 +2559,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
 switch (dev->adev->asic_type) {
 case CHIP_KAVERI:
 case CHIP_HAWAII:
- device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
+ device_queue_manager_init_cik(&dqm->asic_ops);
 break;
 case CHIP_CARRIZO:
@@ -2550,14 +2569,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
 case CHIP_POLARIS11:
 case CHIP_POLARIS12:
 case CHIP_VEGAM:
- device_queue_manager_init_vi_tonga(&dqm->asic_ops);
+ device_queue_manager_init_vi(&dqm->asic_ops);
 break;
 default:
 if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
 device_queue_manager_init_v11(&dqm->asic_ops);
 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
- device_queue_manager_init_v10_navi10(&dqm->asic_ops);
+ device_queue_manager_init_v10(&dqm->asic_ops);
 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
 device_queue_manager_init_v9(&dqm->asic_ops);
 else {
@@ -2797,19 +2816,11 @@ static void copy_context_work_handler (struct work_struct *work)
 static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
 {
 size_t array_size = num_queues * sizeof(uint32_t);
- uint32_t *queue_ids = NULL;
 if (!usr_queue_id_array)
 return NULL;
- queue_ids = kzalloc(array_size, GFP_KERNEL);
- if (!queue_ids)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(queue_ids, usr_queue_id_array, array_size))
- return ERR_PTR(-EFAULT);
-
- return queue_ids;
+ return memdup_user(usr_queue_id_array, array_size);
 }
 int resume_queues(struct kfd_process *p,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 7dd4b177219d..cf7e182588f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -269,15 +269,11 @@ struct device_queue_manager {
 void device_queue_manager_init_cik(
 struct device_queue_manager_asic_ops *asic_ops);
-void device_queue_manager_init_cik_hawaii(
- struct device_queue_manager_asic_ops *asic_ops);
 void device_queue_manager_init_vi(
 struct device_queue_manager_asic_ops *asic_ops);
-void device_queue_manager_init_vi_tonga(
- struct device_queue_manager_asic_ops *asic_ops);
 void device_queue_manager_init_v9(
 struct device_queue_manager_asic_ops *asic_ops);
-void device_queue_manager_init_v10_navi10(
+void device_queue_manager_init_v10(
 struct device_queue_manager_asic_ops *asic_ops);
 void device_queue_manager_init_v11(
 struct device_queue_manager_asic_ops *asic_ops);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index b1ab5b0775e1..d4d95c7f2e5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -34,17 +34,13 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
 void __user *alternate_aperture_base,
 uint64_t alternate_aperture_size);
 static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd);
-static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd);
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd);
-static void init_sdma_vm_hawaii(struct
device_queue_manager *dqm, - struct queue *q, - struct qcm_process_device *qpd); + struct qcm_process_device *qpd); +static void init_sdma_vm(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd); void device_queue_manager_init_cik( - struct device_queue_manager_asic_ops *asic_ops) + struct device_queue_manager_asic_ops *asic_ops) { asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik; asic_ops->update_qpd = update_qpd_cik; @@ -52,15 +48,6 @@ void device_queue_manager_init_cik( asic_ops->mqd_manager_init = mqd_manager_init_cik; } -void device_queue_manager_init_cik_hawaii( - struct device_queue_manager_asic_ops *asic_ops) -{ - asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik; - asic_ops->update_qpd = update_qpd_cik_hawaii; - asic_ops->init_sdma_vm = init_sdma_vm_hawaii; - asic_ops->mqd_manager_init = mqd_manager_init_cik_hawaii; -} - static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) { /* In 64-bit mode, we can only control the top 3 bits of the LDS, @@ -115,41 +102,7 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, } static int update_qpd_cik(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) -{ - struct kfd_process_device *pdd; - unsigned int temp; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) | - DEFAULT_MTYPE(MTYPE_NONCACHED) | - APE1_MTYPE(MTYPE_NONCACHED); - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } - - if (qpd->pqm->process->is_32bit_user_mode) { - temp = get_sh_mem_bases_32(pdd); - qpd->sh_mem_bases = SHARED_BASE(temp); - qpd->sh_mem_config |= PTR32; - } else { - temp = get_sh_mem_bases_nybble_64(pdd); - qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); - qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT; - } - - pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", - qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); - - return 0; -} - -static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) + struct qcm_process_device *qpd) { struct kfd_process_device *pdd; unsigned int temp; @@ -178,25 +131,9 @@ static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, return 0; } -static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, - struct qcm_process_device *qpd) -{ - uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT); - - if (q->process->is_32bit_user_mode) - value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) | - get_sh_mem_bases_32(qpd_to_pdd(qpd)); - else - value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << - SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & - SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; - - q->properties.sdma_vm_addr = value; -} - -static void init_sdma_vm_hawaii(struct device_queue_manager *dqm, - struct queue *q, - struct qcm_process_device *qpd) +static void init_sdma_vm(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd) { /* On dGPU we're always in GPUVM64 addressing mode with 64-bit * aperture addresses. 
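The kfd_device_queue_manager_cik.c hunk above is cut off in this copy. For orientation, here is a rough sketch of the consolidated dGPU-only helper the merged Hawaii path reduces to, reconstructed from the removed ATC-based init_sdma_vm shown in the hunk by keeping only its 64-bit branch without the ATC and PTR32 bits; treat it as an approximation, not the verbatim patch result:

static void init_sdma_vm(struct device_queue_manager *dqm,
			 struct queue *q,
			 struct qcm_process_device *qpd)
{
	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses, so only the SHARED_BASE field is programmed.
	 */
	q->properties.sdma_vm_addr =
		((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
		 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
		SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}

With the IOMMUv2/APU support removed earlier in this series (the Kconfig and Makefile hunks dropping AMD_IOMMU_V2 and kfd_iommu.o), the 32-bit ATC path has no remaining users, which is why the *_hawaii variants can be folded into the generic CIK ops.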
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c index f1a1f5753e65..245a90dfc2f6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c @@ -32,7 +32,7 @@ static int update_qpd_v10(struct device_queue_manager *dqm, static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); -void device_queue_manager_init_v10_navi10( +void device_queue_manager_init_v10( struct device_queue_manager_asic_ops *asic_ops) { asic_ops->update_qpd = update_qpd_v10; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 8af643388768..54eb1bff903c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm, qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->kfd->noretry && !dqm->dev->kfd->use_iommu_v2) + if (dqm->dev->kfd->noretry) qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index d7d45832df0f..b291ee0fab94 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -28,29 +28,19 @@ #include "oss/oss_3_0_sh_mask.h" static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, - struct qcm_process_device *qpd, - enum cache_policy default_policy, - enum cache_policy alternate_policy, - void __user *alternate_aperture_base, - uint64_t alternate_aperture_size); -static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, - struct qcm_process_device *qpd, - enum cache_policy default_policy, - enum cache_policy alternate_policy, - void __user *alternate_aperture_base, - uint64_t alternate_aperture_size); + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size); static int update_qpd_vi(struct device_queue_manager *dqm, - struct qcm_process_device *qpd); -static int update_qpd_vi_tonga(struct device_queue_manager *dqm, - struct qcm_process_device *qpd); -static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, - struct qcm_process_device *qpd); -static void init_sdma_vm_tonga(struct device_queue_manager *dqm, - struct queue *q, - struct qcm_process_device *qpd); + struct qcm_process_device *qpd); +static void init_sdma_vm(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd); void device_queue_manager_init_vi( - struct device_queue_manager_asic_ops *asic_ops) + struct device_queue_manager_asic_ops *asic_ops) { asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi; asic_ops->update_qpd = update_qpd_vi; @@ -58,15 +48,6 @@ void device_queue_manager_init_vi( asic_ops->mqd_manager_init = mqd_manager_init_vi; } -void device_queue_manager_init_vi_tonga( - struct device_queue_manager_asic_ops *asic_ops) -{ - asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga; - asic_ops->update_qpd = update_qpd_vi_tonga; - asic_ops->init_sdma_vm 
= init_sdma_vm_tonga; - asic_ops->mqd_manager_init = mqd_manager_init_vi_tonga; -} - static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) { /* In 64-bit mode, we can only control the top 3 bits of the LDS, @@ -96,35 +77,6 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) } static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, - struct qcm_process_device *qpd, - enum cache_policy default_policy, - enum cache_policy alternate_policy, - void __user *alternate_aperture_base, - uint64_t alternate_aperture_size) -{ - uint32_t default_mtype; - uint32_t ape1_mtype; - - default_mtype = (default_policy == cache_policy_coherent) ? - MTYPE_CC : - MTYPE_NC; - - ape1_mtype = (alternate_policy == cache_policy_coherent) ? - MTYPE_CC : - MTYPE_NC; - - qpd->sh_mem_config = (qpd->sh_mem_config & - SH_MEM_CONFIG__ADDRESS_MODE_MASK) | - SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | - default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | - ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT | - SH_MEM_CONFIG__PRIVATE_ATC_MASK; - - return true; -} - -static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, enum cache_policy alternate_policy, @@ -152,48 +104,7 @@ static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, } static int update_qpd_vi(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) -{ - struct kfd_process_device *pdd; - unsigned int temp; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | - MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | - MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT | - SH_MEM_CONFIG__PRIVATE_ATC_MASK; - - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } - - if (qpd->pqm->process->is_32bit_user_mode) { - temp = get_sh_mem_bases_32(pdd); - qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT; - qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 << - SH_MEM_CONFIG__ADDRESS_MODE__SHIFT; - } else { - temp = get_sh_mem_bases_nybble_64(pdd); - qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); - qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 << - SH_MEM_CONFIG__ADDRESS_MODE__SHIFT; - qpd->sh_mem_config |= 1 << - SH_MEM_CONFIG__PRIVATE_ATC__SHIFT; - } - - pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", - qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); - - return 0; -} - -static int update_qpd_vi_tonga(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) + struct qcm_process_device *qpd) { struct kfd_process_device *pdd; unsigned int temp; @@ -226,25 +137,9 @@ static int update_qpd_vi_tonga(struct device_queue_manager *dqm, return 0; } -static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, - struct qcm_process_device *qpd) -{ - uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT); - - if (q->process->is_32bit_user_mode) - value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) | - get_sh_mem_bases_32(qpd_to_pdd(qpd)); - else - value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << - SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & - SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; - - q->properties.sdma_vm_addr = value; -} - -static void init_sdma_vm_tonga(struct device_queue_manager *dqm, - 
struct queue *q, - struct qcm_process_device *qpd) +static void init_sdma_vm(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd) { /* On dGPU we're always in GPUVM64 addressing mode with 64-bit * aperture addresses. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 6421b620388d..c2e0b79dcc6d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -61,81 +61,46 @@ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) /* Doorbell calculations for device init. */ int kfd_doorbell_init(struct kfd_dev *kfd) { - size_t doorbell_start_offset; - size_t doorbell_aperture_size; - size_t doorbell_process_limit; + int size = PAGE_SIZE; + int r; /* - * With MES enabled, just set the doorbell base as it is needed - * to calculate doorbell physical address. + * Todo: KFD kernel level operations need only one doorbell for + * ring test/HWS. So instead of reserving a whole page here for + * kernel, reserve and consume a doorbell from existing KGD kernel + * doorbell page. */ - if (kfd->shared_resources.enable_mes) { - kfd->doorbell_base = - kfd->shared_resources.doorbell_physical_address; - return 0; + + /* Bitmap to dynamically allocate doorbells from kernel page */ + kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL); + if (!kfd->doorbell_bitmap) { + DRM_ERROR("Failed to allocate kernel doorbell bitmap\n"); + return -ENOMEM; } - /* - * We start with calculations in bytes because the input data might - * only be byte-aligned. - * Only after we have done the rounding can we assume any alignment. - */ - - doorbell_start_offset = - roundup(kfd->shared_resources.doorbell_start_offset, - kfd_doorbell_process_slice(kfd)); - - doorbell_aperture_size = - rounddown(kfd->shared_resources.doorbell_aperture_size, - kfd_doorbell_process_slice(kfd)); - - if (doorbell_aperture_size > doorbell_start_offset) - doorbell_process_limit = - (doorbell_aperture_size - doorbell_start_offset) / - kfd_doorbell_process_slice(kfd); - else - return -ENOSPC; - - if (!kfd->max_doorbell_slices || - doorbell_process_limit < kfd->max_doorbell_slices) - kfd->max_doorbell_slices = doorbell_process_limit; - - kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address + - doorbell_start_offset; - - kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32); - - kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, - kfd_doorbell_process_slice(kfd)); - - if (!kfd->doorbell_kernel_ptr) - return -ENOMEM; - - pr_debug("Doorbell initialization:\n"); - pr_debug("doorbell base == 0x%08lX\n", - (uintptr_t)kfd->doorbell_base); - - pr_debug("doorbell_base_dw_offset == 0x%08lX\n", - kfd->doorbell_base_dw_offset); - - pr_debug("doorbell_process_limit == 0x%08lX\n", - doorbell_process_limit); - - pr_debug("doorbell_kernel_offset == 0x%08lX\n", - (uintptr_t)kfd->doorbell_base); - - pr_debug("doorbell aperture size == 0x%08lX\n", - kfd->shared_resources.doorbell_aperture_size); - - pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr); + /* Alloc a doorbell page for KFD kernel usages */ + r = amdgpu_bo_create_kernel(kfd->adev, + size, + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_DOORBELL, + &kfd->doorbells, + NULL, + (void **)&kfd->doorbell_kernel_ptr); + if (r) { + pr_err("failed to allocate kernel doorbells\n"); + bitmap_free(kfd->doorbell_bitmap); + return r; + } + pr_debug("Doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr); return 0; } void kfd_doorbell_fini(struct 
kfd_dev *kfd) { - if (kfd->doorbell_kernel_ptr) - iounmap(kfd->doorbell_kernel_ptr); + bitmap_free(kfd->doorbell_bitmap); + amdgpu_bo_free_kernel(&kfd->doorbells, NULL, + (void **)&kfd->doorbell_kernel_ptr); } int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, @@ -188,22 +153,15 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, u32 inx; mutex_lock(&kfd->doorbell_mutex); - inx = find_first_zero_bit(kfd->doorbell_available_index, - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); + inx = find_first_zero_bit(kfd->doorbell_bitmap, PAGE_SIZE / sizeof(u32)); - __set_bit(inx, kfd->doorbell_available_index); + __set_bit(inx, kfd->doorbell_bitmap); mutex_unlock(&kfd->doorbell_mutex); if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) return NULL; - inx *= kfd->device_info.doorbell_size / sizeof(u32); - - /* - * Calculating the kernel doorbell offset using the first - * doorbell page. - */ - *doorbell_off = kfd->doorbell_base_dw_offset + inx; + *doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx); pr_debug("Get kernel queue doorbell\n" " doorbell offset == 0x%08X\n" @@ -217,11 +175,10 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) { unsigned int inx; - inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr) - * sizeof(u32) / kfd->device_info.doorbell_size; + inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr); mutex_lock(&kfd->doorbell_mutex); - __clear_bit(inx, kfd->doorbell_available_index); + __clear_bit(inx, kfd->doorbell_bitmap); mutex_unlock(&kfd->doorbell_mutex); } @@ -243,80 +200,96 @@ void write_kernel_doorbell64(void __iomem *db, u64 value) } } -unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, - struct kfd_process_device *pdd, - unsigned int doorbell_id) +static int init_doorbell_bitmap(struct qcm_process_device *qpd, + struct kfd_dev *dev) { - /* - * doorbell_base_dw_offset accounts for doorbells taken by KGD. - * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to - * the process's doorbells. The offset returned is in dword - * units regardless of the ASIC-dependent doorbell size. - */ - if (!kfd->shared_resources.enable_mes) - return kfd->doorbell_base_dw_offset + - pdd->doorbell_index - * kfd_doorbell_process_slice(kfd) / sizeof(u32) + - doorbell_id * - kfd->device_info.doorbell_size / sizeof(u32); - else - return amdgpu_mes_get_doorbell_dw_offset_in_bar( - (struct amdgpu_device *)kfd->adev, - pdd->doorbell_index, doorbell_id); -} + unsigned int i; + int range_start = dev->shared_resources.non_cp_doorbells_start; + int range_end = dev->shared_resources.non_cp_doorbells_end; -uint64_t kfd_get_number_elems(struct kfd_dev *kfd) -{ - uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size - - kfd->shared_resources.doorbell_start_offset) / - kfd_doorbell_process_slice(kfd) + 1; + if (!KFD_IS_SOC15(dev)) + return 0; - return num_of_elems; + /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. 
*/ + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", + range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); + for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { + if (i >= range_start && i <= range_end) { + __set_bit(i, qpd->doorbell_bitmap); + __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + qpd->doorbell_bitmap); + } + } + + return 0; } phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd) { - if (!pdd->doorbell_index) { - int r = kfd_alloc_process_doorbells(pdd->dev->kfd, - &pdd->doorbell_index); - if (r < 0) + struct amdgpu_device *adev = pdd->dev->adev; + uint32_t first_db_index; + + if (!pdd->qpd.proc_doorbells) { + if (kfd_alloc_process_doorbells(pdd->dev->kfd, pdd)) + /* phys_addr_t 0 is error */ return 0; } - return pdd->dev->kfd->doorbell_base + - pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev->kfd); + first_db_index = amdgpu_doorbell_index_on_bar(adev, pdd->qpd.proc_doorbells, 0); + return adev->doorbell.base + first_db_index * sizeof(uint32_t); } -int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index) +int kfd_alloc_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd) { - int r = 0; + int r; + struct qcm_process_device *qpd = &pdd->qpd; - if (!kfd->shared_resources.enable_mes) - r = ida_simple_get(&kfd->doorbell_ida, 1, - kfd->max_doorbell_slices, GFP_KERNEL); - else - r = amdgpu_mes_alloc_process_doorbells( - (struct amdgpu_device *)kfd->adev, - doorbell_index); + /* Allocate bitmap for dynamic doorbell allocation */ + qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + GFP_KERNEL); + if (!qpd->doorbell_bitmap) { + DRM_ERROR("Failed to allocate process doorbell bitmap\n"); + return -ENOMEM; + } - if (r > 0) - *doorbell_index = r; + r = init_doorbell_bitmap(&pdd->qpd, kfd); + if (r) { + DRM_ERROR("Failed to initialize process doorbells\n"); + r = -ENOMEM; + goto err; + } - if (r < 0) - pr_err("Failed to allocate process doorbells\n"); + /* Allocate doorbells for this process */ + r = amdgpu_bo_create_kernel(kfd->adev, + kfd_doorbell_process_slice(kfd), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_DOORBELL, + &qpd->proc_doorbells, + NULL, + NULL); + if (r) { + DRM_ERROR("Failed to allocate process doorbells\n"); + goto err; + } + return 0; +err: + bitmap_free(qpd->doorbell_bitmap); + qpd->doorbell_bitmap = NULL; return r; } -void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index) +void kfd_free_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd) { - if (doorbell_index) { - if (!kfd->shared_resources.enable_mes) - ida_simple_remove(&kfd->doorbell_ida, doorbell_index); - else - amdgpu_mes_free_process_doorbells( - (struct amdgpu_device *)kfd->adev, - doorbell_index); + struct qcm_process_device *qpd = &pdd->qpd; + + if (qpd->doorbell_bitmap) { + bitmap_free(qpd->doorbell_bitmap); + qpd->doorbell_bitmap = NULL; } + + amdgpu_bo_free_kernel(&qpd->proc_doorbells, NULL, NULL); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 8081a9408006..0f58be65132f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -31,7 +31,6 @@ #include #include "kfd_priv.h" #include "kfd_events.h" -#include "kfd_iommu.h" #include /* @@ -1146,87 +1145,6 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, rcu_read_unlock(); } -#ifdef 
KFD_SUPPORT_IOMMU_V2 -void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, - unsigned long address, bool is_write_requested, - bool is_execute_requested) -{ - struct kfd_hsa_memory_exception_data memory_exception_data; - struct vm_area_struct *vma; - int user_gpu_id; - - /* - * Because we are called from arbitrary context (workqueue) as opposed - * to process context, kfd_process could attempt to exit while we are - * running so the lookup function increments the process ref count. - */ - struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); - struct mm_struct *mm; - - if (!p) - return; /* Presumably process exited. */ - - /* Take a safe reference to the mm_struct, which may otherwise - * disappear even while the kfd_process is still referenced. - */ - mm = get_task_mm(p->lead_thread); - if (!mm) { - kfd_unref_process(p); - return; /* Process is exiting */ - } - - user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); - if (unlikely(user_gpu_id == -EINVAL)) { - WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); - return; - } - memset(&memory_exception_data, 0, sizeof(memory_exception_data)); - - mmap_read_lock(mm); - vma = find_vma(mm, address); - - memory_exception_data.gpu_id = user_gpu_id; - memory_exception_data.va = address; - /* Set failure reason */ - memory_exception_data.failure.NotPresent = 1; - memory_exception_data.failure.NoExecute = 0; - memory_exception_data.failure.ReadOnly = 0; - if (vma && address >= vma->vm_start) { - memory_exception_data.failure.NotPresent = 0; - - if (is_write_requested && !(vma->vm_flags & VM_WRITE)) - memory_exception_data.failure.ReadOnly = 1; - else - memory_exception_data.failure.ReadOnly = 0; - - if (is_execute_requested && !(vma->vm_flags & VM_EXEC)) - memory_exception_data.failure.NoExecute = 1; - else - memory_exception_data.failure.NoExecute = 0; - } - - mmap_read_unlock(mm); - mmput(mm); - - pr_debug("notpresent %d, noexecute %d, readonly %d\n", - memory_exception_data.failure.NotPresent, - memory_exception_data.failure.NoExecute, - memory_exception_data.failure.ReadOnly); - - /* Workaround on Raven to not kill the process when memory is freed - * before IOMMU is able to finish processing all the excessive PPRs - */ - - if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) && - KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) && - KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) - lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY, - &memory_exception_data); - - kfd_unref_process(p); -} -#endif /* KFD_SUPPORT_IOMMU_V2 */ - void kfd_signal_hw_exception_event(u32 pasid) { /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index da2ca00d79e5..62b205dac63a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -322,22 +322,19 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - if (!pdd->dev->kfd->use_iommu_v2) { - /* dGPUs: SVM aperture starting at 0 - * with small reserved space for kernel. - * Set them to CANONICAL addresses. - */ - pdd->gpuvm_base = SVM_USER_BASE; - pdd->gpuvm_limit = - pdd->dev->kfd->shared_resources.gpuvm_size - 1; - } else { - /* set them to non CANONICAL addresses, and no SVM is - * allocated. 
- */ - pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1); - pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base, - pdd->dev->kfd->shared_resources.gpuvm_size); - } + /* dGPUs: SVM aperture starting at 0 + * with small reserved space for kernel. + * Set them to CANONICAL addresses. + */ + pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_limit = + pdd->dev->kfd->shared_resources.gpuvm_size - 1; + + /* dGPUs: the reserved space for kernel + * before SVM + */ + pdd->qpd.cwsr_base = SVM_CWSR_BASE; + pdd->qpd.ib_base = SVM_IB_BASE; pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); @@ -348,18 +345,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) pdd->lds_base = MAKE_LDS_APP_BASE_V9(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - /* Raven needs SVM to support graphic handle, etc. Leave the small - * reserved space before SVM on Raven as well, even though we don't - * have to. - * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they - * are used in Thunk to reserve SVM. - */ - pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_base = PAGE_SIZE; pdd->gpuvm_limit = pdd->dev->kfd->shared_resources.gpuvm_size - 1; pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); + + /* + * Place TBA/TMA on opposite side of VM hole to prevent + * stray faults from triggering SVM on these pages. + */ + pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size; } int kfd_init_apertures(struct kfd_process *process) @@ -416,14 +413,6 @@ int kfd_init_apertures(struct kfd_process *process) return -EINVAL; } } - - if (!dev->kfd->use_iommu_v2) { - /* dGPUs: the reserved space for kernel - * before SVM - */ - pdd->qpd.cwsr_base = SVM_CWSR_BASE; - pdd->qpd.ib_base = SVM_IB_BASE; - } } dev_dbg(kfd_device, "node id %u\n", id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c deleted file mode 100644 index 808ee010520a..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ /dev/null @@ -1,356 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * Copyright 2018-2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include - -#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2) - -#include -#include -#include -#include -#include -#include "kfd_priv.h" -#include "kfd_topology.h" -#include "kfd_iommu.h" - -static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP | - AMD_IOMMU_DEVICE_FLAG_PRI_SUP | - AMD_IOMMU_DEVICE_FLAG_PASID_SUP; - -/** kfd_iommu_check_device - Check whether IOMMU is available for device - */ -int kfd_iommu_check_device(struct kfd_dev *kfd) -{ - struct amd_iommu_device_info iommu_info; - int err; - - if (!kfd->use_iommu_v2) - return -ENODEV; - - iommu_info.flags = 0; - err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info); - if (err) - return err; - - if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) - return -ENODEV; - - return 0; -} - -/** kfd_iommu_device_init - Initialize IOMMU for device - */ -int kfd_iommu_device_init(struct kfd_dev *kfd) -{ - struct amd_iommu_device_info iommu_info; - unsigned int pasid_limit; - int err; - - if (!kfd->use_iommu_v2) - return 0; - - iommu_info.flags = 0; - err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info); - if (err < 0) { - dev_err(kfd_device, - "error getting iommu info. is the iommu enabled?\n"); - return -ENODEV; - } - - if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) { - dev_err(kfd_device, - "error required iommu flags ats %i, pri %i, pasid %i\n", - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0, - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0, - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) - != 0); - return -ENODEV; - } - - pasid_limit = min_t(unsigned int, - (unsigned int)(1 << kfd->device_info.max_pasid_bits), - iommu_info.max_pasids); - - if (!kfd_set_pasid_limit(pasid_limit)) { - dev_err(kfd_device, "error setting pasid limit\n"); - return -EBUSY; - } - - return 0; -} - -/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process - * - * Binds the given process to the given device using its PASID. This - * enables IOMMUv2 address translation for the process on the device. - * - * This function assumes that the process mutex is held. - */ -int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) -{ - struct kfd_node *dev = pdd->dev; - struct kfd_process *p = pdd->process; - int err; - - if (!dev->kfd->use_iommu_v2 || pdd->bound == PDD_BOUND) - return 0; - - if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { - pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n"); - return -EINVAL; - } - - if (!kfd_is_first_node(dev)) { - dev_warn_once(kfd_device, - "IOMMU supported only on first node\n"); - return 0; - } - - err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread); - if (!err) - pdd->bound = PDD_BOUND; - - return err; -} - -/** kfd_iommu_unbind_process - Unbind process from all devices - * - * This removes all IOMMU device bindings of the process. To be used - * before process termination. - */ -void kfd_iommu_unbind_process(struct kfd_process *p) -{ - int i; - - for (i = 0; i < p->n_pdds; i++) - if ((p->pdds[i]->bound == PDD_BOUND) && - (kfd_is_first_node((p->pdds[i]->dev)))) - amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev, - p->pasid); -} - -/* Callback for process shutdown invoked by the IOMMU driver */ -static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) -{ - struct kfd_node *dev = kfd_device_by_pci_dev(pdev); - struct kfd_process *p; - struct kfd_process_device *pdd; - - if (!dev) - return; - - /* - * Look for the process that matches the pasid. 
If there is no such - * process, we either released it in amdkfd's own notifier, or there - * is a bug. Unfortunately, there is no way to tell... - */ - p = kfd_lookup_process_by_pasid(pasid); - if (!p) - return; - - pr_debug("Unbinding process 0x%x from IOMMU\n", pasid); - - mutex_lock(&p->mutex); - - pdd = kfd_get_process_device_data(dev, p); - if (pdd) - /* For GPU relying on IOMMU, we need to dequeue here - * when PASID is still bound. - */ - kfd_process_dequeue_from_device(pdd); - - mutex_unlock(&p->mutex); - - kfd_unref_process(p); -} - -/* This function called by IOMMU driver on PPR failure */ -static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, - unsigned long address, u16 flags) -{ - struct kfd_node *dev; - - dev_warn_ratelimited(kfd_device, - "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", - pdev->bus->number, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), - pasid, - address, - flags); - - dev = kfd_device_by_pci_dev(pdev); - if (!WARN_ON(!dev)) - kfd_signal_iommu_event(dev, pasid, address, - flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC); - - return AMD_IOMMU_INV_PRI_RSP_INVALID; -} - -/* - * Bind processes do the device that have been temporarily unbound - * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device. - */ -static int kfd_bind_processes_to_device(struct kfd_node *knode) -{ - struct kfd_process_device *pdd; - struct kfd_process *p; - unsigned int temp; - int err = 0; - - int idx = srcu_read_lock(&kfd_processes_srcu); - - hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(knode, p); - - if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) { - mutex_unlock(&p->mutex); - continue; - } - - err = amd_iommu_bind_pasid(knode->adev->pdev, p->pasid, - p->lead_thread); - if (err < 0) { - pr_err("Unexpected pasid 0x%x binding failure\n", - p->pasid); - mutex_unlock(&p->mutex); - break; - } - - pdd->bound = PDD_BOUND; - mutex_unlock(&p->mutex); - } - - srcu_read_unlock(&kfd_processes_srcu, idx); - - return err; -} - -/* - * Mark currently bound processes as PDD_BOUND_SUSPENDED. These - * processes will be restored to PDD_BOUND state in - * kfd_bind_processes_to_device. - */ -static void kfd_unbind_processes_from_device(struct kfd_node *knode) -{ - struct kfd_process_device *pdd; - struct kfd_process *p; - unsigned int temp; - - int idx = srcu_read_lock(&kfd_processes_srcu); - - hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(knode, p); - - if (WARN_ON(!pdd)) { - mutex_unlock(&p->mutex); - continue; - } - - if (pdd->bound == PDD_BOUND) - pdd->bound = PDD_BOUND_SUSPENDED; - mutex_unlock(&p->mutex); - } - - srcu_read_unlock(&kfd_processes_srcu, idx); -} - -/** kfd_iommu_suspend - Prepare IOMMU for suspend - * - * This unbinds processes from the device and disables the IOMMU for - * the device. - */ -void kfd_iommu_suspend(struct kfd_dev *kfd) -{ - if (!kfd->use_iommu_v2) - return; - - kfd_unbind_processes_from_device(kfd->nodes[0]); - - amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); - amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); - amd_iommu_free_device(kfd->adev->pdev); -} - -/** kfd_iommu_resume - Restore IOMMU after resume - * - * This reinitializes the IOMMU for the device and re-binds previously - * suspended processes to the device. 
- */ -int kfd_iommu_resume(struct kfd_dev *kfd) -{ - unsigned int pasid_limit; - int err; - - if (!kfd->use_iommu_v2) - return 0; - - pasid_limit = kfd_get_pasid_limit(); - - err = amd_iommu_init_device(kfd->adev->pdev, pasid_limit); - if (err) - return -ENXIO; - - amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, - iommu_pasid_shutdown_callback); - amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, - iommu_invalid_ppr_cb); - - err = kfd_bind_processes_to_device(kfd->nodes[0]); - if (err) { - amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); - amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); - amd_iommu_free_device(kfd->adev->pdev); - return err; - } - - return 0; -} - -/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology - */ -int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) -{ - struct kfd_perf_properties *props; - - if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT)) - return 0; - - if (!amd_iommu_pc_supported()) - return 0; - - props = kfd_alloc_struct(props); - if (!props) - return -ENOMEM; - strcpy(props->block_name, "iommu"); - props->max_concurrent = amd_iommu_pc_get_max_banks(0) * - amd_iommu_pc_get_max_counters(0); /* assume one iommu */ - list_add_tail(&props->list, &kdev->perf_props); - - return 0; -} - -#endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h deleted file mode 100644 index 8cf0fcbe87c2..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h +++ /dev/null @@ -1,84 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/* - * Copyright 2018-2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef __KFD_IOMMU_H__ -#define __KFD_IOMMU_H__ - -#include - -#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2) - -#define KFD_SUPPORT_IOMMU_V2 - -int kfd_iommu_check_device(struct kfd_dev *kfd); -int kfd_iommu_device_init(struct kfd_dev *kfd); - -int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd); -void kfd_iommu_unbind_process(struct kfd_process *p); - -void kfd_iommu_suspend(struct kfd_dev *kfd); -int kfd_iommu_resume(struct kfd_dev *kfd); - -int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev); - -#else - -static inline int kfd_iommu_check_device(struct kfd_dev *kfd) -{ - return -ENODEV; -} -static inline int kfd_iommu_device_init(struct kfd_dev *kfd) -{ -#if IS_MODULE(CONFIG_AMD_IOMMU_V2) - WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD"); -#endif - return 0; -} - -static inline int kfd_iommu_bind_process_to_device( - struct kfd_process_device *pdd) -{ - return 0; -} -static inline void kfd_iommu_unbind_process(struct kfd_process *p) -{ - /* empty */ -} - -static inline void kfd_iommu_suspend(struct kfd_dev *kfd) -{ - /* empty */ -} -static inline int kfd_iommu_resume(struct kfd_dev *kfd) -{ - return 0; -} - -static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) -{ - return 0; -} - -#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */ - -#endif /* __KFD_IOMMU_H__ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 709ac885ca6d..7d82c7da223a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -461,7 +461,6 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, 0, node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); - svm_range_free_dma_mappings(prange); out_free: kvfree(buf); @@ -543,10 +542,12 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, addr = next; } - if (cpages) + if (cpages) { prange->actual_loc = best_loc; - else + svm_range_free_dma_mappings(prange, true); + } else { svm_range_vram_node_free(prange); + } return r < 0 ? 
r : 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 863cf060af48..d01bb57733b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -48,7 +48,7 @@ int pipe_priority_map[] = { struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q) { - struct kfd_mem_obj *mqd_mem_obj = NULL; + struct kfd_mem_obj *mqd_mem_obj; mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) @@ -64,7 +64,7 @@ struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properti struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q) { - struct kfd_mem_obj *mqd_mem_obj = NULL; + struct kfd_mem_obj *mqd_mem_obj; uint64_t offset; mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 65c9f01a1f86..ee1d32d957f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -206,13 +206,6 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, - struct mqd_update_info *minfo) -{ - __update_mqd(mm, mqd, q, minfo, 1); -} - static uint32_t read_doorbell_id(void *mqd) { struct cik_mqd *m = (struct cik_mqd *)mqd; @@ -220,9 +213,9 @@ static uint32_t read_doorbell_id(void *mqd) return m->queue_doorbell_id0; } -static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, - struct mqd_update_info *minfo) +static void update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, + struct mqd_update_info *minfo) { __update_mqd(mm, mqd, q, minfo, 0); } @@ -387,7 +380,6 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif - struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, struct kfd_node *dev) { @@ -470,16 +462,3 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, return mqd; } - -struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_node *dev) -{ - struct mqd_manager *mqd; - - mqd = mqd_manager_init_cik(type, dev); - if (!mqd) - return NULL; - if (type == KFD_MQD_TYPE_CP) - mqd->update_mqd = update_mqd_hawaii; - return mqd; -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 94c0fc2e57b7..83699392c808 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -318,6 +318,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } +static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int err; + struct v10_compute_mqd *m; + u32 doorbell_off; + + m = get_mqd(mqd); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0); + if (err) + pr_debug("Destroy HIQ MQD failed: %d\n", err); + + return err; +} + static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -460,7 +480,7 @@ struct mqd_manager 
*mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->destroy_mqd = destroy_hiq_mqd; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v10_compute_mqd); mqd->mqd_stride = kfd_mqd_stride; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 31fec5e70d13..2319467d2d95 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -335,6 +335,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } +static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int err; + struct v11_compute_mqd *m; + u32 doorbell_off; + + m = get_mqd(mqd); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0); + if (err) + pr_debug("Destroy HIQ MQD failed: %d\n", err); + + return err; +} + static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -449,7 +469,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->destroy_mqd = destroy_hiq_mqd; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v11_compute_mqd); #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 601bb9f68048..e23d32f35607 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -405,6 +405,25 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } +static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int err; + struct v9_mqd *m; + u32 doorbell_off; + + m = get_mqd(mqd); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0); + if (err) + pr_debug("Destroy HIQ MQD failed: %d\n", err); + + return err; +} + static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -548,16 +567,19 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { uint32_t xcc_mask = mm->dev->xcc_mask; int xcc_id, err, inst = 0; - void *xcc_mqd; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + struct v9_mqd *m; + u32 doorbell_off; for_each_inst(xcc_id, xcc_mask) { - xcc_mqd = mqd + hiq_mqd_size * inst; - err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, - type, timeout, pipe_id, - queue_id, xcc_id); + m = get_mqd(mqd + hiq_mqd_size * inst); + + doorbell_off = m->cp_hqd_pq_doorbell_control >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + + err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", inst); + pr_debug("Destroy HIQ MQD failed for xcc: 
%d\n", inst); break; } ++inst; @@ -846,7 +868,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, } else { mqd->init_mqd = init_mqd_hiq; mqd->load_mqd = kfd_hiq_load_mqd_kiq; - mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->destroy_mqd = destroy_hiq_mqd; } break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index d1e962da51dd..657c37822980 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -237,14 +237,6 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } - -static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, - struct mqd_update_info *minfo) -{ - __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1); -} - static uint32_t read_doorbell_id(void *mqd) { struct vi_mqd *m = (struct vi_mqd *)mqd; @@ -252,9 +244,9 @@ static uint32_t read_doorbell_id(void *mqd) return m->queue_doorbell_id0; } -static void update_mqd_tonga(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, - struct mqd_update_info *minfo) +static void update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, + struct mqd_update_info *minfo) { __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); } @@ -529,16 +521,3 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, return mqd; } - -struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_node *dev) -{ - struct mqd_manager *mqd; - - mqd = mqd_manager_init_vi(type, dev); - if (!mqd) - return NULL; - if (type == KFD_MQD_TYPE_CP) - mqd->update_mqd = update_mqd_tonga; - return mqd; -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 29a2d0499b67..8ce6f5200905 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -121,6 +121,7 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, packet->sh_mem_bases = qpd->sh_mem_bases; if (qpd->tba_addr) { packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); + packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8); packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); } @@ -298,7 +299,8 @@ static int pm_set_grace_period_v9(struct packet_manager *pm, pm->dqm->wait_times, grace_period, ®_offset, - ®_data); + ®_data, + 0); if (grace_period == USE_DEFAULT_GRACE_PERIOD) reg_data = pm->dqm->wait_times; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d4c9ee3f9953..3d9ce44d88da 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -175,12 +175,6 @@ extern int send_sigterm; */ extern int debug_largebar; -/* - * Ignore CRAT table during KFD initialization, can be used to work around - * broken CRAT tables on some AMD systems - */ -extern int ignore_crat; - /* Set sh_mem_config.retry_disable on GFX v9 */ extern int amdgpu_noretry; @@ -234,7 +228,6 @@ struct kfd_device_info { uint8_t num_of_watch_points; uint16_t mqd_size_aligned; bool supports_cwsr; - bool needs_iommu_device; bool needs_pci_atomics; uint32_t no_atomic_fw_version; unsigned int num_sdma_queues_per_engine; @@ -323,15 +316,6 @@ struct kfd_dev { struct kfd_device_info device_info; - phys_addr_t doorbell_base; /* Start of actual doorbells used by - * KFD. 
It is aligned for mapping - * into user mode - */ - size_t doorbell_base_dw_offset; /* Offset from the start of the PCI - * doorbell BAR to the first KFD - * doorbell in dwords. GFX reserves - * the segment before this offset. - */ u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells * page used by kernel queue */ @@ -340,8 +324,6 @@ struct kfd_dev { const struct kfd2kgd_calls *kfd2kgd; struct mutex doorbell_mutex; - DECLARE_BITMAP(doorbell_available_index, - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); void *gtt_mem; uint64_t gtt_start_gpu_addr; @@ -368,9 +350,6 @@ struct kfd_dev { bool pci_atomic_requested; - /* Use IOMMU v2 flag */ - bool use_iommu_v2; - /* Compute Profile ref. count */ atomic_t compute_profile; @@ -385,6 +364,12 @@ struct kfd_dev { /* Track per device allocated watch points */ uint32_t alloc_watch_ids; spinlock_t watch_points_lock; + + /* Kernel doorbells for KFD device */ + struct amdgpu_bo *doorbells; + + /* bitmap for dynamic doorbell allocation from doorbell object */ + unsigned long *doorbell_bitmap; }; enum kfd_mempool { @@ -702,7 +687,10 @@ struct qcm_process_device { uint64_t ib_base; void *ib_kaddr; - /* doorbell resources per process per device */ + /* doorbells for kfd process */ + struct amdgpu_bo *proc_doorbells; + + /* bitmap for dynamic doorbell allocation from the bo */ unsigned long *doorbell_bitmap; }; @@ -792,7 +780,6 @@ struct kfd_process_device { struct attribute attr_evict; struct kobject *kobj_stats; - unsigned int doorbell_index; /* * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process @@ -1100,9 +1087,9 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, unsigned int doorbell_id); phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd); int kfd_alloc_process_doorbells(struct kfd_dev *kfd, - unsigned int *doorbell_index); + struct kfd_process_device *pdd); void kfd_free_process_doorbells(struct kfd_dev *kfd, - unsigned int doorbell_index); + struct kfd_process_device *pdd); /* GTT Sub-Allocator */ int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, @@ -1152,7 +1139,6 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, } int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); -void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ #define KFD_IRQ_FENCE_CLIENTID 0xff @@ -1299,12 +1285,8 @@ void print_queue(struct queue *q); struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, struct kfd_node *dev); -struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, struct kfd_node *dev); -struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, @@ -1459,9 +1441,6 @@ int kfd_wait_on_events(struct kfd_process *p, uint32_t *wait_result); void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint32_t valid_id_bits); -void kfd_signal_iommu_event(struct kfd_node *dev, - u32 pasid, unsigned long address, - bool is_write_requested, bool is_execute_requested); void kfd_signal_hw_exception_event(u32 pasid); int kfd_set_event(struct kfd_process *p, uint32_t event_id); int kfd_reset_event(struct kfd_process *p, uint32_t event_id); diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a844e68211ac..fbf053001af9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -41,7 +40,6 @@ struct mm_struct; #include "kfd_priv.h" #include "kfd_device_queue_manager.h" -#include "kfd_iommu.h" #include "kfd_svm.h" #include "kfd_smi_events.h" #include "kfd_debug.h" @@ -1035,10 +1033,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) free_pages((unsigned long)pdd->qpd.cwsr_kaddr, get_order(KFD_CWSR_TBA_TMA_SIZE)); - bitmap_free(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); - kfd_free_process_doorbells(pdd->dev->kfd, pdd->doorbell_index); + kfd_free_process_doorbells(pdd->dev->kfd, pdd); if (pdd->dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, @@ -1123,7 +1120,6 @@ static void kfd_process_wq_release(struct work_struct *work) dma_fence_signal(p->ef); kfd_process_remove_sysfs(p); - kfd_iommu_unbind_process(p); kfd_process_kunmap_signal_bo(p); kfd_process_free_outstanding_kfd_bos(p); @@ -1550,38 +1546,6 @@ err_alloc_process: return ERR_PTR(err); } -static int init_doorbell_bitmap(struct qcm_process_device *qpd, - struct kfd_dev *dev) -{ - unsigned int i; - int range_start = dev->shared_resources.non_cp_doorbells_start; - int range_end = dev->shared_resources.non_cp_doorbells_end; - - if (!KFD_IS_SOC15(dev)) - return 0; - - qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, - GFP_KERNEL); - if (!qpd->doorbell_bitmap) - return -ENOMEM; - - /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */ - pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); - pr_debug("reserved doorbell 0x%03x - 0x%03x\n", - range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, - range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); - - for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { - if (i >= range_start && i <= range_end) { - __set_bit(i, qpd->doorbell_bitmap); - __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, - qpd->doorbell_bitmap); - } - } - - return 0; -} - struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p) { @@ -1606,11 +1570,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, if (!pdd) return NULL; - if (init_doorbell_bitmap(&pdd->qpd, dev->kfd)) { - pr_err("Failed to init doorbell for process\n"); - goto err_free_pdd; - } - pdd->dev = dev; INIT_LIST_HEAD(&pdd->qpd.queues_list); INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); @@ -1766,10 +1725,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, } } - err = kfd_iommu_bind_process_to_device(pdd); - if (err) - goto out; - /* * make sure that runtime_usage counter is incremented just once * per pdd @@ -1777,15 +1732,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, pdd->runtime_inuse = true; return pdd; - -out: - /* balance runpm reference count and exit with error */ - if (!pdd->runtime_inuse) { - pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev); - } - - return ERR_PTR(err); } /* Create specific handle mapped to mem from process local memory idr diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index ba9d69054119..adb5e4bdc0b2 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -123,7 +123,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, if (!gws && pdd->qpd.num_gws == 0) return -EINVAL; - if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) { if (gws) ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info, gws, &mem); @@ -136,7 +136,9 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } else { /* * Intentionally set GWS to a non-NULL value - * for GFX 9.4.3. + * for devices that do not use GWS for global wave + * synchronization but require the formality + * of setting GWS for cooperative groups. */ pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL; } @@ -173,7 +175,8 @@ void pqm_uninit(struct process_queue_manager *pqm) list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { if (pqn->q && pqn->q->gws && - KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3)) + KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) && + !pqn->q->device->kfd->shared_resources.enable_mes) amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info, pqn->q->gws); kfd_procfs_del_queue(pqn->q); @@ -365,17 +368,20 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - if (q && p_doorbell_offset_in_process) + if (q && p_doorbell_offset_in_process) { /* Return the doorbell offset within the doorbell page * to the caller so it can be passed up to user mode * (in bytes). - * There are always 1024 doorbells per process, so in case - * of 8-byte doorbells, there are two doorbell pages per - * process. + * relative doorbell index = Absolute doorbell index - + * absolute index of first doorbell in the page. */ - *p_doorbell_offset_in_process = - (q->properties.doorbell_off * sizeof(uint32_t)) & - (kfd_doorbell_process_slice(dev->kfd) - 1); + uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev, + pdd->qpd.proc_doorbells, + 0); + + *p_doorbell_offset_in_process = (q->properties.doorbell_off + - first_db_index) * sizeof(uint32_t); + } pr_debug("PQM After DQM create queue\n"); @@ -455,7 +461,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) } if (pqn->q->gws) { - if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) && + !dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_remove_gws_from_process( pqm->process->kgd_process_info, pqn->q->gws); @@ -929,12 +936,6 @@ int kfd_criu_restore_queue(struct kfd_process *p, goto exit; } - if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { - ret = -ENOMEM; - goto exit; - } - /* data stored in this order: mqd, ctl_stack */ mqd = q_extra_data; ctl_stack = mqd + q_data->mqd_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 0b7bfbd0cb66..011561605983 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -23,7 +23,10 @@ #include #include +#include #include +#include + #include "amdgpu_sync.h" #include "amdgpu_object.h" #include "amdgpu_vm.h" @@ -46,6 +49,13 @@ * page table is updated. 
*/ #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC) +#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) +#define dynamic_svm_range_dump(svms) \ + _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms) +#else +#define dynamic_svm_range_dump(svms) \ + do { if (0) svm_range_debug_dump(svms); } while (0) +#endif /* Giant svm range split into smaller ranges based on this, it is decided using * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to @@ -239,7 +249,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, } } -void svm_range_free_dma_mappings(struct svm_range *prange) +void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma) { struct kfd_process_device *pdd; dma_addr_t *dma_addr; @@ -260,13 +270,14 @@ void svm_range_free_dma_mappings(struct svm_range *prange) continue; } dev = &pdd->dev->adev->pdev->dev; - svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); + if (unmap_dma) + svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); kvfree(dma_addr); prange->dma_addr[gpuidx] = NULL; } } -static void svm_range_free(struct svm_range *prange, bool update_mem_usage) +static void svm_range_free(struct svm_range *prange, bool do_unmap) { uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT; struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); @@ -275,9 +286,9 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage) prange->start, prange->last); svm_range_vram_node_free(prange); - svm_range_free_dma_mappings(prange); + svm_range_free_dma_mappings(prange, do_unmap); - if (update_mem_usage && !p->xnack_enabled) { + if (do_unmap && !p->xnack_enabled) { pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size); amdgpu_amdkfd_unreserve_mem_limit(NULL, size, KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); @@ -849,6 +860,37 @@ static void svm_range_debug_dump(struct svm_range_list *svms) } } +static void * +svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements, + uint64_t offset) +{ + unsigned char *dst; + + dst = kvmalloc_array(num_elements, size, GFP_KERNEL); + if (!dst) + return NULL; + memcpy(dst, (unsigned char *)psrc + offset, num_elements * size); + + return (void *)dst; +} + +static int +svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src) +{ + int i; + + for (i = 0; i < MAX_GPU_INSTANCE; i++) { + if (!src->dma_addr[i]) + continue; + dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i], + sizeof(*src->dma_addr[i]), src->npages, 0); + if (!dst->dma_addr[i]) + return -ENOMEM; + } + + return 0; +} + static int svm_range_split_array(void *ppnew, void *ppold, size_t size, uint64_t old_start, uint64_t old_n, @@ -863,22 +905,16 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size, if (!pold) return 0; - new = kvmalloc_array(new_n, size, GFP_KERNEL); + d = (new_start - old_start) * size; + new = svm_range_copy_array(pold, size, new_n, d); if (!new) return -ENOMEM; - - d = (new_start - old_start) * size; - memcpy(new, pold + d, new_n * size); - - old = kvmalloc_array(old_n, size, GFP_KERNEL); + d = (new_start == old_start) ? new_n * size : 0; + old = svm_range_copy_array(pold, size, old_n, d); if (!old) { kvfree(new); return -ENOMEM; } - - d = (new_start == old_start) ? 
new_n * size : 0; - memcpy(old, pold + d, old_n * size); - kvfree(pold); *(void **)ppold = old; *(void **)ppnew = new; @@ -1455,37 +1491,34 @@ struct svm_validate_context { struct svm_range *prange; bool intr; DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); - struct ttm_validate_buffer tv[MAX_GPU_INSTANCE]; - struct list_head validate_list; - struct ww_acquire_ctx ticket; + struct drm_exec exec; }; -static int svm_range_reserve_bos(struct svm_validate_context *ctx) +static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) { struct kfd_process_device *pdd; struct amdgpu_vm *vm; uint32_t gpuidx; int r; - INIT_LIST_HEAD(&ctx->validate_list); - for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { - pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); - if (!pdd) { - pr_debug("failed to find device idx %d\n", gpuidx); - return -EINVAL; + drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0); + drm_exec_until_all_locked(&ctx->exec) { + for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { + pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + r = -EINVAL; + goto unreserve_out; + } + vm = drm_priv_to_vm(pdd->drm_priv); + + r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); + drm_exec_retry_on_contention(&ctx->exec); + if (unlikely(r)) { + pr_debug("failed %d to reserve bo\n", r); + goto unreserve_out; + } } - vm = drm_priv_to_vm(pdd->drm_priv); - - ctx->tv[gpuidx].bo = &vm->root.bo->tbo; - ctx->tv[gpuidx].num_shared = 4; - list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); - } - - r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list, - ctx->intr, NULL); - if (r) { - pr_debug("failed %d to reserve bo\n", r); - return r; } for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { @@ -1508,13 +1541,13 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) return 0; unreserve_out: - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); + drm_exec_fini(&ctx->exec); return r; } static void svm_range_unreserve_bos(struct svm_validate_context *ctx) { - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); + drm_exec_fini(&ctx->exec); } static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) @@ -1522,6 +1555,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) struct kfd_process_device *pdd; pdd = kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) + return NULL; return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); } @@ -1596,12 +1631,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { - if (!prange->mapped_to_gpu) { + bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); + if (!prange->mapped_to_gpu || + bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) { r = 0; goto free_ctx; } - - bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE); } if (prange->actual_loc && !prange->ttm_res) { @@ -1613,7 +1648,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, goto free_ctx; } - svm_range_reserve_bos(ctx); + svm_range_reserve_bos(ctx, intr); p = container_of(prange->svms, struct kfd_process, svms); owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap, @@ -1925,7 +1960,10 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new = svm_range_new(old->svms, old->start, old->last, false); if (!new) return NULL; - + if (svm_range_copy_dma_addrs(new, old)) { + svm_range_free(new, false); + return NULL; + } if 
(old->svm_bo) { new->ttm_res = old->ttm_res; new->offset = old->offset; @@ -3558,7 +3596,7 @@ out_unlock_range: break; } - svm_range_debug_dump(svms); + dynamic_svm_range_dump(svms); mutex_unlock(&svms->lock); mmap_read_unlock(mm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 21b14510882b..9e668eeefb32 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -183,7 +183,7 @@ void svm_range_add_list_work(struct svm_range_list *svms, void schedule_deferred_list_work(struct svm_range_list *svms); void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, unsigned long offset, unsigned long npages); -void svm_range_free_dma_mappings(struct svm_range *prange); +void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma); int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, uint64_t *svm_priv_data_size); int kfd_criu_checkpoint_svm(struct kfd_process *p, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 4a17bb7c7b27..ff98fded9534 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -36,8 +36,8 @@ #include "kfd_crat.h" #include "kfd_topology.h" #include "kfd_device_queue_manager.h" -#include "kfd_iommu.h" #include "kfd_svm.h" +#include "kfd_debug.h" #include "amdgpu_amdkfd.h" #include "amdgpu_ras.h" #include "amdgpu.h" @@ -988,17 +988,6 @@ static void find_system_memory(const struct dmi_header *dm, } } -/* - * Performance counters information is not part of CRAT but we would like to - * put them in the sysfs under topology directory for Thunk to get the data. - * This function is called before updating the sysfs. - */ -static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev) -{ - /* These are the only counters supported so far */ - return kfd_iommu_add_perf_counters(kdev); -} - /* kfd_add_non_crat_information - Add information that is not currently * defined in CRAT but is necessary for KFD topology * @dev - topology device to which addition info is added @@ -1013,25 +1002,6 @@ static void kfd_add_non_crat_information(struct kfd_topology_device *kdev) /* TODO: For GPU node, rearrange code from kfd_topology_add_device */ } -/* kfd_is_acpi_crat_invalid - CRAT from ACPI is valid only for AMD APU devices. - * Ignore CRAT for all other devices. AMD APU is identified if both CPU - * and GPU cores are present. - * @device_list - topology device list created by parsing ACPI CRAT table. - * @return - TRUE if invalid, FALSE is valid. - */ -static bool kfd_is_acpi_crat_invalid(struct list_head *device_list) -{ - struct kfd_topology_device *dev; - - list_for_each_entry(dev, device_list, list) { - if (dev->node_props.cpu_cores_count && - dev->node_props.simd_count) - return false; - } - pr_info("Ignoring ACPI CRAT on non-APU system\n"); - return true; -} - int kfd_topology_init(void) { void *crat_image = NULL; @@ -1062,48 +1032,25 @@ int kfd_topology_init(void) */ proximity_domain = 0; - /* - * Get the CRAT image from the ACPI. If ACPI doesn't have one - * or if ACPI CRAT is invalid create a virtual CRAT. - * NOTE: The current implementation expects all AMD APUs to have - * CRAT. 
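The svm_range_reserve_bos() rewrite in kfd_svm.c above swaps the ttm_eu reservation list for drm_exec. A minimal sketch of that locking pattern for a single hypothetical GEM object — drm_exec_lock_obj() stands in here for the driver's amdgpu_vm_lock_pd(), and drm_exec_init() takes the flags word only, matching the hunk:

#include <drm/drm_exec.h>

static int reserve_one(struct drm_exec *exec, struct drm_gem_object *obj,
		       bool intr)
{
	int ret = 0;

	drm_exec_init(exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0);
	drm_exec_until_all_locked(exec) {
		ret = drm_exec_lock_obj(exec, obj);
		/* On contention all locks are dropped and the loop restarts. */
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;
	}
	if (ret)
		drm_exec_fini(exec);	/* error path: release anything held */
	return ret;
}

On success the caller keeps the reservations and releases them later with drm_exec_fini(), which is what the new svm_range_unreserve_bos() does.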
If no CRAT is available, it is assumed to be a CPU - */ - ret = kfd_create_crat_image_acpi(&crat_image, &image_size); - if (!ret) { - ret = kfd_parse_crat_table(crat_image, - &temp_topology_device_list, - proximity_domain); - if (ret || - kfd_is_acpi_crat_invalid(&temp_topology_device_list)) { - kfd_release_topology_device_list( - &temp_topology_device_list); - kfd_destroy_crat_image(crat_image); - crat_image = NULL; - } + ret = kfd_create_crat_image_virtual(&crat_image, &image_size, + COMPUTE_UNIT_CPU, NULL, + proximity_domain); + cpu_only_node = 1; + if (ret) { + pr_err("Error creating VCRAT table for CPU\n"); + return ret; } - if (!crat_image) { - ret = kfd_create_crat_image_virtual(&crat_image, &image_size, - COMPUTE_UNIT_CPU, NULL, - proximity_domain); - cpu_only_node = 1; - if (ret) { - pr_err("Error creating VCRAT table for CPU\n"); - return ret; - } - - ret = kfd_parse_crat_table(crat_image, - &temp_topology_device_list, - proximity_domain); - if (ret) { - pr_err("Error parsing VCRAT table for CPU\n"); - goto err; - } + ret = kfd_parse_crat_table(crat_image, + &temp_topology_device_list, + proximity_domain); + if (ret) { + pr_err("Error parsing VCRAT table for CPU\n"); + goto err; } kdev = list_first_entry(&temp_topology_device_list, struct kfd_topology_device, list); - kfd_add_perf_to_topology(kdev); down_write(&topology_lock); kfd_topology_update_device_list(&temp_topology_device_list, @@ -1189,8 +1136,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu) /* Discrete GPUs need their own topology device list * entries. Don't assign them to CPU/APU nodes. */ - if (!gpu->kfd->use_iommu_v2 && - dev->node_props.cpu_cores_count) + if (dev->node_props.cpu_cores_count) continue; if (!dev->gpu && (dev->node_props.simd_count > 0)) { @@ -1931,23 +1877,27 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED | HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { - dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 | - HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + if (kfd_dbg_has_ttmps_always_setup(dev->gpu)) + dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 4, 2)) + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { + if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3)) dev->node_props.debug_prop |= - HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; + HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3; else + dev->node_props.debug_prop |= + HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(11, 0, 0)) - dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; - else + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; } @@ -2083,10 +2033,7 @@ int kfd_topology_add_device(struct kfd_node *gpu) * Overwrite ATS capability according to needs_iommu_device to fix * potential missing corresponding bit in CRAT of BIOS. 
*/ - if (dev->gpu->kfd->use_iommu_v2) - dev->node_props.capability |= HSA_CAP_ATS_PRESENT; - else - dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; + dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; /* Fix errors in CZ CRAT. * simd_count: Carrizo CRAT reports wrong simd_count, probably @@ -2281,29 +2228,6 @@ int kfd_numa_node_to_apic_id(int numa_node_id) return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id)); } -void kfd_double_confirm_iommu_support(struct kfd_dev *gpu) -{ - struct kfd_topology_device *dev; - - gpu->use_iommu_v2 = false; - - if (!gpu->device_info.needs_iommu_device) - return; - - down_read(&topology_lock); - - /* Only use IOMMUv2 if there is an APU topology node with no GPU - * assigned yet. This GPU will be assigned to it. - */ - list_for_each_entry(dev, &topology_device_list, list) - if (dev->node_props.cpu_cores_count && - dev->node_props.simd_count && - !dev->gpu) - gpu->use_iommu_v2 = true; - - up_read(&topology_lock); -} - #if defined(CONFIG_DEBUG_FS) int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index cba2cd5ed9d1..dea32a9e5506 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -32,9 +32,12 @@ #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32 #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 6 +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 7 #define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 7 #define HSA_DBG_WATCH_ADDR_MASK_HI_BIT \ (29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT) +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3 \ + (30 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT) struct kfd_node_properties { uint64_t hive_id; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index bf0a655d009e..901d1961b739 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -5,7 +5,7 @@ menu "Display Engine Configuration" config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y - depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 + depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 select SND_HDA_COMPONENT if SND_HDA_CORE # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG)) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 249b073f6a23..8bf94920d23e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -38,7 +38,7 @@ AMDGPUDM += dc_fpu.o endif ifneq ($(CONFIG_DRM_AMD_DC),) -AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o +AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o endif AMDGPUDM += amdgpu_dm_hdcp.o diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e5554a36e8c8..268cb99a4c4b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -245,51 +245,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, */ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) { + struct amdgpu_crtc *acrtc = NULL; + if (crtc >= adev->mode_info.num_crtc) return 0; - else { - struct amdgpu_crtc *acrtc = 
adev->mode_info.crtcs[crtc]; - if (acrtc->dm_irq_params.stream == NULL) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", - crtc); - return 0; - } + acrtc = adev->mode_info.crtcs[crtc]; - return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); + if (!acrtc->dm_irq_params.stream) { + DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; } + + return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); } static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { u32 v_blank_start, v_blank_end, h_position, v_position; + struct amdgpu_crtc *acrtc = NULL; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; - else { - struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; - if (acrtc->dm_irq_params.stream == NULL) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", - crtc); - return 0; - } + acrtc = adev->mode_info.crtcs[crtc]; - /* - * TODO rework base driver to use values directly. - * for now parse it back into reg-format - */ - dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, - &v_blank_start, - &v_blank_end, - &h_position, - &v_position); - - *position = v_position | (h_position << 16); - *vbl = v_blank_start | (v_blank_end << 16); + if (!acrtc->dm_irq_params.stream) { + DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; } + /* + * TODO rework base driver to use values directly. + * for now parse it back into reg-format + */ + dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, + &v_blank_start, + &v_blank_end, + &h_position, + &v_position); + + *position = v_position | (h_position << 16); + *vbl = v_blank_start | (v_blank_end << 16); + return 0; } @@ -1821,9 +1822,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) * It is expected that DMUB will resend any pending notifications at this point, for * example HPD from DPIA. 
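The reworked dm_crtc_get_scanoutpos() above still parses the scanout position back into the legacy reg-format: one 16-bit field in the low half of each 32-bit word and one in the high half. A small userspace demonstration of the packing and unpacking (the timing numbers are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values for a 1080p-like mode. */
	uint32_t v_position = 540, h_position = 960;
	uint32_t v_blank_start = 1080, v_blank_end = 1125;

	/* Pack as in dm_crtc_get_scanoutpos(). */
	uint32_t position = v_position | (h_position << 16);
	uint32_t vbl = v_blank_start | (v_blank_end << 16);

	printf("v=%u h=%u\n", position & 0xffff, position >> 16);
	printf("vblank=%u..%u\n", vbl & 0xffff, vbl >> 16);
	return 0;
}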
*/ - if (dc_is_dmub_outbox_supported(adev->dm.dc)) + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { dc_enable_dmub_outbox(adev->dm.dc); + /* DPIA trace goes to dmesg logs only if outbox is enabled */ + if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) + dc_dmub_srv_enable_dpia_trace(adev->dm.dc); + } + if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -4090,6 +4096,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, int bl_idx) { + int ret; struct amdgpu_dm_backlight_caps caps; struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4104,13 +4111,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, if (!rc) return dm->brightness[bl_idx]; return convert_brightness_to_user(&caps, avg); - } else { - int ret = dc_link_get_backlight_level(link); - - if (ret == DC_ERROR_UNEXPECTED) - return dm->brightness[bl_idx]; - return convert_brightness_to_user(&caps, ret); } + + ret = dc_link_get_backlight_level(link); + + if (ret == DC_ERROR_UNEXPECTED) + return dm->brightness[bl_idx]; + + return convert_brightness_to_user(&caps, ret); } static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) @@ -5792,6 +5800,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, edp_min_bpp_x16, edp_max_bpp_x16, dsc_caps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &bw_range)) { if (bw_range.max_kbps < link_bw_in_kbps) { @@ -5800,6 +5809,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, &dsc_options, 0, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; @@ -5814,6 +5824,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, &dsc_options, link_bw_in_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &dsc_cfg)) { stream->timing.dsc_cfg = dsc_cfg; stream->timing.flags.DSC = 1; @@ -5857,12 +5868,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, &dsc_options, link_bandwidth_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { - timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link)); max_supported_bw_in_kbps = link_bandwidth_kbps; dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; @@ -5874,6 +5887,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, &dsc_options, dsc_max_supported_bw_in_kbps, &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", @@ -6031,7 +6045,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled) { + if 
(stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet @@ -7295,7 +7309,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) drm_connector_attach_colorspace_property(&aconnector->base); - } else if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || + } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || connector_type == DRM_MODE_CONNECTOR_eDP) { if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) drm_connector_attach_colorspace_property(&aconnector->base); @@ -7771,7 +7785,7 @@ static void update_freesync_state_on_stream( aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; - if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { + if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { pack_sdp_v1_3 = aconn->pack_sdp_v1_3; if (aconn->vsdb_info.amd_vsdb_version == 1) @@ -7926,7 +7940,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb) } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - struct dc_state *dc_state, struct drm_device *dev, struct amdgpu_display_manager *dm, struct drm_crtc *pcrtc, @@ -8074,10 +8087,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * fast updates. */ if (crtc->state->async_flip && - acrtc_state->update_type != UPDATE_TYPE_FAST) + (acrtc_state->update_type != UPDATE_TYPE_FAST || + get_mem_type(old_plane_state->fb) != get_mem_type(fb))) drm_warn_once(state->dev, "[PLANE:%d:%s] async flip with non-fast update\n", plane->base.id, plane->name); + bundle->flip_addrs[planes_count].flip_immediate = crtc->state->async_flip && acrtc_state->update_type == UPDATE_TYPE_FAST && @@ -8406,52 +8421,17 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } -/** - * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. - * @state: The atomic state to commit - * - * This will tell DC to commit the constructed DC state from atomic_check, - * programming the hardware. Any failures here implies a hardware failure, since - * atomic check should have filtered anything non-kosher. 
- */ -static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, + struct dc_state *dc_state) { struct drm_device *dev = state->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; - struct dm_atomic_state *dm_state; - struct dc_state *dc_state = NULL, *dc_state_temp = NULL; - u32 i, j; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; - unsigned long flags; - bool wait_for_vblank = true; - struct drm_connector *connector; - struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; - int crtc_disable_count = 0; bool mode_set_reset_required = false; - int r; - - trace_amdgpu_dm_atomic_commit_tail_begin(state); - - r = drm_atomic_helper_wait_for_fences(dev, state, false); - if (unlikely(r)) - DRM_ERROR("Waiting for fences timed out!"); - - drm_atomic_helper_update_legacy_modeset_state(dev, state); - drm_dp_mst_atomic_wait_for_dependencies(state); - - dm_state = dm_atomic_get_new_state(state); - if (dm_state && dm_state->context) { - dc_state = dm_state->context; - } else { - /* No state changes, retain current state. */ - dc_state_temp = dc_create_state(dm->dc); - ASSERT(dc_state_temp); - dc_state = dc_state_temp; - dc_resource_state_copy_construct_current(dm->dc, dc_state); - } + u32 i; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -8550,25 +8530,23 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } /* for_each_crtc_in_state() */ - if (dc_state) { - /* if there mode set or reset, disable eDP PSR */ - if (mode_set_reset_required) { - if (dm->vblank_control_workqueue) - flush_workqueue(dm->vblank_control_workqueue); + /* if there is a mode set or reset, disable eDP PSR */ + if (mode_set_reset_required) { + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); - amdgpu_dm_psr_disable_all(dm); - } - - dm_enable_per_frame_crtc_master_sync(dc_state); - mutex_lock(&dm->dc_lock); - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); - - /* Allow idle optimization when vblank count is 0 for display off */ - if (dm->active_vblank_irq_count == 0) - dc_allow_idle_optimizations(dm->dc, true); - mutex_unlock(&dm->dc_lock); + amdgpu_dm_psr_disable_all(dm); } + dm_enable_per_frame_crtc_master_sync(dc_state); + mutex_lock(&dm->dc_lock); + WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + + /* Allow idle optimization when vblank count is 0 for display off */ + if (dm->active_vblank_irq_count == 0) + dc_allow_idle_optimizations(dm->dc, true); + mutex_unlock(&dm->dc_lock); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -8587,6 +8565,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +} + +/** + * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. + * @state: The atomic state to commit + * + * This will tell DC to commit the constructed DC state from atomic_check, + * programming the hardware. Any failures here imply a hardware failure, since + * atomic check should have filtered anything non-kosher.
+ */ +static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct dm_atomic_state *dm_state; + struct dc_state *dc_state = NULL; + u32 i, j; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + unsigned long flags; + bool wait_for_vblank = true; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; + + trace_amdgpu_dm_atomic_commit_tail_begin(state); + + drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); + + dm_state = dm_atomic_get_new_state(state); + if (dm_state && dm_state->context) { + dc_state = dm_state->context; + amdgpu_dm_commit_streams(state, dc_state); + } + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); @@ -8709,13 +8725,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dc_surface_update dummy_updates[MAX_SURFACES]; + struct dc_surface_update *dummy_updates; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; bool abm_changed, hdr_changed, scaling_changed; - memset(&dummy_updates, 0, sizeof(dummy_updates)); memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { @@ -8774,6 +8789,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * Here we create an empty update on each plane. * To fix this, DC should permit updating only stream properties. */ + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; @@ -8785,6 +8801,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state->stream, &stream_update); mutex_unlock(&dm->dc_lock); + kfree(dummy_updates); } /** @@ -8863,8 +8880,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) - amdgpu_dm_commit_planes(state, dc_state, dev, - dm, crtc, wait_for_vblank); + amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); } /* Update audio instances for each connector. 
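The dummy_updates change above moves a MAX_SURFACES-sized array of struct dc_surface_update off the kernel stack and onto the heap. A sketch of the pattern — not the driver's exact code: it keeps the hunk's GFP_ATOMIC but, unlike the hunk, also checks the allocation before use:

	struct dc_surface_update *updates;
	int j;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_ATOMIC);
	if (!updates)
		return;
	for (j = 0; j < status->plane_count; j++)
		updates[j].surface = status->plane_states[0];
	/* ... hand 'updates' to the DC stream/surface update call that is
	 * elided in the hunk above ... */
	kfree(updates);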
*/ @@ -8919,9 +8935,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); - - if (dc_state_temp) - dc_release_state(dc_state_temp); } static int dm_force_atomic_commit(struct drm_connector *connector) @@ -9710,8 +9723,8 @@ static int dm_update_plane_state(struct dc *dc, if (plane->type == DRM_PLANE_TYPE_OVERLAY) { if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; - else - *is_top_most_overlay = false; + + *is_top_most_overlay = false; } DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", @@ -10040,6 +10053,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, /* Remove exiting planes if they are modified */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + if (old_plane_state->fb && new_plane_state->fb && + get_mem_type(old_plane_state->fb) != + get_mem_type(new_plane_state->fb)) + lock_and_validation_needed = true; + ret = dm_update_plane_state(dc, state, plane, old_plane_state, new_plane_state, @@ -10287,9 +10305,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + /* + * Only allow async flips for fast updates that don't change + * the FB pitch, the DCC state, rotation, etc. + */ + if (new_crtc_state->async_flip && lock_and_validation_needed) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + ret = -EINVAL; + goto fail; + } + dm_new_crtc_state->update_type = lock_and_validation_needed ? - UPDATE_TYPE_FULL : - UPDATE_TYPE_FAST; + UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; } /* Must be success */ @@ -10461,6 +10490,41 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, return ret; } +static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, + struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + while (j < EDID_LENGTH) { + struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; + unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); + + if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && + amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { + vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? 
true : false; + vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; + DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); + + return true; + } + j++; + } + + return false; +} + static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { @@ -10596,6 +10660,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, freesync_capable = true; } } + parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + + if (vsdb_info.replay_mode) { + amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; + amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; + amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; + } + } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 9fb5bb3a75a7..a2d34be82613 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -51,6 +51,9 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A +#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -75,6 +78,12 @@ struct dmub_srv; struct dc_plane_state; struct dmub_notification; +struct amd_vsdb_block { + unsigned char ieee_id[3]; + unsigned char version; + unsigned char feature_caps; +}; + struct common_irq_params { struct amdgpu_device *adev; enum dc_irq_source irq_src; @@ -609,6 +618,11 @@ struct amdgpu_hdmi_vsdb_info { * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz */ unsigned int max_refresh_rate_hz; + + /** + * @replay mode: Replay supported + */ + bool replay_mode; }; struct amdgpu_dm_connector { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 0802f8e8fac5..52ecfa746b54 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -123,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); crtc = secure_display_ctx->crtc; - if (!crtc) { + if (!crtc) return; - } psp = &drm_to_adev(crtc->dev)->psp; @@ -151,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); if (!ret) { - if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); - } } mutex_unlock(&psp->securedisplay_context.mutex); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index d63ee636483b..7c21e21bcc51 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1075,24 +1075,24 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) switch (dm_crtc_state->stream->output_color_space) { case 
COLOR_SPACE_SRGB: - seq_printf(m, "sRGB"); + seq_puts(m, "sRGB"); break; case COLOR_SPACE_YCBCR601: case COLOR_SPACE_YCBCR601_LIMITED: - seq_printf(m, "BT601_YCC"); + seq_puts(m, "BT601_YCC"); break; case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR709_LIMITED: - seq_printf(m, "BT709_YCC"); + seq_puts(m, "BT709_YCC"); break; case COLOR_SPACE_ADOBERGB: - seq_printf(m, "opRGB"); + seq_puts(m, "opRGB"); break; case COLOR_SPACE_2020_RGB_FULLRANGE: - seq_printf(m, "BT2020_RGB"); + seq_puts(m, "BT2020_RGB"); break; case COLOR_SPACE_2020_YCBCR: - seq_printf(m, "BT2020_YCC"); + seq_puts(m, "BT2020_YCC"); break; default: goto unlock; @@ -3022,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused) seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); } } else { - seq_printf(m, "ILR is not supported by this eDP panel.\n"); + seq_puts(m, "ILR is not supported by this eDP panel.\n"); } return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5536d17306d0..20cfc5be21a4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -39,10 +39,10 @@ static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { - struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; - struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3 { struct dc_link *link = handle; - struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; - struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, + {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; - return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; } -static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version) +static int psp_set_srm(struct psp_context *psp, + u8 *srm, uint32_t srm_size, uint32_t *srm_version) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || hdcp_cmd->out_msg.hdcp_set_srm.srm_version == 
PSP_SRM_VERSION_MAX) return -EINVAL; @@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work) static void link_lock(struct hdcp_workqueue *work, bool lock) { - int i = 0; for (i = 0; i < work->max_link; i++) { @@ -160,66 +160,60 @@ static void link_lock(struct hdcp_workqueue *work, bool lock) mutex_unlock(&work[i].mutex); } } + void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - uint8_t content_type, + u8 content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; - struct mod_hdcp_display *display = &hdcp_work[link_index].display; - struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct mod_hdcp_display_query query; + struct mod_hdcp_link_adjustment link_adjust; + struct mod_hdcp_display_adjustment display_adjust; unsigned int conn_index = aconnector->base.index; mutex_lock(&hdcp_w->mutex); hdcp_w->aconnector[conn_index] = aconnector; - query.display = NULL; - mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); + memset(&link_adjust, 0, sizeof(link_adjust)); + memset(&display_adjust, 0, sizeof(display_adjust)); - if (query.display != NULL) { - memcpy(display, query.display, sizeof(struct mod_hdcp_display)); - mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + if (enable_encryption) { + /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp + * (s3 resume case) + */ + if (hdcp_work->srm_size > 0) + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, + hdcp_work->srm_size, + &hdcp_work->srm_version); - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; - if (enable_encryption) { - /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp - * (s3 resume case) - */ - if (hdcp_work->srm_size > 0) - psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, - &hdcp_work->srm_version); + link_adjust.auth_delay = 2; - display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; - if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { - hdcp_w->link.adjust.hdcp1.disable = 0; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { - hdcp_w->link.adjust.hdcp1.disable = 1; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; - } - - schedule_delayed_work(&hdcp_w->property_validate_dwork, - msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - } else { - display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - cancel_delayed_work(&hdcp_w->property_validate_dwork); + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + link_adjust.hdcp1.disable = 1; + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } - display->state = MOD_HDCP_DISPLAY_ACTIVE; + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); } - mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, 
&link_adjust, &display_adjust, &hdcp_w->output); process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, - unsigned int link_index, + unsigned int link_index, struct amdgpu_dm_connector *aconnector) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -238,7 +232,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", - aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms); + aconnector->base.index, conn_state->hdcp_content_type, + aconnector->base.dpms); } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); @@ -246,6 +241,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, process_output(hdcp_w); mutex_unlock(&hdcp_w->mutex); } + void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -274,15 +270,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index schedule_work(&hdcp_w->cpirq_work); } - - - static void event_callback(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, - callback_dwork); + callback_dwork); mutex_lock(&hdcp_work->mutex); @@ -294,13 +287,12 @@ static void event_callback(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - - } static void event_property_update(struct work_struct *work) { - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, + property_update_work); struct amdgpu_dm_connector *aconnector = NULL; struct drm_device *dev; long ret; @@ -334,11 +326,10 @@ static void event_property_update(struct work_struct *work) mutex_lock(&hdcp_work->mutex); if (conn_state->commit) { - ret = wait_for_completion_interruptible_timeout( - &conn_state->commit->hw_done, 10 * HZ); + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, + 10 * HZ); if (ret == 0) { - DRM_ERROR( - "HDCP state unknown! Setting it to DESIRED"); + DRM_ERROR("HDCP state unknown! 
Setting it to DESIRED\n"); hdcp_work->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; } @@ -349,24 +340,20 @@ static void event_property_update(struct work_struct *work) DRM_MODE_HDCP_CONTENT_TYPE0 && hdcp_work->encryption_status[conn_index] <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { - DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); - drm_hdcp_update_content_protection( - connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && hdcp_work->encryption_status[conn_index] == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { - drm_hdcp_update_content_protection( - connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); } } else { DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); - drm_hdcp_update_content_protection( - connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); - + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); } mutex_unlock(&hdcp_work->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -402,7 +389,7 @@ static void event_property_validate(struct work_struct *work) &query); DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", - aconnector->base.index, + aconnector->base.index, aconnector->base.state->content_protection, query.encryption_status, hdcp_work->encryption_status[conn_index]); @@ -410,7 +397,8 @@ static void event_property_validate(struct work_struct *work) if (query.encryption_status != hdcp_work->encryption_status[conn_index]) { DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", - hdcp_work->encryption_status[conn_index], query.encryption_status); + hdcp_work->encryption_status[conn_index], + query.encryption_status); hdcp_work->encryption_status[conn_index] = query.encryption_status; @@ -429,7 +417,7 @@ static void event_watchdog_timer(struct work_struct *work) struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), - struct hdcp_workqueue, + struct hdcp_workqueue, watchdog_timer_dwork); mutex_lock(&hdcp_work->mutex); @@ -443,7 +431,6 @@ static void event_watchdog_timer(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - } static void event_cpirq(struct work_struct *work) @@ -459,10 +446,8 @@ static void event_cpirq(struct work_struct *work) process_output(hdcp_work); mutex_unlock(&hdcp_work->mutex); - } - void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) { int i = 0; @@ -478,10 +463,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) kfree(hdcp_work); } - static bool enable_assr(void *handle, struct dc_link *link) { - struct hdcp_workqueue *hdcp_work = handle; struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; @@ -499,7 +482,8 @@ static bool enable_assr(void *handle, struct dc_link *link) memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; - dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst; + dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = + link->link_enc_hw_inst; dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); @@ -521,7 +505,7 @@ static void 
update_config(void *handle, struct cp_psp_stream_config *config) int link_index = aconnector->dc_link->link_index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct drm_connector_state *conn_state; + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; @@ -541,7 +525,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) else if (aconnector->dc_em_sink) sink = aconnector->dc_em_sink; - if (sink != NULL) + if (sink) link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; @@ -564,19 +548,27 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 2; link->adjust.hdcp1.disable = 0; - conn_state = aconnector->base.state; + hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, - (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, - (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1); + (!!aconnector->base.state) ? + aconnector->base.state->content_protection : -1, + (!!aconnector->base.state) ? + aconnector->base.state->hdcp_content_type : -1); + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); - if (conn_state) - hdcp_update_display(hdcp_work, link_index, aconnector, - conn_state->hdcp_content_type, false); } - -/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel +/** + * DOC: Add sysfs interface for set/get srm + * + * NOTE: From the usermode's perspective you only need to call write *ONCE*, the kernel * will automatically call once or twice depending on the size * * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is * * The kernel partitions the write call, the kernel doesn't necessarily know the size * * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on * the last call we will send the full SRM. PSP will fail on every call before the last. * - * This means we don't know if the SRM is good until the last call. And because of this limitation we - * cannot throw errors early as it will stop the kernel from writing to sysfs + * This means we don't know if the SRM is good until the last call. And because of this + * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs * * Example 1: - * Good SRM size = 5096 - * first call to write 4096 -> PSP fails - * Second call to write 1000 -> PSP Pass -> SRM is set + * Good SRM size = 5096 + * first call to write 4096 -> PSP fails + * Second call to write 1000 -> PSP Pass -> SRM is set * * Example 2: - * Bad SRM size = 4096 - * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this - * is the last call) + * Bad SRM size = 4096 + * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this + * is the last call) * * Solution?: - * 1: Parse the SRM? -> It is signed so we don't know the EOF - * 2: We can have another sysfs that passes the size before calling set.
-> simpler solution - * below + * 1: Parse the SRM? -> It is signed so we don't know the EOF + * 2: We can have another sysfs that passes the size before calling set. -> simpler solution + * below * * Easy Solution: * Always call get after Set to verify if set was successful. * +----------------------+ * PSP will only update its srm if its older than the one we are trying to load. * Always do set first than get. - -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer - version and save it + * -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer + * version and save it * - -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the - same(newer) version back and save it + * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the + * same(newer) version back and save it * - -if we try to "1. SET" a newer version and PSP rejects it. That means the format is - incorrect/corrupted and we should correct our SRM by getting it from PSP + * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP */ -static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint32_t srm_version = 0; + u32 srm_version = 0; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); @@ -639,19 +632,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi work->srm_version = srm_version; } - link_lock(work, false); return count; } -static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint8_t *srm = NULL; - uint32_t srm_version; - uint32_t srm_size; + u8 *srm = NULL; + u32 srm_version; + u32 srm_size; size_t ret = count; work = container_of(bin_attr, struct hdcp_workqueue, attr); @@ -684,12 +677,12 @@ ret: /* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. * * For example, - * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" - * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent - * across boot/reboots/suspend/resume/shutdown + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown * - * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need - * to make the SRM persistent. + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP + * we need to make the SRM persistent. * * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. * -The kernel cannot write to the file systems.
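The "always call get after set" verification scheme described above can be sketched from userspace. The sysfs path is the one named in the comment; the helper and buffer size are hypothetical:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define SRM_PATH "/sys/class/drm/card0/device/hdcp_srm"

/* 1. SET the candidate SRM, then 2. GET what PSP actually kept; a
 * mismatch means PSP rejected the blob as older or corrupt. */
static int set_and_verify_srm(const unsigned char *srm, size_t len)
{
	unsigned char readback[8192];
	ssize_t got;
	int fd, ok;

	fd = open(SRM_PATH, O_WRONLY);
	if (fd < 0)
		return -1;
	ok = write(fd, srm, len) == (ssize_t)len;
	close(fd);
	if (!ok)
		return -1;

	fd = open(SRM_PATH, O_RDONLY);
	if (fd < 0)
		return -1;
	got = read(fd, readback, sizeof(readback));
	close(fd);

	return (got == (ssize_t)len && !memcmp(srm, readback, len)) ? 0 : 1;
}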
@@ -699,8 +692,8 @@ ret: * * Usermode can read/write to/from PSP using the sysfs interface * For example: - * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile - * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm */ static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, @@ -709,10 +702,9 @@ static const struct bin_attribute data_attr = { .read = srm_data_read, }; - -struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, + struct cp_psp *cp_psp, struct dc *dc) { - int max_caps = dc->caps.max_links; struct hdcp_workqueue *hdcp_work; int i = 0; @@ -721,14 +713,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; - hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm), GFP_KERNEL); - if (hdcp_work->srm == NULL) + if (!hdcp_work->srm) goto fail_alloc_context; - hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm_temp), GFP_KERNEL); - if (hdcp_work->srm_temp == NULL) + if (!hdcp_work->srm_temp) goto fail_alloc_context; hdcp_work->max_link = max_caps; @@ -781,10 +775,5 @@ fail_alloc_context: kfree(hdcp_work); return NULL; - - - } - - diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index d9a482908380..4b230933b28e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -68,15 +68,15 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) } } -/* dm_helpers_parse_edid_caps - * - * Parse edid caps +/** + * dm_helpers_parse_edid_caps() - Parse edid caps * + * @link: current detected link * @edid: [in] pointer to edid - * edid_caps: [in] pointer to edid caps - * @return - * void - * */ + * @edid_caps: [in] pointer to edid caps + * + * Return: void + */ enum dc_edid_status dm_helpers_parse_edid_caps( struct dc_link *link, const struct dc_edid *edid, @@ -117,7 +117,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( if (sad_count <= 0) return result; - edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? 
sad_count : DC_MAX_AUDIO_DESC_COUNT; + edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT); for (i = 0; i < edid_caps->audio_mode_count; ++i) { struct cea_sad *sad = &sads[i]; @@ -255,7 +255,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( /* Accessing the connector state is required for vcpi_slots allocation * and directly relies on behaviour in commit check * that blocks before commit guaranteeing that the state - * is not gonna be swapped while still in use in commit tail */ + * is not gonna be swapped while still in use in commit tail + */ if (!aconnector || !aconnector->mst_root) return false; @@ -282,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same - * sequence. copy DRM MST allocation to dc */ + * sequence. copy DRM MST allocation to dc + */ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table); return true; @@ -426,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx, total = log_ctx->pos + n + 1; if (total > log_ctx->size) { - char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); + char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); if (buf) { memcpy(buf, log_ctx->buf, log_ctx->pos); @@ -633,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); if (ret < 0) { - DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); return false; } @@ -655,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); } - DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); + DC_LOG_DC("%s: success = %d\n", __func__, success); return success; } @@ -664,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) { unsigned char data[16] = {0}; - DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); + DC_LOG_DC("Start %s\n", __func__); // Step 2 data[0] = 'P'; @@ -722,7 +724,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) return; - DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); + DC_LOG_DC("Done %s\n", __func__); } /* MST Dock */ @@ -995,9 +997,8 @@ void dm_helpers_override_panel_settings( struct dc_panel_config *panel_config) { // Feature DSC - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) panel_config->dsc.disable_dsc_edp = true; - } } void *dm_helpers_allocate_gpu_mem( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 19f543ba7205..51467f132c26 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work) /* Call a DAL subcomponent which registered for interrupt notification * at INTERRUPT_LOW_IRQ_CONTEXT. 
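 * Note that work items queued from the top half run in process
 * context on system_highpri_wq (see amdgpu_dm_irq_schedule_work()
 * below), so these handlers may sleep; handlers registered at
 * INTERRUPT_HIGH_IRQ_CONTEXT are instead called synchronously from
 * amdgpu_dm_irq_immediate_work() and therefore must not.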
- * (The most common use is HPD interrupt) */ + * (The most common use is HPD interrupt) + */ } /* @@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, if (handler_removed == false) { /* Not necessarily an error - caller may not - * know the context. */ + * know the context. + */ return NULL; } @@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params, static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, irq_handler_idx handler_idx) { - if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) { + if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); return false; } @@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, /* This pointer will be stored by code which requested interrupt * registration. * The same pointer will be needed in order to unregister the - * interrupt. */ + * interrupt. + */ DRM_DEBUG_KMS( "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", @@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, if (handler_list == NULL) { /* If we got here, it means we searched all irq contexts - * for this irq source, but the handler was not found. */ + * for this irq source, but the handler was not found. + */ DRM_ERROR( "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", ih, irq_source); @@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' - * (because no code can schedule a new one). */ + * (because no code can schedule a new one). + */ lh = &adev->dm.irq_handler_list_low_tab[src]; DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); @@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); if (!list_empty(hnd_list_l)) { - list_for_each_safe (entry, tmp, hnd_list_l) { + list_for_each_safe(entry, tmp, hnd_list_l) { handler = list_entry( entry, struct amdgpu_dm_irq_handler_data, @@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, if (list_empty(handler_list)) return; - list_for_each_entry (handler_data, handler_list, list) { + list_for_each_entry(handler_data, handler_list, list) { if (queue_work(system_highpri_wq, &handler_data->work)) { work_queued = true; break; @@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, &adev->dm.irq_handler_list_high_tab[irq_source], list) { /* Call a subcomponent which registered for immediate - * interrupt notification */ + * interrupt notification + */ handler_data->handler(handler_data->handler_arg); } @@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, return 0; } -static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) +static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) { switch (type) { case AMDGPU_HPD_1: @@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type) static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, + unsigned int type, enum amdgpu_interrupt_state state) { enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); @@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, static inline int dm_irq_state(struct amdgpu_device *adev, struct 
amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state, const enum irq_type dal_irq_type, const char *func) @@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned crtc_id, + unsigned int crtc_id, enum amdgpu_interrupt_state state) { return dm_irq_state( @@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, true); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, true); @@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false); } - if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd_rx, false); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index b885c39bd16b..57230661132b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -296,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; + edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port); if (!edid) { @@ -827,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p &dsc_options, 0, params[i].timing, + dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), ¶ms[i].timing->dsc_cfg)) { params[i].timing->flags.DSC = 1; @@ -877,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, &dsc_options, - (int) kbps, param.timing, &dsc_config); + (int) kbps, param.timing, + dc_link_get_highest_encoding_format(param.aconnector->dc_link), + &dsc_config); return dsc_config.bits_per_pixel; } @@ -1115,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, ¶ms[count].bw_range)) - params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + &stream->timing, + dc_link_get_highest_encoding_format(dc_link), + ¶ms[count].bw_range)) + params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(dc_link)); count++; } @@ -1576,7 +1583,7 @@ static 
bool is_dsc_common_config_possible(struct dc_stream_state *stream, dsc_policy.min_target_bpp * 16, dsc_policy.max_target_bpp * 16, &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, bw_range); + &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 322668973747..8eeca160d434 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_ARGB16161616F, }; uint32_t format = plane_state->fb->format->format; unsigned int i; @@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier) return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, int pkrs = 0; u32 gb_addr_config; u8 i = 0; - unsigned swizzle_r_x; + unsigned int swizzle_r_x; uint64_t modifier_r_x; uint64_t modifier_dcc_best; uint64_t modifier_dcc_4k; @@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane, * caps list. */ - switch (plane->type) { - case DRM_PLANE_TYPE_PRIMARY: + if (plane->type == DRM_PLANE_TYPE_PRIMARY || + (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { if (num_formats >= max_formats) break; @@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane, formats[num_formats++] = DRM_FORMAT_XBGR16161616F; formats[num_formats++] = DRM_FORMAT_ABGR16161616F; } - break; + } else { + switch (plane->type) { + case DRM_PLANE_TYPE_OVERLAY: + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; - case DRM_PLANE_TYPE_OVERLAY: - for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { - if (num_formats >= max_formats) - break; + formats[num_formats++] = overlay_formats[i]; + } + break; - formats[num_formats++] = overlay_formats[i]; + case DRM_PLANE_TYPE_CURSOR: + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = cursor_formats[i]; + } + break; + + default: + break; } - break; - - case DRM_PLANE_TYPE_CURSOR: - for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { - if (num_formats >= max_formats) - break; - - formats[num_formats++] = cursor_formats[i]; - } - break; } return num_formats; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 75284e2cec74..848c5b4bb301 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type( if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { /* This clock is higher the validation clock. * Than means the previous one is the highest - * non-boosted one. 
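 * (Worked example, numbers assumed for illustration: with
 * validation_clks.engine_max_clock = 600000 kHz and levels
 * {300000, 450000, 750000} kHz, the loop trips at i = 2, so
 * num_levels is clamped from 3 to 2 and 450 MHz is kept as the
 * highest non-boosted level.)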
*/ + * non-boosted one. + */ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", dc_clks->num_levels, i); dc_clks->num_levels = i > 0 ? i : 1; @@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes( * TODO: expand this to other ASICs */ if ((adev->asic_type >= CHIP_POLARIS10) && - (adev->asic_type <= CHIP_VEGAM) && - !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, - (void *)wm_with_clock_ranges)) - return true; + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + (void *)wm_with_clock_ranges)) + return true; return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 4f61d4f257cd..08ce3bb8f640 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -166,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ if (vsync_rate_hz != 0) { unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; + num_frames_static = (30000 / frame_time_microsec) + 1; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c new file mode 100644 index 000000000000..32d3086c4cb7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -0,0 +1,183 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_replay.h" +#include "dc.h" +#include "dm_helpers.h" +#include "amdgpu_dm.h" +#include "modules/power/power_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "dc/inc/link.h" + +/* + * link_supports_replay() - check if the link supports replay + * @link: link + * @aconnector: aconnector + * + */ +static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state); + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + struct adaptive_sync_caps *as_caps = &link->dpcd_caps.adaptive_sync_caps; + + if (!state->freesync_capable) + return false; + + if (!aconnector->vsdb_info.replay_mode) + return false; + + // Check the eDP version + if (dpcd_caps->edp_rev < EDP_REVISION_13) + return false; + + if (!dpcd_caps->alpm_caps.bits.AUX_WAKE_ALPM_CAP) + return false; + + // Check adaptive sync support cap + if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) + return false; + + return true; +} + +/* + * amdgpu_dm_setup_replay() - setup replay configuration + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config pr_config; + union replay_debug_flags *debug_flags = NULL; + + // For eDP, if Replay is supported, return true to skip checks + if (link->replay_settings.config.replay_supported) + return true; + + if (!dc_is_embedded_signal(link->connector_signal)) + return false; + + if (link->panel_config.psr.disallow_replay) + return false; + + if (!link_supports_replay(link, aconnector)) + return false; + + // Mark Replay is supported in link and update related attributes + pr_config.replay_supported = true; + pr_config.replay_power_opt_supported = 0; + pr_config.replay_enable_option |= pr_enable_option_static_screen; + pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? true : false; + + if (!pr_config.replay_timing_sync_supported) + pr_config.replay_enable_option &= ~pr_enable_option_general_ui; + + debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; + debug_flags->u32All = 0; + debug_flags->bitfields.visual_confirm = + link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? 
true : false; + + link->replay_settings.replay_feature_enabled = true; + + init_replay_config(link, &pr_config); + + return true; +} + + +/* + * amdgpu_dm_replay_enable() - enable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) +{ + uint64_t state; + unsigned int retry_count; + bool replay_active = true; + const unsigned int max_retry = 1000; + bool force_static = true; + struct dc_link *link = NULL; + + + if (stream == NULL) + return false; + + link = stream->link; + + if (link == NULL) + return false; + + link->dc->link_srv->edp_setup_replay(link, stream); + + link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL); + + link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL); + + if (wait == true) { + + for (retry_count = 0; retry_count <= max_retry; retry_count++) { + dc_link_get_replay_state(link, &state); + if (replay_active) { + if (state != REPLAY_STATE_0 && + (!force_static || state == REPLAY_STATE_3)) + break; + } else { + if (state == REPLAY_STATE_0) + break; + } + udelay(500); + } + + /* assert if max retry hit */ + if (retry_count >= max_retry) + ASSERT(0); + } else { + /* To-do: Add trace log */ + } + + return true; +} + +/* + * amdgpu_dm_replay_disable() - disable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) +{ + + if (stream->link) { + DRM_DEBUG_DRIVER("Disabling replay...\n"); + stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL); + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h new file mode 100644 index 000000000000..01cba3cd6246 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -0,0 +1,46 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_REPLAY_H_ +#define AMDGPU_DM_AMDGPU_DM_REPLAY_H_ + +#include "amdgpu.h" + +enum replay_enable_option { + pr_enable_option_static_screen = 0x1, + pr_enable_option_mpo_video = 0x2, + pr_enable_option_full_screen_video = 0x4, + pr_enable_option_general_ui = 0x8, + pr_enable_option_static_screen_coasting = 0x10000, + pr_enable_option_mpo_video_coasting = 0x20000, + pr_enable_option_full_screen_video_coasting = 0x40000, +}; + + +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable); +bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream); + +#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 352e9afb85c6..e295a839ab47 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -24,7 +24,7 @@ */ #include "dm_services.h" -#include "conversion.h" +#include "basics/conversion.h" #define DIVIDER 10000 diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c index 84aeccf36b4b..6d2924114a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/vector.c +++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c @@ -50,12 +50,11 @@ bool dal_vector_construct( return true; } -static bool dal_vector_presized_costruct( - struct vector *vector, - struct dc_context *ctx, - uint32_t count, - void *initial_value, - uint32_t struct_size) +static bool dal_vector_presized_costruct(struct vector *vector, + struct dc_context *ctx, + uint32_t count, + void *initial_value, + uint32_t struct_size) { uint32_t i; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 27af9d3c2b73..6b3190447581 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -96,7 +96,7 @@ struct dc_bios *bios_parser_create( struct bp_init_data *init, enum dce_version dce_version) { - struct bios_parser *bp = NULL; + struct bios_parser *bp; bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL); if (!bp) @@ -2576,7 +2576,7 @@ static struct integrated_info *bios_parser_create_integrated_info( struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); - struct integrated_info *info = NULL; + struct integrated_info *info; info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL); @@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info( return NULL; } -static enum bp_result update_slot_layout_info( - struct dc_bios *dcb, - unsigned int i, - struct slot_layout_info *slot_layout_info, - unsigned int record_offset) +static enum bp_result update_slot_layout_info(struct dc_bios *dcb, + unsigned int i, + struct slot_layout_info *slot_layout_info, + unsigned int record_offset) { unsigned int j; struct bios_parser *bp; @@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info( } -static enum bp_result get_bracket_layout_record( - struct dc_bios *dcb, - unsigned int bracket_layout_id, - struct slot_layout_info *slot_layout_info) +static enum bp_result get_bracket_layout_record(struct dc_bios *dcb, + unsigned int bracket_layout_id, + struct slot_layout_info *slot_layout_info) { unsigned int i; unsigned int record_offset; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index cce47d3f1a13..484d62bcf2c2 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object( } /* from graphics_object_id, find display path which includes the object_id */ -static struct atom_display_object_path_v3 *get_bios_object_from_path_v3( - struct bios_parser *bp, - struct graphics_object_id id) +static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp, + struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = {0}; @@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info( return BP_RESULT_OK; } -static struct atom_hpd_int_record *get_hpd_record_for_path_v3( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -774,20 +772,20 @@ static enum bp_result bios_parser_get_device_tag( return BP_RESULT_BADINPUT; switch (bp->object_info_tbl.revision.minor) { - case 4: - default: + case 4: + default: /* getBiosObject will return MXM object */ - object = get_bios_object(bp, connector_object_id); + object = get_bios_object(bp, connector_object_id); if (!object) { BREAK_TO_DEBUGGER(); /* Invalid object id */ return BP_RESULT_BADINPUT; } - info->acpi_device = 0; /* BIOS no longer provides this */ - info->dev_id = device_type_from_device_id(object->device_tag); - break; - case 5: + info->acpi_device = 0; /* BIOS no longer provides this */ + info->dev_id = device_type_from_device_id(object->device_tag); + break; + case 5: object_path_v3 = get_bios_object_from_path_v3(bp, connector_object_id); if (!object_path_v3) { @@ -1582,13 +1580,13 @@ static bool bios_parser_is_device_id_supported( uint32_t mask = get_support_mask_for_device_id(id); switch (bp->object_info_tbl.revision.minor) { - case 4: - default: - return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0; - break; - case 5: - return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; - break; + case 4: + default: + return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0; + break; + case 5: + return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; + break; } return false; @@ -1757,7 +1755,7 @@ static enum bp_result bios_parser_get_firmware_info( case 2: case 3: result = get_firmware_info_v3_2(bp, info); - break; + break; case 4: result = get_firmware_info_v3_4(bp, info); break; @@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record( return NULL; } -static struct atom_connector_caps_record *get_connector_caps_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -2228,7 +2225,7 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( return BP_RESULT_BADINPUT; switch (bp->object_info_tbl.revision.minor) { - case 4: + case 4: default: object = get_bios_object(bp, object_id); @@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( return BP_RESULT_OK; } -static struct 
atom_connector_speed_record *get_connector_speed_cap_record( - struct bios_parser *bp, - struct atom_display_object_path_v3 *object) +static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp, + struct atom_display_object_path_v3 *object) { struct atom_common_record_header *header; uint32_t offset; @@ -3090,7 +3086,7 @@ static struct integrated_info *bios_parser_create_integrated_info( struct dc_bios *dcb) { struct bios_parser *bp = BP_FROM_DCB(dcb); - struct integrated_info *info = NULL; + struct integrated_info *info; info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL); @@ -3679,7 +3675,7 @@ struct dc_bios *firmware_parser_create( struct bp_init_data *init, enum dce_version dce_version) { - struct bios_parser *bp = NULL; + struct bios_parser *bp; bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL); if (!bp) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 6127d6045336..dcedf9645161 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -117,6 +117,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m continue; clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active; dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL); + dc->link_srv->edp_set_replay_allow_active(edp_link, &allow_active, false, false, NULL); } } @@ -137,6 +138,8 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) continue; dc->link_srv->edp_set_psr_allow_active(edp_link, &clk_mgr->psr_allow_active_cache, false, false, NULL); + dc->link_srv->edp_set_replay_allow_active(edp_link, + &clk_mgr->psr_allow_active_cache, false, false, NULL); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 2f7c8996b19d..7326b7565846 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -783,7 +783,6 @@ void dcn314_clk_mgr_construct( clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ - //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn314_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 925d6e13620e..3e0da873cf4c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -33,28 +33,26 @@ #define MAX_INSTANCE 6 #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 
0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; #define regBIF_BX_PF2_RSMU_INDEX 0x0000 #define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index d7de756301cf..09151cc56ce4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -45,24 +45,14 @@ #define MAX_INSTANCE 7 #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; -static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } }, - { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } }, - { { 0x00017000, 0x02402000, 0, 0, 0, 0 } }, - { { 0x00017200, 0x02402400, 0, 0, 0, 0 } }, - { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } }, - { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } }, - { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } }; - #define regCLK1_CLK_PLL_REQ 0x0237 #define regCLK1_CLK_PLL_REQ_BASE_IDX 0 @@ -73,9 +63,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L -#define REG(reg_name) \ - (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) - #define TO_CLK_MGR_DCN316(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn316, base) @@ -577,36 +564,6 @@ static struct clk_mgr_funcs dcn316_funcs = { }; extern struct clk_mgr_funcs dcn3_fpga_funcs; -static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) -{ - /* get FbMult value */ - struct fixed31_32 pll_req; - unsigned int fbmult_frac_val = 0; - unsigned int fbmult_int_val = 0; - - /* - * Register value of fbmult is in 8.16 format, we are converting to 31.32 - * to leverage the fix point operations available in driver - */ - - REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ - REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ - - pll_req = dc_fixpt_from_int(fbmult_int_val); - - /* - * since fractional part is only 16 bit in register definition but is 32 bit - * in our fix point definiton, need to shift left by 16 to obtain correct value - */ - pll_req.value |= fbmult_frac_val << 16; - - /* multiply by REFCLK period */ - pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); - - /* integer part is now VCO frequency in kHz */ - return dc_fixpt_floor(pll_req); -} - void dcn316_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn316 *clk_mgr, @@ -660,7 +617,8 @@ void dcn316_clk_mgr_construct( clk_mgr->base.smu_present = true; // Skip this for now as it did not work on DCN315, renable during bring up - clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); + clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* in case we don't get a value from the register, use default */ if (clk_mgr->base.base.dentist_vco_freq_khz == 0) diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c index 457a9254ae1c..3ed19197a755 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c @@ -34,23 +34,21 @@ #define MAX_INSTANCE 7 #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; }; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; }; static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; #define REG(reg_name) \ (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index cb992aca760d..984b52923534 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -297,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz, prev_dppclk_khz; + int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; @@ -555,6 +555,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, } } + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, true); + else + dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, false); + /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) { update_fclk = true; @@ -802,7 +807,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); else dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz); + clk_mgr_base->bw_params->max_memclk_mhz); } else { dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index fb524fe4ab26..700ce42036d7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -139,3 +139,10 @@ unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, ui return response; } + +void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable) +{ + smu_print("PMFW to wait for DMCUB ack for MCLK : %d\n", enable); + + dcn32_smu_send_msg_with_param(clk_mgr, 0x14, enable ? 
1 : 0, NULL); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index a68038a41972..a34c258c19dc 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -43,5 +43,6 @@ void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); +void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); #endif /* __DCN32_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d133e4186a52..566d7045b2de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -586,18 +586,15 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream, bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, bool enable, bool continuous) { - int i; struct pipe_ctx *pipe; struct crc_params param; struct timing_generator *tg; - for (i = 0; i < MAX_PIPES; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) - break; - } + pipe = resource_get_otg_master_for_stream( + &dc->current_state->res_ctx, stream); + /* Stream not found */ - if (i == MAX_PIPES) + if (pipe == NULL) return false; /* By default, capture the full frame */ @@ -1047,8 +1044,10 @@ static void disable_all_writeback_pipes_for_stream( stream->writeback_info[i].wb_enabled = false; } -static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, - struct dc_stream_state *stream, bool lock) +static void apply_ctx_interdependent_lock(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream, + bool lock) { int i; @@ -1062,7 +1061,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex // Copied conditions that were previously in dce110_apply_ctx_for_surface if (stream == pipe_ctx->stream) { - if (!pipe_ctx->top_pipe && + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); } @@ -1923,6 +1922,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_trigger_sync(dc, context); + /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ + for (i = 0; i < context->stream_count; i++) { + uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; + + context->streams[i]->update_flags.raw = 0xFFFFFFFF; + context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; + } + /* Program all planes within new context*/ if (dc->hwss.program_front_end_for_ctx) { dc->hwss.interdependent_update_lock(dc, context, true); @@ -2001,6 +2008,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; + /* Clear update flags that were set earlier to avoid redundant programming */ + for (i = 0; i < context->stream_count; i++) { + 
context->streams[i]->update_flags.raw = 0x0; + } + old_state = dc->current_state; dc->current_state = context; @@ -2480,9 +2492,7 @@ static enum surface_update_type get_scaling_info_update_type( if (!u->scaling_info) return UPDATE_TYPE_FAST; - if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width - || u->scaling_info->clip_rect.height != u->surface->clip_rect.height - || u->scaling_info->dst_rect.width != u->surface->dst_rect.width + if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width || u->scaling_info->dst_rect.height != u->surface->dst_rect.height || u->scaling_info->scaling_quality.integer_scaling != u->surface->scaling_quality.integer_scaling @@ -2686,96 +2696,6 @@ static enum surface_update_type check_update_surfaces_for_stream( return overall_type; } -static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect) -{ - int view_height, view_width, clip_x, clip_y, clip_width, clip_height; - - view_height = src.height; - view_width = src.width; - - clip_x = clip_rect.x; - clip_y = clip_rect.y; - - clip_width = clip_rect.width; - clip_height = clip_rect.height; - - /* check for centered video accounting for off by 1 scaling truncation */ - if ((view_height - clip_y - clip_height <= clip_y + 1) && - (view_width - clip_x - clip_width <= clip_x + 1) && - (view_height - clip_y - clip_height >= clip_y - 1) && - (view_width - clip_x - clip_width >= clip_x - 1)) { - - /* when OS scales up/down to letter box, it may end up - * with few blank pixels on the border due to truncating. - * Add offset margin to account for this - */ - if (clip_x <= 4 || clip_y <= 4) - return true; - } - - return false; -} - -static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc, - struct dc_surface_update *srf_updates, int surface_count, - enum surface_update_type update_type) -{ - enum surface_update_type new_update_type = update_type; - int i, j; - struct pipe_ctx *pipe = NULL; - struct dc_stream_state *stream; - - /* Check that we are in windowed MPO with ODM - * - look for MPO pipe by scanning pipes for first pipe matching - * surface that has moved ( position change ) - * - MPO pipe will have top pipe - * - check that top pipe has ODM pointer - */ - if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) { - for (i = 0; i < surface_count; i++) { - if (srf_updates[i].surface && srf_updates[i].scaling_info - && srf_updates[i].surface->update_flags.bits.position_change) { - - for (j = 0; j < dc->res_pool->pipe_count; j++) { - if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) { - pipe = &dc->current_state->res_ctx.pipe_ctx[j]; - stream = pipe->stream; - break; - } - } - - if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream - && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) { - struct rect old_clip_rect, new_clip_rect; - bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle; - bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle; - - old_clip_rect = srf_updates[i].surface->clip_rect; - new_clip_rect = srf_updates[i].scaling_info->clip_rect; - - old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); - old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2))); - old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right; - - new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + 
(stream->src.width/2))); - new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2))); - new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right; - - if (old_clip_rect_left && new_clip_rect_middle) - new_update_type = UPDATE_TYPE_FULL; - else if (old_clip_rect_middle && new_clip_rect_right) - new_update_type = UPDATE_TYPE_FULL; - else if (old_clip_rect_right && new_clip_rect_middle) - new_update_type = UPDATE_TYPE_FULL; - else if (old_clip_rect_middle && new_clip_rect_left) - new_update_type = UPDATE_TYPE_FULL; - } - } - } - } - return new_update_type; -} - /* * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) * @@ -2807,10 +2727,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream( updates[i].surface->update_flags.raw = 0xFFFFFFFF; } - if (type == UPDATE_TYPE_MED) - type = check_boundary_crossing_for_windowed_mpo_with_odm(dc, - updates, surface_count, type); - if (type == UPDATE_TYPE_FAST) { // If there's an available clock comparator, we use that. if (dc->clk_mgr->funcs->are_clock_states_equal) { @@ -3245,7 +3161,7 @@ static void commit_planes_do_stream_update(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); @@ -3370,6 +3286,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s && stream->ctx->dce_version >= DCN_VERSION_3_1) return true; + if (stream->link->replay_settings.config.replay_supported) + return true; + return false; } @@ -3524,16 +3443,9 @@ static void commit_planes_for_stream_fast(struct dc *dc, struct pipe_ctx *top_pipe_to_program = NULL; dc_z10_restore(dc); - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - - if (!pipe_ctx->top_pipe && - !pipe_ctx->prev_odm_pipe && - pipe_ctx->stream && - pipe_ctx->stream == stream) { - top_pipe_to_program = pipe_ctx; - } - } + top_pipe_to_program = resource_get_otg_master_for_stream( + &context->res_ctx, + stream); if (dc->debug.visual_confirm) { for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -3582,9 +3494,9 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->block_sequence_steps); /* Clear update flags so next flip doesn't have redundant programming * (if there's no stream update, the update flags are not cleared). + * Surface updates are cleared unconditionally at the beginning of each flip, + * so no need to clear here. 
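 * (Stream update flags now follow a fixed lifecycle: forced to
 * 0xFFFFFFFF when a full commit starts, consumed while programming,
 * and cleared back to 0 before the context swap, see the
 * dc_commit_state_no_check() hunks above, so the next fast flip
 * starts from a clean slate.)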
*/ - if (top_pipe_to_program->plane_state) - top_pipe_to_program->plane_state->update_flags.raw = 0; if (top_pipe_to_program->stream) top_pipe_to_program->stream->update_flags.raw = 0; } @@ -3638,16 +3550,9 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; - - if (!pipe_ctx->top_pipe && - !pipe_ctx->prev_odm_pipe && - pipe_ctx->stream && - pipe_ctx->stream == stream) { - top_pipe_to_program = pipe_ctx; - } - } + top_pipe_to_program = resource_get_otg_master_for_stream( + &context->res_ctx, + stream); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -4088,9 +3993,9 @@ static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context = dc_create_state(dc); - enum pipe_split_policy tmp_mpc_policy; - bool temp_dynamic_odm_policy; - bool temp_subvp_policy; + enum pipe_split_policy tmp_mpc_policy = 0; + bool temp_dynamic_odm_policy = 0; + bool temp_subvp_policy = 0; enum dc_status ret = DC_ERROR_UNEXPECTED; unsigned int i, j; unsigned int pipe_in_use = 0; @@ -4284,7 +4189,8 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c return false; } -static bool full_update_required(struct dc_surface_update *srf_updates, +static bool full_update_required(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) @@ -4292,6 +4198,7 @@ static bool full_update_required(struct dc_surface_update *srf_updates, int i; struct dc_stream_status *stream_status; + const struct dc_state *context = dc->current_state; for (i = 0; i < surface_count; i++) { if (srf_updates && @@ -4302,7 +4209,11 @@ static bool full_update_required(struct dc_surface_update *srf_updates, srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || - srf_updates[i].blend_tf)) + srf_updates[i].blend_tf || + srf_updates[i].surface->force_full_update || + (srf_updates[i].flip_addr && + srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || + !is_surface_in_context(context, srf_updates[i].surface))) return true; } @@ -4340,18 +4251,21 @@ static bool full_update_required(struct dc_surface_update *srf_updates, if (stream_status == NULL || stream_status->plane_count != surface_count) return true; } + if (dc->idle_optimizations_allowed) + return true; return false; } -static bool fast_update_only(struct dc_fast_update *fast_update, +static bool fast_update_only(struct dc *dc, + struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_update *stream_update, struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) - && !full_update_required(srf_updates, surface_count, stream_update, stream); + && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } bool dc_update_planes_and_stream(struct dc *dc, @@ -4369,8 +4283,8 @@ bool dc_update_planes_and_stream(struct dc *dc, * cause underflow. Apply stream configuration with minimal pipe * split first to avoid unsupported transitions for active pipes. 
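 * (commit_minimal_transition_state() above performs that intermediate
 * commit: it saves the MPC split, dynamic ODM and SubVP policies into
 * the tmp_*/temp_* locals, applies the most conservative configuration
 * first, then restores the saved policies.)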
*/ - bool force_minimal_pipe_splitting; - bool is_plane_addition; + bool force_minimal_pipe_splitting = 0; + bool is_plane_addition = 0; populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( @@ -4423,7 +4337,7 @@ bool dc_update_planes_and_stream(struct dc *dc, } update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -4569,7 +4483,7 @@ void dc_commit_updates_for_stream(struct dc *dc, TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(fast_update, srf_updates, surface_count, stream_update, stream) && + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -5245,6 +5159,9 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (link->psr_settings.psr_feature_enabled) return; + if (link->replay_settings.replay_feature_enabled) + return; + /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -5273,3 +5190,70 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } + +/***************************************************************************** + * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause + * ABM + * @dc: dc structure + * @stream: stream where vsync int state changed + * @pData: abm hw states + * + ****************************************************************************/ +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData) +{ + int i; + int edp_num; + struct pipe_ctx *pipe = NULL; + struct dc_link *link = stream->sink->link; + struct dc_link *edp_links[MAX_NUM_EDP]; + + + /*find primary pipe associated with stream*/ + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) + break; + } + + if (i == MAX_PIPES) { + ASSERT(0); + return false; + } + + dc_get_edp_links(dc, edp_links, &edp_num); + + /* Determine panel inst */ + for (i = 0; i < edp_num; i++) + if (edp_links[i] == link) + break; + + if (i == edp_num) + return false; + + if (pipe->stream_res.abm && + pipe->stream_res.abm->funcs->save_restore) + return pipe->stream_res.abm->funcs->save_restore( + pipe->stream_res.abm, + i, + pData); + return false; +} + +void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) +{ + unsigned int i; + bool subvp_in_use = false; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) { + subvp_in_use = true; + break; + } + } + properties->cursor_size_limit = subvp_in_use ? 
64 : dc->caps.max_cursor_size; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index cb2bf9a466f5..f99ec1b0efaf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -187,6 +187,7 @@ static bool is_ycbcr709_limited_type( ret = true; return ret; } + static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space) { enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 18e098568cb4..ed94187c2afa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link) return link->dc->link_srv->dp_get_verified_link_cap(link); } +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link) +{ + if (dc_is_dp_signal(link->connector_signal)) { + if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE && + link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE) + return DC_LINK_ENCODING_HDMI_TMDS; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_8b_10b_ENCODING) + return DC_LINK_ENCODING_DP_8b_10b; + else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) == + DP_128b_132b_ENCODING) + return DC_LINK_ENCODING_DP_128b_132b; + } else if (dc_is_hdmi_signal(link->connector_signal)) { + } + + return DC_LINK_ENCODING_UNSPECIFIED; +} + bool dc_link_is_dp_sink_present(struct dc_link *link) { return link->dc->link_srv->dp_is_sink_present(link); @@ -449,6 +467,11 @@ bool dc_link_setup_psr(struct dc_link *link, return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context); } +bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state) +{ + return link->dc->link_srv->edp_get_replay_state(link, state); +} + bool dc_link_wait_for_t12(struct dc_link *link) { return link->dc->link_srv->edp_wait_for_t12(link); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2f3d9a698486..f7b51aca6020 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -45,6 +45,8 @@ #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" #include "link/hwss/link_hwss_hpo_dp.h" +#include "link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h" +#include "link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" @@ -69,9 +71,20 @@ #include "../dcn32/dcn32_resource.h" #include "../dcn321/dcn321_resource.h" +#define VISUAL_CONFIRM_BASE_DEFAULT 3 +#define VISUAL_CONFIRM_BASE_MIN 1 +#define VISUAL_CONFIRM_BASE_MAX 10 +/* we choose 240 because it is a common denominator of common v addressable + * such as 2160, 1440, 1200, 960. So we take 1/240 portion of v addressable as + * the visual confirm dpp offset height. So visual confirm height can stay + * relatively the same independent from timing used. 
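+ * (e.g. 2160 / 240 = 9 lines and 1200 / 240 = 5 lines, so the
+ * confirm bar height tracks the vertical resolution.)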
+ */ +#define VISUAL_CONFIRM_DPP_OFFSET_DENO 240 #define DC_LOGGER_INIT(logger) +#define UNABLE_TO_SPLIT -1 + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -719,10 +732,10 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } -int get_num_mpc_splits(struct pipe_ctx *pipe) +int resource_get_num_mpc_splits(const struct pipe_ctx *pipe) { int mpc_split_count = 0; - struct pipe_ctx *other_pipe = pipe->bottom_pipe; + const struct pipe_ctx *other_pipe = pipe->bottom_pipe; while (other_pipe && other_pipe->plane_state == pipe->plane_state) { mpc_split_count++; @@ -737,48 +750,46 @@ int get_num_mpc_splits(struct pipe_ctx *pipe) return mpc_split_count; } -int get_num_odm_splits(struct pipe_ctx *pipe) +int resource_get_num_odm_splits(const struct pipe_ctx *pipe) { int odm_split_count = 0; - struct pipe_ctx *next_pipe = pipe->next_odm_pipe; - while (next_pipe) { + + pipe = resource_get_otg_master(pipe); + + while (pipe->next_odm_pipe) { odm_split_count++; - next_pipe = next_pipe->next_odm_pipe; - } - pipe = pipe->prev_odm_pipe; - while (pipe) { - odm_split_count++; - pipe = pipe->prev_odm_pipe; + pipe = pipe->next_odm_pipe; } return odm_split_count; } -static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx) +static int get_odm_split_index(struct pipe_ctx *pipe_ctx) { - *split_count = get_num_odm_splits(pipe_ctx); - *split_idx = 0; - if (*split_count == 0) { - /*Check for mpc split*/ - struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + int index = 0; - *split_count = get_num_mpc_splits(pipe_ctx); - while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { - (*split_idx)++; - split_pipe = split_pipe->top_pipe; - } + pipe_ctx = resource_get_opp_head(pipe_ctx); + if (!pipe_ctx) + return 0; - /* MPO window on right side of ODM split */ - if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) - (*split_idx)++; - } else { - /*Get odm split index*/ - struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; - - while (split_pipe) { - (*split_idx)++; - split_pipe = split_pipe->prev_odm_pipe; - } + while (pipe_ctx->prev_odm_pipe) { + index++; + pipe_ctx = pipe_ctx->prev_odm_pipe; } + + return index; +} + +static int get_mpc_split_index(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + int index = 0; + + while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { + index++; + split_pipe = split_pipe->top_pipe; + } + + return index; } /* @@ -800,82 +811,366 @@ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx) } } +static struct rect intersect_rec(const struct rect *r0, const struct rect *r1) +{ + struct rect rec; + int r0_x_end = r0->x + r0->width; + int r1_x_end = r1->x + r1->width; + int r0_y_end = r0->y + r0->height; + int r1_y_end = r1->y + r1->height; + + rec.x = r0->x > r1->x ? r0->x : r1->x; + rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x; + rec.y = r0->y > r1->y ? r0->y : r1->y; + rec.height = r0_y_end > r1_y_end ? 
r1_y_end - rec.y : r0_y_end - rec.y;
+
+	/* in case that there is no intersection */
+	if (rec.width < 0 || rec.height < 0)
+		memset(&rec, 0, sizeof(rec));
+
+	return rec;
+}
+
+static struct rect shift_rec(const struct rect *rec_in, int x, int y)
+{
+	struct rect rec_out = *rec_in;
+
+	rec_out.x += x;
+	rec_out.y += y;
+
+	return rec_out;
+}
+
+static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx)
+{
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	int odm_slice_count = resource_get_num_odm_splits(pipe_ctx) + 1;
+	int odm_slice_idx = get_odm_split_index(pipe_ctx);
+	bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
+	int h_active = stream->timing.h_addressable +
+			stream->timing.h_border_left +
+			stream->timing.h_border_right;
+	int odm_slice_width = h_active / odm_slice_count;
+	struct rect odm_rec;
+
+	odm_rec.x = odm_slice_width * odm_slice_idx;
+	odm_rec.width = is_last_odm_slice ?
+			/* last slice width is the remainder of h_active */
+			h_active - odm_slice_width * (odm_slice_count - 1) :
+			/* odm slice width is the floor of h_active / count */
+			odm_slice_width;
+	odm_rec.y = 0;
+	odm_rec.height = stream->timing.v_addressable +
+			stream->timing.v_border_bottom +
+			stream->timing.v_border_top;
+
+	return odm_rec;
+}
+
+static struct rect calculate_plane_rec_in_timing_active(
+		struct pipe_ctx *pipe_ctx,
+		const struct rect *rec_in)
+{
+	/*
+	 * The following diagram shows an example where we map a 1920x1200
+	 * desktop to a 2560x1440 timing with a plane rect in the middle
+	 * of the screen. To map a plane rect from Stream Source to Timing
+	 * Active space, we first multiply the plane's x and y by the stream
+	 * scaling ratios (i.e. 2304/1920 horizontal and 1440/1200 vertical),
+	 * then we add the stream destination offsets (i.e. 128 horizontal,
+	 * 0 vertical). This gives us the plane rect's position in Timing
+	 * Active. However, we have to remove the fractional part. The rule is
+	 * that we find the left/right and top/bottom positions and round each
+	 * value to the nearest integer.
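+	 * For example, a plane whose left edge is at x = 100 in Stream Source
+	 * maps to 128 + round(100 * 2304 / 1920) = 128 + 120 = 248 in Timing
+	 * Active, using the formulas listed after the diagrams below.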
+	 *
+	 * Stream Source Space
+	 * ------------
+	 *        __________________________________________________
+	 *       |Stream Source (1920 x 1200)  ^                    |
+	 *       |                             y                    |
+	 *       |         <------- w --------|>                    |
+	 *       |          __________________V                     |
+	 *       |<-- x -->|Plane//////////////| ^                  |
+	 *       |         |(pre scale)////////| |                  |
+	 *       |         |///////////////////| |                  |
+	 *       |         |///////////////////| h                  |
+	 *       |         |///////////////////| |                  |
+	 *       |         |///////////////////| |                  |
+	 *       |         |///////////////////| V                  |
+	 *       |                                                  |
+	 *       |                                                  |
+	 *       |__________________________________________________|
+	 *
+	 *
+	 * Timing Active Space
+	 * ---------------------------------
+	 *
+	 *       Timing Active (2560 x 1440)
+	 *        __________________________________________________
+	 *       |*****|    Stream Destination (2304 x 1440)  |*****|
+	 *       |*****|                                      |*****|
+	 *       |<128>|                                      |*****|
+	 *       |*****|      __________________              |*****|
+	 *       |*****|     |Plane/////////////|             |*****|
+	 *       |*****|     |(post scale)//////|             |*****|
+	 *       |*****|     |//////////////////|             |*****|
+	 *       |*****|     |//////////////////|             |*****|
+	 *       |*****|     |//////////////////|             |*****|
+	 *       |*****|     |//////////////////|             |*****|
+	 *       |*****|                                      |*****|
+	 *       |*****|                                      |*****|
+	 *       |*****|                                      |*****|
+	 *       |*****|______________________________________|*****|
+	 *
+	 * So the resulting formulas are shown below:
+	 *
+	 * recout_x = 128 + round(plane_x * 2304 / 1920)
+	 * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
+	 * recout_y = 0 + round(plane_y * 1440 / 1200)
+	 * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+	 *
+	 * NOTE: fixed point division is not error free. To reduce errors
+	 * introduced by fixed point division, we divide only after
+	 * multiplication is complete.
+	 */
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct rect rec_out = {0};
+	struct fixed31_32 temp;
+
+	temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width,
+			stream->src.width);
+	rec_out.x = stream->dst.x + dc_fixpt_round(temp);
+
+	temp = dc_fixpt_from_fraction(
+			(rec_in->x + rec_in->width) * stream->dst.width,
+			stream->src.width);
+	rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
+
+	temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height,
+			stream->src.height);
+	rec_out.y = stream->dst.y + dc_fixpt_round(temp);
+
+	temp = dc_fixpt_from_fraction(
+			(rec_in->y + rec_in->height) * stream->dst.height,
+			stream->src.height);
+	rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
+
+	return rec_out;
+}
+
+static struct rect calculate_mpc_slice_in_timing_active(
+		struct pipe_ctx *pipe_ctx,
+		struct rect *plane_clip_rec)
+{
+	const struct dc_stream_state *stream = pipe_ctx->stream;
+	int mpc_slice_count = resource_get_num_mpc_splits(pipe_ctx) + 1;
+	int mpc_slice_idx = get_mpc_split_index(pipe_ctx);
+	int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
+	struct rect mpc_rec;
+
+	mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+	mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+	mpc_rec.height = plane_clip_rec->height;
+	mpc_rec.y = plane_clip_rec->y;
+	ASSERT(mpc_slice_count == 1 ||
+			stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+			mpc_rec.width % 2 == 0);
+
+	/* extra pixels in the division remainder need to go to pipes after
+	 * the extra pixel index minus one (epimo) defined here as:
+	 */
+	if (mpc_slice_idx > epimo) {
+		mpc_rec.x += mpc_slice_idx - epimo - 1;
+		mpc_rec.width += 1;
+	}
+
+	if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+		ASSERT(mpc_rec.height % 2 == 0);
+		mpc_rec.height /= 2;
+	}
+	return mpc_rec;
+}
+
+static void
adjust_recout_for_visual_confirm(struct rect *recout,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct dc *dc = pipe_ctx->stream->ctx->dc;
+	int dpp_offset, base_offset;
+
+	if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
+		return;
+
+	dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
+	dpp_offset *= pipe_ctx->plane_res.dpp->inst;
+
+	if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) &&
+			dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
+		base_offset = dc->debug.visual_confirm_rect_height;
+	else
+		base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
+
+	recout->height -= base_offset;
+	recout->height -= dpp_offset;
+}
+
+/*
+ * The function maps a plane clip from Stream Source Space to ODM Slice Space
+ * and calculates the rec of the overlapping area of the MPC slice of the
+ * plane clip, the ODM slice associated with the pipe context and the stream
+ * destination rec.
+ */
 static void calculate_recout(struct pipe_ctx *pipe_ctx)
 {
-	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-	const struct dc_stream_state *stream = pipe_ctx->stream;
-	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-	struct rect surf_clip = plane_state->clip_rect;
-	bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-	int split_count, split_idx;
-
-	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
-	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
-		split_idx = 0;
 	/*
-	 * Only the leftmost ODM pipe should be offset by a nonzero distance
+	 * A plane clip represents the desired plane size and position in
+	 * Stream Source Space. Stream Source is the destination where all
+	 * planes are blended (i.e. positioned, scaled and overlaid). It is a
+	 * canvas where all planes associated with the current stream are
+	 * drawn together. After Stream Source is completed, we will further
+	 * scale and reposition the entire canvas of the stream source to
+	 * Stream Destination in Timing Active Space. This could be due to a
+	 * display overscan adjustment, where we need to rescale and
+	 * reposition all the planes so they fit into a TV with overscan, or
+	 * due to downscale and upscale features such as GPU scaling or VSR.
+	 *
+	 * This two step blending is a virtual procedure in software. In
+	 * hardware there is no such thing as Stream Source. All planes are
+	 * blended once in Timing Active Space. Software virtualizes a Stream
+	 * Source space to decouple the math complexity so scaling param
+	 * calculation focuses on one step at a time.
+	 *
+	 * In the following two diagrams, the user applied a 10% overscan
+	 * adjustment, so the Stream Source needs to be scaled down a little
+	 * before mapping to Timing Active Space. As a result the Plane Clip
+	 * is also scaled down by the same ratio, and the Plane Clip position
+	 * (i.e. x and y) with respect to Stream Source is scaled down as
+	 * well. To map it in Timing Active Space, additional x and y offsets
+	 * from Stream Destination are added to Plane Clip as well.
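+	 * (With the 10% adjustment, Stream Destination in the diagrams below
+	 * is 3840 * 0.9 = 3456 pixels wide and 2160 * 0.9 = 1944 pixels tall.)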
+	 *
+	 * Stream Source Space
+	 * ------------
+	 *        __________________________________________________
+	 *       |Stream Source (3840 x 2160)          ^            |
+	 *       |                                     y            |
+	 *       |                                     |            |
+	 *       |          ___________________________V            |
+	 *       |<-- x -->|Plane Clip/////////|                    |
+	 *       |         |(pre scale)////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |         |///////////////////|                    |
+	 *       |                                                  |
+	 *       |                                                  |
+	 *       |__________________________________________________|
+	 *
+	 *
+	 * Timing Active Space (3840 x 2160)
+	 * ---------------------------------
+	 *
+	 *       Timing Active
+	 *        __________________________________________________
+	 *       | y_____________________________________________  |
+	 *       |x |Stream Destination (3456 x 1944)            | |
+	 *       |  |                                            | |
+	 *       |  |        __________________                  | |
+	 *       |  |       |Plane Clip////////|                 | |
+	 *       |  |       |(post scale)//////|                 | |
+	 *       |  |       |//////////////////|                 | |
+	 *       |  |       |//////////////////|                 | |
+	 *       |  |       |//////////////////|                 | |
+	 *       |  |       |//////////////////|                 | |
+	 *       |  |                                            | |
+	 *       |  |                                            | |
+	 *       |  |____________________________________________| |
+	 *       |__________________________________________________|
+	 *
+	 *
+	 * In Timing Active Space a plane clip can be further sliced into
+	 * pieces called MPC slices. Each Pipe Context is responsible for
+	 * processing only one MPC slice, so the plane processing workload can
+	 * be distributed to multiple DPP pipes. MPC slices can be blended
+	 * together into a single ODM slice. Each ODM slice is responsible for
+	 * processing a portion of Timing Active divided horizontally, so the
+	 * output pixel processing workload can be distributed to multiple OPP
+	 * pipes. All ODM slices are mapped together in the ODM block, so MPC
+	 * slices belonging to different ODM slices can be pieced together to
+	 * form a single image in Timing Active. An MPC slice must belong to a
+	 * single ODM slice; if an MPC slice crosses an ODM slice boundary, it
+	 * needs to be divided into two MPC slices, one for each ODM slice.
+	 *
+	 * In the following diagram the output pixel processing workload is
+	 * divided horizontally into two ODM slices, one for each OPP blend
+	 * tree. The OPP0 blend tree is responsible for processing the left
+	 * half of Timing Active, while the OPP2 blend tree is responsible for
+	 * processing the right half.
+	 *
+	 * The plane has two MPC slices. However, since the right MPC slice
+	 * crosses the ODM boundary, two DPP pipes are needed, one for each
+	 * OPP blend tree (i.e. DPP1 for the OPP0 blend tree and DPP2 for the
+	 * OPP2 blend tree).
+	 *
+	 * Assume that we have a Pipe Context associated with OPP0 and DPP1
+	 * working on processing the plane in the diagram. We want to know the
+	 * width and height of the shaded rectangle and its relative position
+	 * with respect to ODM slice0. This is called the recout of the pipe
+	 * context.
+	 *
+	 * Planes can be of arbitrary size and position, and there can be an
+	 * arbitrary number of MPC and ODM slices. The algorithm needs to take
+	 * all scenarios into account.
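+	 *
+	 * In short, the recout is the intersection of the pipe's MPC slice of
+	 * the plane clip with its ODM slice, shifted by
+	 * (-odm_slice.x, -odm_slice.y) so that it is expressed relative to
+	 * the ODM slice origin, which is exactly what the code below computes.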
+ * + * Timing Active Space (3840 x 2160) + * --------------------------------- + * + * Timing Active + * __________________________________________________ + * |OPP0(ODM slice0)^ |OPP2(ODM slice1) | + * | y | | + * | | <- w -> | + * | _____V________|____ | + * | |DPP0 ^ |DPP1 |DPP2| | + * |<------ x |-----|->|/////| | | + * | | | |/////| | | + * | | h |/////| | | + * | | | |/////| | | + * | |_____V__|/////|____| | + * | | | + * | | | + * | | | + * |_________________________|________________________| + * + * */ - if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) { - /* MPO window on right side of ODM split */ - data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) * - stream->dst.width / stream->src.width; - } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) { - data->recout.x = stream->dst.x; - if (stream->src.x < surf_clip.x) - data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width - / stream->src.width; - } else - data->recout.x = 0; + struct rect plane_clip; + struct rect mpc_slice_of_plane_clip; + struct rect odm_slice; + struct rect overlapping_area; - if (stream->src.x > surf_clip.x) - surf_clip.width -= stream->src.x - surf_clip.x; - data->recout.width = surf_clip.width * stream->dst.width / stream->src.width; - if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width) - data->recout.width = stream->dst.x + stream->dst.width - data->recout.x; - - data->recout.y = stream->dst.y; - if (stream->src.y < surf_clip.y) - data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height - / stream->src.height; - else if (stream->src.y > surf_clip.y) - surf_clip.height -= stream->src.y - surf_clip.y; - - data->recout.height = surf_clip.height * stream->dst.height / stream->src.height; - if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height) - data->recout.height = stream->dst.y + stream->dst.height - data->recout.y; - - /* Handle h & v split */ - if (split_tb) { - ASSERT(data->recout.height % 2 == 0); - data->recout.height /= 2; - } else if (split_count) { - if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) { - /* extra pixels in the division remainder need to go to pipes after - * the extra pixel index minus one(epimo) defined here as: - */ - int epimo = split_count - data->recout.width % (split_count + 1); - - data->recout.x += (data->recout.width / (split_count + 1)) * split_idx; - if (split_idx > epimo) - data->recout.x += split_idx - epimo - 1; - ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0); - data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 
1 : 0); - } else { - /* odm */ - if (split_idx == split_count) { - /* rightmost pipe is the remainder recout */ - data->recout.width -= data->h_active * split_count - data->recout.x; - - /* ODM combine cases with MPO we can get negative widths */ - if (data->recout.width < 0) - data->recout.width = 0; - - data->recout.x = 0; - } else - data->recout.width = data->h_active - data->recout.x; - } + plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx, + &pipe_ctx->plane_state->clip_rect); + /* guard plane clip from drawing beyond stream dst here */ + plane_clip = intersect_rec(&plane_clip, + &pipe_ctx->stream->dst); + mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active( + pipe_ctx, &plane_clip); + odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); + overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice); + if (overlapping_area.height > 0 && + overlapping_area.width > 0) { + /* shift the overlapping area so it is with respect to current + * ODM slice's position + */ + pipe_ctx->plane_res.scl_data.recout = shift_rec( + &overlapping_area, + -odm_slice.x, -odm_slice.y); + adjust_recout_for_visual_confirm( + &pipe_ctx->plane_res.scl_data.recout, + pipe_ctx); + } else { + /* if there is no overlap, zero recout */ + memset(&pipe_ctx->plane_res.scl_data.recout, 0, + sizeof(struct rect)); } + } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) @@ -997,33 +1292,30 @@ static void calculate_init_and_vp( static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; - const struct dc_stream_state *stream = pipe_ctx->stream; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = plane_state->src_rect; + struct rect recout_dst_in_active_timing; + struct rect recout_clip_in_active_timing; + struct rect recout_clip_in_recout_dst; + struct rect overlap_in_active_timing; + struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx); int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; - int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; - calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx); - /* - * recout full is what the recout would have been if we didnt clip - * the source plane at all. We only care about left(ro_lb) and top(ro_tb) - * offsets of recout within recout full because those are the directions - * we scan from and therefore the only ones that affect inits. 
- */ - recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x) - * stream->dst.width / stream->src.width; - recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y) - * stream->dst.height / stream->src.height; - if (pipe_ctx->prev_odm_pipe && split_idx) - ro_lb = data->h_active * split_idx - recout_full_x; - else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe) - ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x; + recout_clip_in_active_timing = shift_rec( + &data->recout, odm_slice.x, odm_slice.y); + recout_dst_in_active_timing = calculate_plane_rec_in_timing_active( + pipe_ctx, &plane_state->dst_rect); + overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing, + &recout_dst_in_active_timing); + if (overlap_in_active_timing.width > 0 && + overlap_in_active_timing.height > 0) + recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing, + -recout_dst_in_active_timing.x, + -recout_dst_in_active_timing.y); else - ro_lb = data->recout.x - recout_full_x; - ro_tb = data->recout.y - recout_full_y; - ASSERT(ro_lb >= 0 && ro_tb >= 0); + memset(&recout_clip_in_recout_dst, 0, sizeof(struct rect)); /* * Work in recout rotation since that requires less transformations @@ -1042,7 +1334,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width, data->taps.h_taps, @@ -1052,7 +1344,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.width); calculate_init_and_vp( flip_horz_scan_dir, - ro_lb, + recout_clip_in_recout_dst.x, data->recout.width, src.width / vpc_div, data->taps.h_taps_c, @@ -1062,7 +1354,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport_c.width); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height, data->taps.v_taps, @@ -1072,7 +1364,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) &data->viewport.height); calculate_init_and_vp( flip_vert_scan_dir, - ro_tb, + recout_clip_in_recout_dst.y, data->recout.height, src.height / vpc_div, data->taps.v_taps_c, @@ -1097,6 +1389,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -1121,30 +1414,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dst.y += timing->v_border_top; /* Calculate H and V active size */ - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + - timing->h_border_left + timing->h_border_right; - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + - timing->v_border_top + timing->v_border_bottom; - if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) { - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1; + pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; + pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; - DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1, - pipe_ctx->prev_odm_pipe ? 
pipe_ctx->prev_odm_pipe->pipe_idx : -1); - } /* ODM + windows MPO, where window is on either right or left ODM half */ - else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) { - - pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1; - - DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n", - __func__, - pipe_ctx->pipe_idx, - pipe_ctx->top_pipe->pipe_idx, - pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1, - pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1); - } /* depends on h_active */ calculate_recout(pipe_ctx); /* depends on pixel format */ @@ -1226,17 +1498,12 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width; } - if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) { - if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE || - pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) - res = false; - } else { - /* Clamp minimum viewport size */ - if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE) - pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE; - if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) - pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE; - } + /* Clamp minimum viewport size */ + if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE) + pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE; + if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) + pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE; + DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n" "src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n", @@ -1288,7 +1555,7 @@ enum dc_status resource_build_scaling_params_for_context( return DC_OK; } -struct pipe_ctx *find_idle_secondary_pipe( +struct pipe_ctx *resource_find_free_secondary_pipe_legacy( struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) @@ -1348,28 +1615,175 @@ struct pipe_ctx *find_idle_secondary_pipe( return secondary_pipe; } -struct pipe_ctx *resource_get_head_pipe_for_stream( +int resource_find_free_pipe_used_in_cur_mpc_blending_tree( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct pipe_ctx *cur_opp_head) +{ + const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe; + struct pipe_ctx *new_pipe; + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + + while (cur_sec_dpp) { + /* find a free pipe used in current opp blend tree, + * this is to avoid MPO pipe switching to different opp blending + * tree + */ + new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx]; + if (resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = cur_sec_dpp->pipe_idx; + break; + } + cur_sec_dpp = cur_sec_dpp->bottom_pipe; + } + + return free_pipe_idx; +} + +int recource_find_free_pipe_not_used_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < 
pool->pipe_count; i++) {
+		cur_pipe = &cur_res_ctx->pipe_ctx[i];
+		new_pipe = &new_res_ctx->pipe_ctx[i];
+
+		if (resource_is_pipe_type(cur_pipe, FREE_PIPE) &&
+				resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = i;
+			break;
+		}
+	}
+
+	return free_pipe_idx;
+}
+
+int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+		const struct resource_context *cur_res_ctx,
+		struct resource_context *new_res_ctx,
+		const struct resource_pool *pool)
+{
+	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+	const struct pipe_ctx *new_pipe, *cur_pipe;
+	int i;
+
+	for (i = 0; i < pool->pipe_count; i++) {
+		cur_pipe = &cur_res_ctx->pipe_ctx[i];
+		new_pipe = &new_res_ctx->pipe_ctx[i];
+
+		if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+				!resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+				resource_is_for_mpcc_combine(cur_pipe) &&
+				resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = i;
+			break;
+		}
+	}
+
+	return free_pipe_idx;
+}
+
+int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
+		const struct resource_pool *pool)
+{
+	int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+	const struct pipe_ctx *new_pipe;
+	int i;
+
+	for (i = 0; i < pool->pipe_count; i++) {
+		new_pipe = &new_res_ctx->pipe_ctx[i];
+
+		if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+			free_pipe_idx = i;
+			break;
+		}
+	}
+
+	return free_pipe_idx;
+}
+
+bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
+{
+#ifdef DBG
+	if (pipe_ctx->stream == NULL) {
+		/* a free pipe with dangling states */
+		ASSERT(!pipe_ctx->plane_state);
+		ASSERT(!pipe_ctx->prev_odm_pipe);
+		ASSERT(!pipe_ctx->next_odm_pipe);
+		ASSERT(!pipe_ctx->top_pipe);
+		ASSERT(!pipe_ctx->bottom_pipe);
+	} else if (pipe_ctx->top_pipe) {
+		/* a secondary DPP pipe must be assigned to a plane */
+		ASSERT(pipe_ctx->plane_state);
+	}
+	/* Add more checks here to prevent corrupted pipe ctx. It is very hard
+	 * to debug this issue afterwards because we can't pinpoint the code
+	 * location causing inconsistent pipe context states.
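+	 *
+	 * Note that the pipe types tested below are not mutually exclusive:
+	 * an OTG master is by definition also an OPP head, and either one
+	 * also qualifies as a DPP pipe once a plane state is attached, so
+	 * e.g. resource_is_pipe_type(pipe, OTG_MASTER) &&
+	 * resource_is_pipe_type(pipe, DPP_PIPE) identifies an unsplit pipe
+	 * that is driving a plane.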
+ */ +#endif + switch (type) { + case OTG_MASTER: + return !pipe_ctx->prev_odm_pipe && + !pipe_ctx->top_pipe && + pipe_ctx->stream; + case OPP_HEAD: + return !pipe_ctx->top_pipe && pipe_ctx->stream; + case DPP_PIPE: + return pipe_ctx->plane_state && pipe_ctx->stream; + case FREE_PIPE: + return !pipe_ctx->plane_state && !pipe_ctx->stream; + default: + return false; + } +} + +bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx) +{ + return resource_get_num_mpc_splits(pipe_ctx) > 0; +} + +struct pipe_ctx *resource_get_otg_master_for_stream( struct resource_context *res_ctx, struct dc_stream_state *stream) { int i; for (i = 0; i < MAX_PIPES; i++) { - if (res_ctx->pipe_ctx[i].stream == stream - && !res_ctx->pipe_ctx[i].top_pipe - && !res_ctx->pipe_ctx[i].prev_odm_pipe) + if (res_ctx->pipe_ctx[i].stream == stream && + resource_is_pipe_type(&res_ctx->pipe_ctx[i], OTG_MASTER)) return &res_ctx->pipe_ctx[i]; } return NULL; } -static struct pipe_ctx *resource_get_tail_pipe( - struct resource_context *res_ctx, +struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *otg_master = resource_get_opp_head(pipe_ctx); + + while (otg_master->prev_odm_pipe) + otg_master = otg_master->prev_odm_pipe; + return otg_master; +} + +struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *opp_head = (struct pipe_ctx *) pipe_ctx; + + ASSERT(!resource_is_pipe_type(opp_head, FREE_PIPE)); + while (opp_head->top_pipe) + opp_head = opp_head->top_pipe; + return opp_head; +} + +static struct pipe_ctx *get_tail_pipe( struct pipe_ctx *head_pipe) { - struct pipe_ctx *tail_pipe; - - tail_pipe = head_pipe->bottom_pipe; + struct pipe_ctx *tail_pipe = head_pipe->bottom_pipe; while (tail_pipe) { head_pipe = tail_pipe; @@ -1379,44 +1793,6 @@ static struct pipe_ctx *resource_get_tail_pipe( return head_pipe; } -/* - * A free_pipe for a stream is defined here as a pipe - * that has no surface attached yet - */ -static struct pipe_ctx *acquire_free_pipe_for_head( - struct dc_state *context, - const struct resource_pool *pool, - struct pipe_ctx *head_pipe) -{ - int i; - struct resource_context *res_ctx = &context->res_ctx; - - if (!head_pipe->plane_state) - return head_pipe; - - /* Re-use pipe already acquired for this stream if available*/ - for (i = pool->pipe_count - 1; i >= 0; i--) { - if (res_ctx->pipe_ctx[i].stream == head_pipe->stream && - !res_ctx->pipe_ctx[i].plane_state) { - return &res_ctx->pipe_ctx[i]; - } - } - - /* - * At this point we have no re-useable pipe for this stream and we need - * to acquire an idle one to satisfy the request - */ - - if (!pool->funcs->acquire_idle_pipe_for_layer) { - if (!pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer) - return NULL; - else - return pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer(context, pool, head_pipe->stream, head_pipe); - } - - return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); -} - static int acquire_first_split_pipe( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1445,31 +1821,87 @@ static int acquire_first_split_pipe( split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; split_pipe->pipe_idx = i; - split_pipe->stream = stream; - return i; - } else if (split_pipe->prev_odm_pipe && - split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { - split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; - if (split_pipe->next_odm_pipe) - split_pipe->next_odm_pipe->prev_odm_pipe = 
split_pipe->prev_odm_pipe; - - if (split_pipe->prev_odm_pipe->plane_state) - resource_build_scaling_params(split_pipe->prev_odm_pipe); - - memset(split_pipe, 0, sizeof(*split_pipe)); - split_pipe->stream_res.tg = pool->timing_generators[i]; - split_pipe->plane_res.hubp = pool->hubps[i]; - split_pipe->plane_res.ipp = pool->ipps[i]; - split_pipe->plane_res.dpp = pool->dpps[i]; - split_pipe->stream_res.opp = pool->opps[i]; - split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; - split_pipe->pipe_idx = i; - split_pipe->stream = stream; return i; } } - return -1; + return UNABLE_TO_SPLIT; +} + +static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe, + struct dc_plane_state *plane_state, + struct dc_state *context) +{ + struct pipe_ctx *opp_head_pipe = otg_master_pipe; + + while (opp_head_pipe) { + if (opp_head_pipe->plane_state) { + ASSERT(0); + return false; + } + opp_head_pipe->plane_state = plane_state; + opp_head_pipe = opp_head_pipe->next_odm_pipe; + } + + return true; +} + +static void insert_secondary_dpp_pipe_with_plane(struct pipe_ctx *opp_head_pipe, + struct pipe_ctx *sec_pipe, struct dc_plane_state *plane_state) +{ + struct pipe_ctx *tail_pipe = get_tail_pipe(opp_head_pipe); + + tail_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = tail_pipe; + if (tail_pipe->prev_odm_pipe) { + ASSERT(tail_pipe->prev_odm_pipe->bottom_pipe); + sec_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; + tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = sec_pipe; + } + sec_pipe->plane_state = plane_state; +} + +/* for each opp head pipe of an otg master pipe, acquire a secondary dpp pipe + * and add the plane. So the plane is added to all MPC blend trees associated + * with the otg master pipe. + */ +static bool acquire_secondary_dpp_pipes_and_add_plane( + struct pipe_ctx *otg_master_pipe, + struct dc_plane_state *plane_state, + struct dc_state *new_ctx, + struct dc_state *cur_ctx, + struct resource_pool *pool) +{ + struct pipe_ctx *opp_head_pipe, *sec_pipe; + + if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) + return false; + + opp_head_pipe = otg_master_pipe; + while (opp_head_pipe) { + sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe( + cur_ctx, + new_ctx, + pool, + opp_head_pipe); + if (!sec_pipe) { + /* try tearing down MPCC combine */ + int pipe_idx = acquire_first_split_pipe( + &new_ctx->res_ctx, pool, + otg_master_pipe->stream); + + if (pipe_idx >= 0) + sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; + } + + if (!sec_pipe) + return false; + + insert_secondary_dpp_pipe_with_plane(opp_head_pipe, sec_pipe, + plane_state); + opp_head_pipe = opp_head_pipe->next_odm_pipe; + } + return true; } bool dc_add_plane_to_context( @@ -1478,244 +1910,39 @@ bool dc_add_plane_to_context( struct dc_plane_state *plane_state, struct dc_state *context) { - int i; struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe; + struct pipe_ctx *otg_master_pipe; struct dc_stream_status *stream_status = NULL; - struct pipe_ctx *prev_right_head = NULL; - struct pipe_ctx *free_right_pipe = NULL; - struct pipe_ctx *prev_left_head = NULL; + bool added = false; - DC_LOGGER_INIT(stream->ctx->logger); - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } + stream_status = dc_stream_get_status_from_state(context, stream); if (stream_status == NULL) { dm_error("Existing stream not found; failed to attach surface!\n"); - return false; - } - - - 
if (stream_status->plane_count == MAX_SURFACE_NUM) { + goto out; + } else if (stream_status->plane_count == MAX_SURFACE_NUM) { dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", plane_state, MAX_SURFACE_NUM); - return false; + goto out; } - head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream); - - if (!head_pipe) { - dm_error("Head pipe not found for stream_state %p !\n", stream); - return false; + otg_master_pipe = resource_get_otg_master_for_stream( + &context->res_ctx, stream); + if (otg_master_pipe->plane_state == NULL) + added = add_plane_to_opp_head_pipes(otg_master_pipe, + plane_state, context); + else + added = acquire_secondary_dpp_pipes_and_add_plane( + otg_master_pipe, plane_state, context, + dc->current_state, pool); + if (added) { + stream_status->plane_states[stream_status->plane_count] = + plane_state; + stream_status->plane_count++; + dc_plane_state_retain(plane_state); } - /* retain new surface, but only once per stream */ - dc_plane_state_retain(plane_state); - - while (head_pipe) { - free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); - - if (!free_pipe) { - int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); - if (pipe_idx >= 0) - free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; - } - - if (!free_pipe) { - dc_plane_state_release(plane_state); - return false; - } - - free_pipe->plane_state = plane_state; - - if (head_pipe != free_pipe) { - tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe); - ASSERT(tail_pipe); - - /* ODM + window MPO, where MPO window is on right half only */ - if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) && - tail_pipe->next_odm_pipe) { - - /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on - * the right side, then we will invalidate a 2nd one on the right side - */ - if (head_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { - dc_plane_state_release(plane_state); - return false; - } - - DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d tail_pipe->next_odm_pipe:%d\n", - __func__, - free_pipe->pipe_idx, - tail_pipe->next_odm_pipe ? tail_pipe->next_odm_pipe->pipe_idx : -1); - - /* - * We want to avoid the case where the right side already has a pipe assigned to - * it and is different from free_pipe ( which would cause trigger a pipe - * reallocation ). - * Check the old context to see if the right side already has a pipe allocated - * - If not, continue to use free_pipe - * - If the right side already has a pipe, use that pipe instead if its available - */ - - /* - * We also want to avoid the case where with three plane ( 2 MPO videos ), we have - * both videos on the left side so one of the videos is invalidated. Then we - * move the invalidated video back to the right side. If the order of the plane - * states is such that the right MPO plane is processed first, the free pipe - * selected by the head will be the left MPO pipe. But since there was no right - * MPO pipe, it will assign the free pipe to the right MPO pipe instead and - * a pipe reallocation will occur. 
- * Check the old context to see if the left side already has a pipe allocated - * - If not, continue to use free_pipe - * - If the left side is already using this pipe, then pick another pipe for right - */ - - prev_right_head = &dc->current_state->res_ctx.pipe_ctx[tail_pipe->next_odm_pipe->pipe_idx]; - if ((prev_right_head->bottom_pipe) && - (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) { - free_right_pipe = acquire_free_pipe_for_head(context, pool, tail_pipe->next_odm_pipe); - } else { - prev_left_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->pipe_idx]; - if ((prev_left_head->bottom_pipe) && - (free_pipe->pipe_idx == prev_left_head->bottom_pipe->pipe_idx)) { - free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); - } - } - - if (free_right_pipe) { - free_pipe->stream = NULL; - memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource)); - memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource)); - free_pipe->plane_state = NULL; - free_pipe->pipe_idx = 0; - free_right_pipe->plane_state = plane_state; - free_pipe = free_right_pipe; - } - - free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg; - free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm; - free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio; - free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source; - - free_pipe->top_pipe = tail_pipe->next_odm_pipe; - tail_pipe->next_odm_pipe->bottom_pipe = free_pipe; - } else if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) - && head_pipe->next_odm_pipe) { - - /* For ODM + window MPO, support 3 plane ( 2 MPO ) case. - * Here we have a desktop ODM + left window MPO and a new MPO window appears - * on the right side only. It fails the first case, because tail_pipe is the - * left window MPO, so it has no next_odm_pipe. So in this scenario, we check - * for head_pipe->next_odm_pipe instead - */ - DC_LOG_SCALER("%s - ODM + win MPO (left) + win MPO (right). free_pipe:%d head_pipe->next_odm:%d\n", - __func__, - free_pipe->pipe_idx, - head_pipe->next_odm_pipe ? head_pipe->next_odm_pipe->pipe_idx : -1); - - /* - * We want to avoid the case where the right side already has a pipe assigned to - * it and is different from free_pipe ( which would cause trigger a pipe - * reallocation ). 
- * Check the old context to see if the right side already has a pipe allocated - * - If not, continue to use free_pipe - * - If the right side already has a pipe, use that pipe instead if its available - */ - prev_right_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->next_odm_pipe->pipe_idx]; - if ((prev_right_head->bottom_pipe) && - (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) { - free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe->next_odm_pipe); - if (free_right_pipe) { - free_pipe->stream = NULL; - memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource)); - memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource)); - free_pipe->plane_state = NULL; - free_pipe->pipe_idx = 0; - free_right_pipe->plane_state = plane_state; - free_pipe = free_right_pipe; - } - } - - free_pipe->stream_res.tg = head_pipe->next_odm_pipe->stream_res.tg; - free_pipe->stream_res.abm = head_pipe->next_odm_pipe->stream_res.abm; - free_pipe->stream_res.opp = head_pipe->next_odm_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = head_pipe->next_odm_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = head_pipe->next_odm_pipe->stream_res.audio; - free_pipe->clock_source = head_pipe->next_odm_pipe->clock_source; - - free_pipe->top_pipe = head_pipe->next_odm_pipe; - head_pipe->next_odm_pipe->bottom_pipe = free_pipe; - } else { - - /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on - * the left side, then we will invalidate a 2nd one on the left side - */ - if (head_pipe->next_odm_pipe && tail_pipe->top_pipe) { - dc_plane_state_release(plane_state); - return false; - } - - free_pipe->stream_res.tg = tail_pipe->stream_res.tg; - free_pipe->stream_res.abm = tail_pipe->stream_res.abm; - free_pipe->stream_res.opp = tail_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = tail_pipe->stream_res.audio; - free_pipe->clock_source = tail_pipe->clock_source; - - free_pipe->top_pipe = tail_pipe; - tail_pipe->bottom_pipe = free_pipe; - - /* Connect MPO pipes together if MPO window is in the centre */ - if (!(free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= - free_pipe->stream->src.x + free_pipe->stream->src.width/2))) { - if (!free_pipe->next_odm_pipe && - tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { - free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe; - tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe; - } - if (!free_pipe->prev_odm_pipe && - tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) { - free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; - tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe; - } - } - } - } - - /* ODM + window MPO, where MPO window is on left half only */ - if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= - free_pipe->stream->src.x + free_pipe->stream->src.width/2)) { - DC_LOG_SCALER("%s - ODM + window MPO(left). free_pipe:%d\n", - __func__, - free_pipe->pipe_idx); - break; - } - /* ODM + window MPO, where MPO window is on right half only */ - if (free_pipe->plane_state && - (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) { - DC_LOG_SCALER("%s - ODM + window MPO(right). 
free_pipe:%d\n", - __func__, - free_pipe->pipe_idx); - break; - } - - head_pipe = head_pipe->next_odm_pipe; - } - /* assign new surfaces*/ - stream_status->plane_states[stream_status->plane_count] = plane_state; - - stream_status->plane_count++; - - return true; +out: + return added; } bool dc_remove_plane_from_context( @@ -2219,7 +2446,7 @@ enum dc_status dc_remove_stream_from_ctx( { int i; struct dc_context *dc_ctx = dc->ctx; - struct pipe_ctx *del_pipe = resource_get_head_pipe_for_stream(&new_ctx->res_ctx, stream); + struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(&new_ctx->res_ctx, stream); struct pipe_ctx *odm_pipe; if (!del_pipe) { @@ -3473,7 +3700,7 @@ enum dc_status resource_map_clock_resources( { /* acquire new resources */ const struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream( + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (!pipe_ctx) @@ -3863,10 +4090,7 @@ void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (!pipe_ctx_old->stream) - continue; - - if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + if (!resource_is_pipe_type(pipe_ctx_old, OTG_MASTER)) continue; if (!pipe_ctx->stream || @@ -3990,11 +4214,13 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link, * with an hpo encoder. Or we can return a very dummy one that doesn't * do work for all functions */ - return get_hpo_dp_link_hwss(); + return (requires_fixed_vs_pe_retimer_hpo_link_hwss(link) ? + get_hpo_fixed_vs_pe_retimer_dp_link_hwss() : get_hpo_dp_link_hwss()); else if (can_use_dpia_link_hwss(link, link_res)) return get_dpia_link_hwss(); else if (can_use_dio_link_hwss(link, link_res)) - return get_dio_link_hwss(); + return (requires_fixed_vs_pe_retimer_dio_link_hwss(link)) ? + get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss(); else return get_virtual_link_hwss(); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 6e11d2b701f8..01fe2d2fd241 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -71,8 +71,7 @@ static bool dc_stream_construct(struct dc_stream_state *stream, /* Copy audio modes */ /* TODO - Remove this translation */ - for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) - { + for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) { stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count; stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code; stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate; @@ -306,6 +305,32 @@ bool dc_optimize_timing_for_fsft( } #endif +static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream) +{ + uint32_t refresh_rate; + struct dc *dc = stream->ctx->dc; + + refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 + + stream->timing.v_total * stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, stream->timing.h_total); + + /* If there's any stream that fits the SubVP high refresh criteria, + * we must return true. 
This is because cursor updates are asynchronous
+	 * with full updates, so we could transition into a SubVP config and
+	 * remain in HW cursor mode if there's no cursor update, which will
+	 * then cause corruption.
+	 */
+	if ((refresh_rate >= 120 && refresh_rate <= 175 &&
+			stream->timing.v_addressable >= 1440 &&
+			stream->timing.v_addressable <= 2160) &&
+			(dc->current_state->stream_count > 1 ||
+			(dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+		return true;
+
+	return false;
+}
+
 /*
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
  */
@@ -334,12 +359,13 @@ bool dc_stream_set_cursor_attributes(
 
 	/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
 	 * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
-	 * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
-	 * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
-	 *
-	 * [< 120hz is a requirement for SubVP configs]
+	 * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+	 * 2. If not SubVP high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+	 * 3. If not SubVP high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
 	 */
 	if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) {
+		if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
+			return false;
 		if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
 				((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
 			return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 63948170fd6d..0d0bef8eb331 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -40,12 +40,14 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
+struct abm_save_restore;
+
 /* forward declaration */
 struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.241"
+#define DC_VER "3.2.247"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -428,6 +430,7 @@ enum visual_confirm {
 	VISUAL_CONFIRM_SWAPCHAIN = 6,
 	VISUAL_CONFIRM_FAMS = 7,
 	VISUAL_CONFIRM_SWIZZLE = 9,
+	VISUAL_CONFIRM_REPLAY = 12,
 	VISUAL_CONFIRM_SUBVP = 14,
 	VISUAL_CONFIRM_MCLK_SWITCH = 16,
 };
@@ -506,7 +509,7 @@ enum dcn_zstate_support_state {
 	DCN_ZSTATE_SUPPORT_DISALLOW,
 };
 
-/**
+/*
  * struct dc_clocks - DC pipe clocks
  *
  * For any clocks that may differ per pipe only the max is stored in this
@@ -728,7 +731,7 @@ struct resource_pool;
 struct dce_hwseq;
 struct link_service;
 
-/**
+/*
  * struct dc_debug_options - DC debug struct
  *
  * This struct provides a simple mechanism for developers to change some
@@ -756,7 +759,7 @@ struct dc_debug_options {
 	bool use_max_lb;
 	enum dcc_option disable_dcc;
 
-	/**
+	/*
 	 * @pipe_split_policy: Define which pipe split policy is used by the
 	 * display core.
*/ @@ -861,6 +864,7 @@ struct dc_debug_options { bool psr_skip_crtc_disable; union dpia_debug_options dpia_debug; bool disable_fixed_vs_aux_timeout_wa; + uint32_t fixed_vs_aux_delay_config_wa; bool force_disable_subvp; bool force_subvp_mclk_switch; bool allow_sw_cursor_fallback; @@ -902,9 +906,18 @@ struct dc_debug_options { uint32_t fpo_vactive_max_blank_us; bool enable_legacy_fast_update; bool disable_dc_mode_overwrite; + bool replay_skip_crtc_disabled; }; struct gpu_info_soc_bounding_box_v1_0; + +/* Generic structure that can be used to query properties of DC. More fields + * can be added as required. + */ +struct dc_current_properties { + unsigned int cursor_size_limit; +}; + struct dc { struct dc_debug_options debug; struct dc_versions versions; @@ -1334,7 +1347,7 @@ struct dc_validation_set { struct dc_stream_state *stream; /** - * @plane_state: Surface state + * @plane_states: Surface state */ struct dc_plane_state *plane_states[MAX_SURFACES]; @@ -1409,10 +1422,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); +void dc_set_disable_128b_132b_stream_overhead(bool disable); + /* The function returns minimum bandwidth required to drive a given timing * return - minimum required timing bandwidth in kbps. */ -uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing); +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding); /* Link Interfaces */ /* @@ -1481,6 +1498,7 @@ struct dc_link { enum engine_id eng_id; bool test_pattern_enabled; + enum dp_test_pattern current_test_pattern; union compliance_test_state compliance_test_state; void *priv; @@ -1514,8 +1532,11 @@ struct dc_link { enum edp_revision edp_revision; union dpcd_sink_ext_caps dpcd_sink_ext_caps; + struct backlight_settings backlight_settings; struct psr_settings psr_settings; + struct replay_settings replay_settings; + /* Drive settings read from integrated info table */ struct dc_lane_settings bios_forced_drive_settings; @@ -1849,6 +1870,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format( */ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link); +/* Get the highest encoding format that the link supports; highest meaning the + * encoding format which supports the maximum bandwidth. + * + * @link - a link with DP RX connection + * return - highest encoding format link supports. + */ +enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link); + /* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected * to a link with dp connector signal type. * @link - a link with dp connector signal type @@ -1983,6 +2012,8 @@ bool dc_link_setup_psr(struct dc_link *dc_link, const struct dc_stream_state *stream, struct psr_config *psr_config, struct psr_context *psr_context); +bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state); + /* On eDP links this function call will stall until T12 has elapsed. * If the panel is not in power off state, this function will return * immediately. 
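
A minimal caller sketch (not part of the patch; the helper name is
hypothetical): with the signature change to dc_bandwidth_in_kbps_from_timing()
above, callers are expected to obtain the link encoding from the new query and
pass it through, roughly as follows.

	/* hypothetical helper; assumes a stream whose link has been verified */
	static uint32_t stream_required_bw_kbps(const struct dc_stream_state *stream)
	{
		enum dc_link_encoding_format enc =
			dc_link_get_highest_encoding_format(stream->link);

		/* minimum bandwidth needed to drive this timing at that encoding */
		return dc_bandwidth_in_kbps_from_timing(&stream->timing, enc);
	}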
@@ -2230,6 +2261,11 @@ void dc_z10_save_init(struct dc *dc); bool dc_is_dmub_outbox_supported(struct dc *dc); bool dc_enable_dmub_notifications(struct dc *dc); +bool dc_abm_save_restore( + struct dc *dc, + struct dc_stream_state *stream, + struct abm_save_restore *pData); + void dc_enable_dmub_outbox(struct dc *dc); bool dc_process_dmub_aux_transfer_async(struct dc *dc, @@ -2255,6 +2291,8 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, void dc_print_dmub_diagnostic_data(const struct dc *dc); +void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties); + /* DSC Interfaces */ #include "dc_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index c753c6f30dd7..4c5ef3ef8dbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -31,6 +31,7 @@ #include "core_types.h" #include "../basics/conversion.h" #include "cursor_reg_cache.h" +#include "resource.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger @@ -356,7 +357,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->stream && pipe->stream->fpo_in_use) { + if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000; @@ -381,6 +382,9 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) { union dmub_rb_cmd cmd = { 0 }; + if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) + return; + memset(&cmd, 0, sizeof(cmd)); /* Prepare fw command */ @@ -528,7 +532,8 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) - if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; // Find the SubVP pipe @@ -725,12 +730,10 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) - continue; - /* For SubVP pipe count, only count the top most (ODM / MPC) pipe */ - if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && + if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } @@ -747,12 +750,14 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, * Any ODM or MPC splits being used in SubVP will be handled internally in * populate_subvp_cmd_pipe_info */ - if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream && - !pipe->top_pipe && !pipe->prev_odm_pipe && + if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && + pipe->stream->mall_stream_config.paired_stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); - } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE && - !pipe->top_pipe && !pipe->prev_odm_pipe) { + } else if 
(resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && + pipe->stream->mall_stream_config.type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); @@ -894,6 +899,9 @@ static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx) pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1) return true; + if (pipe_ctx->stream->link->replay_settings.config.replay_supported) + return true; + return false; } @@ -1018,3 +1026,32 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv) return true; return srv->hw_funcs.is_psrsu_supported(srv); } + +void dc_dmub_srv_enable_dpia_trace(const struct dc *dc) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + struct dmub_srv *dmub; + enum dmub_status status; + static const uint32_t timeout_us = 30; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) { + DC_LOG_ERROR("%s: invalid parameters.", __func__); + return; + } + + dmub = dc_dmub_srv->dmub; + + status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us); + if (status != DMUB_STATUS_OK) { + DC_LOG_ERROR("timeout updating trace buffer mask word\n"); + return; + } + + status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us); + if (status != DMUB_STATUS_OK) { + DC_LOG_ERROR("timeout applying trace buffer mask\n"); + return; + } + + DC_LOG_DEBUG("Enabled DPIA trace\n"); +} \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 099f94b6107c..bb3fe162dd93 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -87,4 +87,7 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx); bool dc_dmub_check_min_version(struct dmub_srv *srv); + +void dc_dmub_srv_enable_dpia_trace(const struct dc *dc); + #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 55139d7bf422..cfaa39c5dd16 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1117,6 +1117,11 @@ struct edp_psr_info { uint8_t force_psrsu_cap; }; +struct replay_info { + uint8_t pixel_deviation_per_line; + uint8_t max_deviation_line; +}; + struct dprx_states { bool cable_id_written; }; @@ -1236,6 +1241,8 @@ struct dpcd_caps { uint8_t edp_rev; union edp_alpm_caps alpm_caps; struct edp_psr_info psr_info; + + struct replay_info pr_info; }; union dpcd_sink_ext_caps { @@ -1276,6 +1283,28 @@ union dpcd_psr_configuration { unsigned char raw; }; +union replay_enable_and_configuration { + struct { + unsigned char FREESYNC_PANEL_REPLAY_MODE :1; + unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1; + unsigned char STATE_TRANSITION_ERROR_DETECTION :1; + unsigned char RESERVED0 :1; + unsigned char RESERVED1 :4; + } bits; + unsigned char raw; +}; + +union dpcd_replay_configuration { + struct { + unsigned char STATE_TRANSITION_ERROR_STATUS : 1; + unsigned char DESYNC_ERROR_STATUS : 1; + unsigned char SINK_DEVICE_REPLAY_STATUS : 3; + unsigned char SINK_FRAME_LOCKED : 2; + unsigned char RESERVED : 1; + } bits; + unsigned char raw; +}; + union dpcd_alpm_configuration { struct {
unsigned char ENABLE : 1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 9491b76d61f5..fe3078b8789e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -73,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( @@ -81,6 +82,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0ce7728a5a4b..445ad79001ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -189,7 +189,6 @@ struct dc_panel_patch { unsigned int disable_fams; unsigned int skip_avmute; unsigned int mst_start_top_delay; - unsigned int delay_disable_aux_intercept_ms; }; struct dc_edid_caps { @@ -879,7 +878,7 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_0_mps; /* In MPs */ uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; - bool is_dp; + bool is_dp; /* Decoded format */ }; struct dc_golden_table { @@ -902,6 +901,14 @@ enum dc_gpu_mem_alloc_type { DC_MEM_ALLOC_TYPE_AGP }; +enum dc_link_encoding_format { + DC_LINK_ENCODING_UNSPECIFIED = 0, + DC_LINK_ENCODING_DP_8b_10b, + DC_LINK_ENCODING_DP_128b_132b, + DC_LINK_ENCODING_HDMI_TMDS, + DC_LINK_ENCODING_HDMI_FRL +}; + enum dc_psr_version { DC_PSR_VERSION_1 = 0, DC_PSR_VERSION_SU_1 = 1, @@ -995,6 +1002,10 @@ struct link_mst_stream_allocation_table { struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; }; +struct backlight_settings { + uint32_t backlight_millinits; +}; + /* PSR feature flags */ struct psr_settings { bool psr_feature_enabled; // PSR is supported by sink @@ -1014,6 +1025,45 @@ struct psr_settings { unsigned int psr_power_opt; }; +enum replay_coasting_vtotal_type { + PR_COASTING_TYPE_NOM = 0, + PR_COASTING_TYPE_STATIC, + PR_COASTING_TYPE_FULL_SCREEN_VIDEO, + PR_COASTING_TYPE_TEST_HARNESS, + PR_COASTING_TYPE_NUM, +}; + +union replay_error_status { + struct { + unsigned char STATE_TRANSITION_ERROR :1; + unsigned char LINK_CRC_ERROR :1; + unsigned char DESYNC_ERROR :1; + unsigned char RESERVED :5; + } bits; + unsigned char raw; +}; + +struct replay_config { + bool replay_supported; // Replay feature is supported + unsigned int replay_power_opt_supported; // Power opt flags that are supported + bool replay_smu_opt_supported; // SMU optimization is supported + unsigned int replay_enable_option; // Replay enablement option + uint32_t debug_flags; // Replay debug flags + bool replay_timing_sync_supported; // Replay desync is supported + union replay_error_status replay_error_status; // Replay error status +}; + +/* Replay feature flags */ +struct replay_settings { + struct replay_config config; // Replay configuration + bool replay_feature_enabled; // Replay feature is ready for activating + bool replay_allow_active; // Replay is currently active + unsigned int replay_power_opt_active; // Power opt flags that are activated currently + bool replay_smu_opt_enable; // SMU optimization is 
enabled + uint16_t coasting_vtotal; // Current Coasting vtotal + uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table +}; + /* To split out "global" and "per-panel" config settings. * Add a struct dc_panel_config under dc_link */ @@ -1040,9 +1090,11 @@ struct dc_panel_config { struct psr { bool disable_psr; bool disallow_psrsu; + bool disallow_replay; bool rc_disable; bool rc_allow_static_screen; bool rc_allow_fullscreen_VPB; + unsigned int replay_enable_option; } psr; /* ABM */ struct varib { diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 01490c9ba958..15b64c26d5a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -30,7 +30,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dmub_abm_lcd.o dce_panel_cntl.o \ -dmub_hw_lock_mgr.o dmub_outbox.o +dmub_hw_lock_mgr.o dmub_outbox.o dmub_replay.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 07359eb89efc..e7acd6eec1fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -640,7 +640,7 @@ static void dce11_pplib_apply_display_requirements( * on power saving. * */ - pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? + pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ? pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 63009db8b5a7..b87bfecb7755 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu) } static bool dce_dmcu_load_iram(struct dmcu *dmcu, - unsigned int start_offset, - const char *src, - unsigned int bytes) + unsigned int start_offset, + const char *src, + unsigned int bytes) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); unsigned int count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index a3fee929cd12..86233f94db4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -98,6 +98,29 @@ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5) +#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \ + SRII(PIXEL_RATE_CNTL, blk, 0), \ + SRII(PIXEL_RATE_CNTL, blk, 1),\ + SRII(PIXEL_RATE_CNTL, blk, 2),\ + SRII(PIXEL_RATE_CNTL, blk, 3), \ + SRII(PIXEL_RATE_CNTL, blk, 4) + +#define HWSEQ_PHYPLL_REG_LIST_302(blk) \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4) + +#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \ + SRII(PIXEL_RATE_CNTL, blk, 0), \ + SRII(PIXEL_RATE_CNTL, blk, 1) + +#define HWSEQ_PHYPLL_REG_LIST_303(blk) \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) + + #define HWSEQ_PHYPLL_REG_LIST_201(blk) \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) @@ 
-387,7 +410,11 @@ SR(MPC_CRC_RESULT_C), \ SR(MPC_CRC_RESULT_AR), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_CLOCK_CONTROL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL) #define HWSEQ_DCN301_REG_LIST()\ SR(REFCLK_CNTL), \ @@ -508,8 +535,11 @@ SR(D5VGA_CONTROL), \ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ + HWSEQ_PIXEL_RATE_REG_LIST_302(OTG), \ + HWSEQ_PHYPLL_REG_LIST_302(OTG), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_CLOCK_CONTROL) #define HWSEQ_DCN303_REG_LIST() \ HWSEQ_DCN_REG_LIST(), \ @@ -540,28 +570,6 @@ SR(AZALIA_CONTROLLER_CLOCK_GATING), \ SR(HPO_TOP_CLOCK_CONTROL) -#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \ - SRII(PIXEL_RATE_CNTL, blk, 0), \ - SRII(PIXEL_RATE_CNTL, blk, 1),\ - SRII(PIXEL_RATE_CNTL, blk, 2),\ - SRII(PIXEL_RATE_CNTL, blk, 3), \ - SRII(PIXEL_RATE_CNTL, blk, 4) - -#define HWSEQ_PHYPLL_REG_LIST_302(blk) \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4) - -#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \ - SRII(PIXEL_RATE_CNTL, blk, 0), \ - SRII(PIXEL_RATE_CNTL, blk, 1) - -#define HWSEQ_PHYPLL_REG_LIST_303(blk) \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ - SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) - struct dce_hwseq_registers { uint32_t DCFE_CLOCK_CONTROL[6]; uint32_t DCFEV_CLOCK_CONTROL; @@ -663,14 +671,15 @@ struct dce_hwseq_registers { uint32_t MC_VM_XGMI_LFB_CNTL; uint32_t AZALIA_AUDIO_DTO; uint32_t AZALIA_CONTROLLER_CLOCK_GATING; - uint32_t HPO_TOP_CLOCK_CONTROL; - uint32_t ODM_MEM_PWR_CTRL3; - uint32_t DMU_MEM_PWR_CNTL; - uint32_t MMHUBBUB_MEM_PWR_CNTL; - uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; + /* MMHUB VM */ uint32_t MC_VM_FB_LOCATION_BASE; uint32_t MC_VM_FB_LOCATION_TOP; uint32_t MC_VM_FB_OFFSET; + uint32_t MMHUBBUB_MEM_PWR_CNTL; + uint32_t HPO_TOP_CLOCK_CONTROL; + uint32_t ODM_MEM_PWR_CTRL3; + uint32_t DMU_MEM_PWR_CNTL; + uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; uint32_t HPO_TOP_HW_CONTROL; }; /* set field name */ @@ -915,6 +924,7 @@ struct dce_hwseq_registers { #define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ @@ -1012,7 +1022,8 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh) + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh) #define HWSEQ_DCN303_MASK_SH_LIST(mask_sh) \ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 6d1b01c267b7..4f552c3e7663 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( return dce_i2c_hw; } -static enum 
i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t timeout, - enum i2c_channel_operation_result expected_result) +static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw, + uint32_t timeout, + enum i2c_channel_operation_result expected_result) { enum i2c_channel_operation_result result; uint32_t i = 0; @@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw( return period_timeout * num_of_clock_stretches; } -static bool dce_i2c_hw_engine_submit_payload( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_payload *payload, - bool middle_of_transaction, - uint32_t speed) +static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw, + struct i2c_payload *payload, + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index f1aeb6d1967c..e188447c8156 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine( return true; } + bool dce_i2c_engine_acquire_sw( struct dce_i2c_sw *dce_i2c_sw, struct ddc *ddc_handle) @@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw( return result; } - - - -static void dce_i2c_sw_engine_submit_channel_request( - struct dce_i2c_sw *engine, - struct i2c_request_transaction_data *req) +static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine, + struct i2c_request_transaction_data *req) { struct ddc *ddc = engine->ddc; uint16_t clock_delay_div_4 = engine->clock_delay >> 2; @@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request( I2C_CHANNEL_OPERATION_FAILED; } -static bool dce_i2c_sw_engine_submit_payload( - struct dce_i2c_sw *engine, - struct i2c_payload *payload, - bool middle_of_transaction) +static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine, + struct i2c_payload *payload, + bool middle_of_transaction) { struct i2c_request_transaction_data request; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index fa314493ffc5..136bd93c3b65 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -941,9 +941,7 @@ bool dce110_link_encoder_validate_output_with_stream( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_LVDS: - is_valid = - (stream->timing. - pixel_encoding == PIXEL_ENCODING_RGB) ? 
true : false; + is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; break; case SIGNAL_TYPE_VIRTUAL: is_valid = true; @@ -1645,7 +1643,7 @@ void dce110_link_encoder_enable_hpd(struct link_encoder *enc) uint32_t hpd_enable = 0; uint32_t value = dm_read_reg(ctx, addr); - get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN); + hpd_enable = get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN); if (hpd_enable == 0) set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 2fb9572ce25d..d3e6544022b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -27,6 +27,7 @@ #include "dmub_abm_lcd.h" #include "dc.h" #include "core_types.h" +#include "dmub_cmd.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) @@ -118,6 +119,32 @@ static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int pane return ret; } +/***************************************************************************** + * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore to preserve DMUB's + * VariBright state. LCD only; OLED is TBD. + * @abm: used to get the dc context + * @panel_inst: panel instance index + * @pData: contains the command to pause/un-pause ABM and the ABM parameters + ***************************************************************************/ +static bool dmub_abm_save_restore_ex( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData) +{ + bool ret = false; + unsigned int feature_support; + struct dc_context *dc = abm->ctx; + + feature_support = abm_feature_support(abm, panel_inst); + + if (feature_support == ABM_LCD_SUPPORT) + ret = dmub_abm_save_restore(dc, panel_inst, pData); + + return ret; +} + static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { bool ret = false; @@ -155,6 +182,7 @@ static const struct abm_funcs abm_funcs = { .get_target_backlight = dmub_abm_get_target_backlight_ex, .init_abm_config = dmub_abm_init_config_ex, .set_abm_pause = dmub_abm_set_pause_ex, + .save_restore = dmub_abm_save_restore_ex, .set_pipe_ex = dmub_abm_set_pipe_ex, .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex, }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 39da73eba86e..592a8f7a1c6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -208,6 +208,52 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un return true; } + +/***************************************************************************** + * dmub_abm_save_restore() - DMUB interface for ABM save + pause and + * restore + un-pause + * @dc: dc context + * @panel_inst: panel instance index + * @pData: contains the command to pause/un-pause ABM and exchanges the ABM + * parameters + * + * On pause, the current ABM data is read back and stored in pData; on + * un-pause, the ABM data held in pData is set/applied. + * + *****************************************************************************/ +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData) +{ + union dmub_rb_cmd cmd; + uint8_t panel_mask = 0x01 << panel_inst; + unsigned int bytes = sizeof(struct abm_save_restore); + + // TODO: Optimize by only reading back final 4 bytes + dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); + + // Copy the save/restore payload into the CW7 scratch region + memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes); + + memset(&cmd, 0, sizeof(cmd)); + cmd.abm_save_restore.header.type = DMUB_CMD__ABM; + cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE; + + cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.abm_save_restore.abm_init_config_data.bytes = bytes; + cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; + + cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + // Copy the updated payload back into the caller's structure + memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) { union dmub_rb_cmd cmd; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h index 00b4e268768e..853564d7f471 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -28,6 +28,8 @@ #include "abm.h" +struct abm_save_restore; + void dmub_abm_init(struct abm *abm, uint32_t backlight); bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); unsigned int dmub_abm_get_current_backlight(struct abm *abm); @@ -38,6 +40,10 @@ void dmub_abm_init_config(struct abm *abm, unsigned int inst); bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst); +bool dmub_abm_save_restore( + struct dc_context *dc, + unsigned int panel_inst, + struct abm_save_restore *pData); bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); bool dmub_abm_set_backlight_level(struct abm *abm, unsigned int backlight_pwm_u16_16, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c new file mode 100644 index 000000000000..28149e53c2a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -0,0 +1,303 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_dmub_srv.h" +#include "dmub/dmub_srv.h" +#include "core_types.h" +#include "dmub_replay.h" + +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + +#define MAX_PIPES 6 + +/* + * Get Replay state from firmware. + */ +static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst) +{ + struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; + uint32_t retry_count = 0; + enum dmub_status status; + + do { + // Send gpint command and wait for ack + status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_REPLAY_STATE, panel_inst, 30); + + if (status == DMUB_STATUS_OK) { + // GPINT was executed, get response + dmub_srv_get_gpint_response(srv, (uint32_t *)state); + } else { + // Return invalid state when GPINT times out + *state = REPLAY_STATE_INVALID; + } + } while (++retry_count <= 1000 && *state == REPLAY_STATE_INVALID); + + // Assert if max retry hit + if (retry_count >= 1000 && *state == REPLAY_STATE_INVALID) { + ASSERT(0); + /* To-do: Add retry fail log */ + } +} + +/* + * Enable/Disable Replay. + */ +static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + uint32_t retry_count; + enum replay_state state = REPLAY_STATE_0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_enable.header.type = DMUB_CMD__REPLAY; + cmd.replay_enable.data.panel_inst = panel_inst; + + cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; + if (enable) + cmd.replay_enable.data.enable = REPLAY_ENABLE; + else + cmd.replay_enable.data.enable = REPLAY_DISABLE; + + cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + /* Below loops 1000 x 500us = 500 ms. + * Exiting Replay may need to wait 1-2 frames for the panel to power up, + * so the timeout must cover at least a few frames. Should never hit the + * max retry assert below. + */ + if (wait) { + for (retry_count = 0; retry_count <= 1000; retry_count++) { + dmub_replay_get_state(dmub, &state, panel_inst); + + if (enable) { + if (state != REPLAY_STATE_0) + break; + } else { + if (state == REPLAY_STATE_0) + break; + } + + fsleep(500); + } + + /* assert if max retry hit */ + if (retry_count >= 1000) + ASSERT(0); + } +} + +/* + * Set REPLAY power optimization flags.
+ */ +static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_power_opt.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_power_opt.header.sub_type = DMUB_CMD__SET_REPLAY_POWER_OPT; + cmd.replay_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_power_opt_data); + cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt; + cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst; + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/* + * Setup Replay by programming phy registers and sending replay hw context values to firmware. + */ +static bool dmub_replay_copy_settings(struct dmub_replay *dmub, + struct dc_link *link, + struct replay_context *replay_context, + uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + struct dmub_cmd_replay_copy_settings_data *copy_settings_data + = &cmd.replay_copy_settings.replay_copy_settings_data; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + int i = 0; + + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support + break; + } + } + + if (!pipe_ctx) + return false; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_copy_settings.header.type = DMUB_CMD__REPLAY; + cmd.replay_copy_settings.header.sub_type = DMUB_CMD__REPLAY_COPY_SETTINGS; + cmd.replay_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_replay_copy_settings_data); + + // HW insts + copy_settings_data->aux_inst = replay_context->aux_inst; + copy_settings_data->digbe_inst = replay_context->digbe_inst; + copy_settings_data->digfe_inst = replay_context->digfe_inst; + + if (pipe_ctx->plane_res.dpp) + copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; + else + copy_settings_data->dpp_inst = 0; + if (pipe_ctx->stream_res.tg) + copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst; + else + copy_settings_data->otg_inst = 0; + + copy_settings_data->dpphy_inst = link->link_enc->transmitter; + + // Misc + copy_settings_data->line_time_in_ns = replay_context->line_time_in_ns; + copy_settings_data->panel_inst = panel_inst; + copy_settings_data->debug.u32All = link->replay_settings.config.debug_flags; + copy_settings_data->pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; + copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; + copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable; + copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported; + + copy_settings_data->flags.u32All = 0; + copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled); + copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); + // WA for PSRSU+DSC on specific TCON, if DSC is enabled, force PSRSU as ffu mode(full frame update) + if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !link->dc->debug.disable_fec) && + (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + 
!link->panel_config.dsc.disable_dsc_edp && + link->dc->caps.edp_dsc_support)) && + link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 /*&& + (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, + sizeof(DP_SINK_DEVICE_STR_ID_1)) || + !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, + sizeof(DP_SINK_DEVICE_STR_ID_2)))*/) + copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 1; + else + copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; + + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + return true; +} + +/* + * Set coasting vtotal. + */ +static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, + uint16_t coasting_vtotal, + uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; + cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); + cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; + + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/* + * Get Replay residency from firmware. + */ +static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, + uint32_t *residency, const bool is_start, const bool is_alpm) +{ + struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; + uint16_t param = (uint16_t)(panel_inst << 8); + + if (is_alpm) + param |= REPLAY_RESIDENCY_MODE_ALPM; + + if (is_start) + param |= REPLAY_RESIDENCY_ENABLE; + + // Send gpint command and wait for ack + dmub_srv_send_gpint_command(srv, DMUB_GPINT__REPLAY_RESIDENCY, param, 30); + + if (!is_start) + dmub_srv_get_gpint_response(srv, residency); + else + *residency = 0; +} + +static const struct dmub_replay_funcs replay_funcs = { + .replay_copy_settings = dmub_replay_copy_settings, + .replay_enable = dmub_replay_enable, + .replay_get_state = dmub_replay_get_state, + .replay_set_power_opt = dmub_replay_set_power_opt, + .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, + .replay_residency = dmub_replay_residency, +}; + +/* + * Construct Replay object. + */ +static void dmub_replay_construct(struct dmub_replay *replay, struct dc_context *ctx) +{ + replay->ctx = ctx; + replay->funcs = &replay_funcs; +} + +/* + * Allocate and initialize Replay object. + */ +struct dmub_replay *dmub_replay_create(struct dc_context *ctx) +{ + struct dmub_replay *replay = kzalloc(sizeof(struct dmub_replay), GFP_KERNEL); + + if (replay == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dmub_replay_construct(replay, ctx); + + return replay; +} + +/* + * Deallocate Replay object. + */ +void dmub_replay_destroy(struct dmub_replay **dmub) +{ + kfree(*dmub); + *dmub = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h new file mode 100644 index 000000000000..e8385bbf51fc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -0,0 +1,58 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_REPLAY_H_ +#define _DMUB_REPLAY_H_ + +#include "dc_types.h" +#include "dmub_cmd.h" +struct dc_link; +struct dmub_replay_funcs; + +struct dmub_replay { + struct dc_context *ctx; + const struct dmub_replay_funcs *funcs; +}; + +struct dmub_replay_funcs { + void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state, + uint8_t panel_inst); + void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait, + uint8_t panel_inst); + bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link, + struct replay_context *replay_context, uint8_t panel_inst); + void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, + uint8_t panel_inst); + void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal, + uint8_t panel_inst); + void (*replay_residency)(struct dmub_replay *dmub, + uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm); +}; + +struct dmub_replay *dmub_replay_create(struct dc_context *ctx); +void dmub_replay_destroy(struct dmub_replay **dmub); + + +#endif /* _DMUB_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 42e9b6a529f6..899b25b0bad8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -824,7 +824,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 6966420dfbac..ad967b58d7be 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -219,7 +219,7 @@ static bool dce110_enable_display_power_gating( if (controller_id == underlay_idx) controller_id = CONTROLLER_ID_UNDERLAY0 - 1; - if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){ + if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) { bp_result = dcb->funcs->enable_disp_power_gating( dcb, 
controller_id + 1, cntl); @@ -1151,6 +1151,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) struct timing_generator *tg = pipe_ctx->stream_res.tg; struct dtbclk_dto_params dto_params = {0}; int dp_hpo_inst; + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1177,7 +1179,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); - } + } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->disable_symclk_se) + dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, + link_enc->transmitter - TRANSMITTER_UNIPHY_A); if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { /* TODO: This looks like a bug to me as we are disabling HPO IO when @@ -1586,6 +1590,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( */ if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; + pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; } return DC_OK; } @@ -2017,6 +2022,10 @@ static bool should_enable_fbc(struct dc *dc, if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled) return false; + /* Replay should not be enabled */ + if (pipe_ctx->stream->link->replay_settings.replay_feature_enabled) + return false; + /* Nothing to compress */ if (!pipe_ctx->plane_state) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 46eca5a21e1c..1289b9418877 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -942,7 +942,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1119,13 +1119,15 @@ static enum dc_status dce110_add_stream_to_ctx( } static struct pipe_ctx *dce110_acquire_underlay( - struct dc_state *context, + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head_pipe) { + struct dc_stream_state *stream = opp_head_pipe->stream; struct dc *dc = stream->ctx->dc; struct dce_hwseq *hws = dc->hwseq; - struct resource_context *res_ctx = &context->res_ctx; + struct resource_context *res_ctx = &new_ctx->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; @@ -1173,7 +1175,7 @@ static struct pipe_ctx *dce110_acquire_underlay( stream->timing.h_total, stream->timing.v_total, stream->timing.pix_clk_100hz / 10, - context->stream_count); + new_ctx->stream_count); color_space_to_black_color(dc, COLOR_SPACE_YCBCR601, &black_color); @@ -1233,7 +1235,7 @@ static const struct resource_funcs dce110_res_pool_funcs = { .panel_cntl_create = dce110_panel_cntl_create, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, - 
.acquire_idle_pipe_for_layer = dce110_acquire_underlay, + .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay, .add_stream_to_ctx = dce110_add_stream_to_ctx, .validate_global = dce110_validate_global, .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 27cbb5b42c7e..6424e7f279dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -288,7 +288,7 @@ bool dce110_timing_generator_program_timing_generator( uint32_t vsync_offset = dc_crtc_timing->v_border_bottom + dc_crtc_timing->v_front_porch; - uint32_t v_sync_start =dc_crtc_timing->v_addressable + vsync_offset; + uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset; uint32_t hsync_offset = dc_crtc_timing->h_border_right + dc_crtc_timing->h_front_porch; @@ -603,7 +603,7 @@ void dce110_timing_generator_program_blanking( { uint32_t vsync_offset = timing->v_border_bottom + timing->v_front_porch; - uint32_t v_sync_start =timing->v_addressable + vsync_offset; + uint32_t v_sync_start = timing->v_addressable + vsync_offset; uint32_t hsync_offset = timing->h_border_right + timing->h_front_porch; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 690caaaff019..0ef9ebb3c1e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -127,7 +127,7 @@ static bool dce112_enable_display_power_gating( else cntl = ASIC_PIPE_DISABLE; - if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){ + if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) { bp_result = dcb->funcs->enable_disp_power_gating( dcb, controller_id + 1, cntl); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index e115ff91aaaa..2b20180f1a32 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -873,7 +873,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -964,7 +964,7 @@ enum dc_status resource_map_phy_clock_resources( { /* acquire new resources */ - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream( + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( &context->res_ctx, stream); if (!pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 3935fd455f0f..061221394ce0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -58,13 +58,13 @@ #include "dce/dce_i2c.h" /* TODO remove this include */ -#include "dce80_resource.h" - #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" #endif +#include "dce80/dce80_resource.h" + #ifndef mmDP_DPHY_INTERNAL_CTRL #define mmDP_DPHY_INTERNAL_CTRL 0x1CDE #define mmDP0_DP_DPHY_INTERNAL_CTRL 
0x1CDE diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index b33955928bd0..5ca9ab8a76e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,9 +39,6 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 -#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 -#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 -#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 #define REG(reg)\ dpp->tf_regs->reg @@ -200,8 +197,7 @@ static void dpp1_dscl_set_lb( DITHER_EN, 0, /* Dithering enable: Disabled */ INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ - } - else { + } else { /* DSCL caps: pixel data processed in float format */ REG_SET_2(LB_DATA_FORMAT, 0, INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ @@ -591,18 +587,6 @@ static void dpp1_dscl_set_manual_ratio_init( static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { - int visual_confirm_on = 0; - unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; - - if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) - visual_confirm_on = 1; - - /* Check bounds to ensure the VC bar height was set to a sane value */ - if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && - (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { - visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; - } - REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -613,8 +597,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, /* Number of RECOUT horizontal pixels */ RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); + RECOUT_HEIGHT, recout->height); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ee08b545aaea..377f1ba1a81b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; - if (!dcn10_is_dig_enabled(enc)) { + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ /*in DP_Alt_No_Connect case, we turn off the dig already, after excuation the PHY w/a sequence, not allow touch PHY any more*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 8e9384094f6d..f2f55565e98a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane( /* check insert_above_mpcc exist in tree->opp_list */ struct mpcc *temp_mpcc = tree->opp_list; - while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) - temp_mpcc = temp_mpcc->mpcc_bot; + if (temp_mpcc != insert_above_mpcc) + while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) + temp_mpcc = temp_mpcc->mpcc_bot; if (temp_mpcc == NULL) return NULL; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 4b02f8443534..9f9145742f14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1055,7 +1055,7 @@ static enum dc_status build_mapped_resource( struct dc_state *context, struct dc_stream_state *stream) { - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1083,14 +1083,15 @@ static enum dc_status dcn10_add_stream_to_ctx( return result; } -static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( - struct dc_state *context, +static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head_pipe) { - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); - struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); + struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); if (!head_pipe) { ASSERT(0); @@ -1271,7 +1272,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .link_enc_create = dcn10_link_encoder_create, .panel_cntl_create = dcn10_panel_cntl_create, .validate_bandwidth = dcn10_validate_bandwidth, - .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4492bc2392b6..65fa9e21ad9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1054,9 +1054,9 @@ void dcn20_blank_pixel_data( enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; - - int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; - int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; if (stream->link->test_pattern_enabled) return; @@ -1066,8 +1066,8 @@ void dcn20_blank_pixel_data( for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) odm_cnt++; - - width = width / odm_cnt; + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); if (blank) { dc->hwss.set_abm_immediate_disable(pipe_ctx); @@ -1080,28 +1080,31 @@ void dcn20_blank_pixel_data( test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } + odm_pipe = pipe_ctx; + + while (odm_pipe->next_odm_pipe) { 
+ dc->hwss.set_disp_pattern_generator(dc, + odm_pipe, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + &black_color, + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; + } + dc->hwss.set_disp_pattern_generator(dc, - pipe_ctx, + odm_pipe, test_pattern, test_pattern_color_space, stream->timing.display_color_depth, &black_color, - width, - height, - 0); - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - dc->hwss.set_disp_pattern_generator(dc, - odm_pipe, - dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? - CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, - test_pattern_color_space, - stream->timing.display_color_depth, - &black_color, - width, - height, - 0); - } + last_odm_slice_width, + v_active, + offset); if (!blank && dc->debug.enable_single_display_2to1_odm_policy) { /* when exiting dynamic ODM need to reinit DPG state for unused pipes */ @@ -1266,20 +1269,21 @@ void dcn20_pipe_control_lock( } if (flip_immediate && lock) { - const int TIMEOUT_FOR_FLIP_PENDING = 100000; + const int TIMEOUT_FOR_FLIP_PENDING_US = 100000; + unsigned int polling_interval_us = 1; int i; temp_pipe = pipe; while (temp_pipe) { if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) { - for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us; ++i) { if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp)) break; - udelay(1); + udelay(polling_interval_us); } /* no reason it should take this long for immediate flips */ - ASSERT(i != TIMEOUT_FOR_FLIP_PENDING); + ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US); } temp_pipe = temp_pipe->bottom_pipe; } @@ -1634,6 +1638,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->stream->update_flags.bits.gamut_remap + || plane_state->update_flags.bits.gamut_remap_change || pipe_ctx->stream->update_flags.bits.out_csc) { /* dpp/cm gamut remap*/ dc->hwss.program_gamut_remap(pipe_ctx); @@ -1949,7 +1954,8 @@ void dcn20_post_unlock_program_front_end( struct dc_state *context) { int i; - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; + const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000; + unsigned int polling_interval_us = 1; struct dce_hwseq *hwseq = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); @@ -1971,10 +1977,9 @@ void dcn20_post_unlock_program_front_end( pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { struct hubp *hubp = pipe->plane_res.hubp; int j = 0; - - for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us && hubp->funcs->hubp_is_flip_pending(hubp); j++) - udelay(1); + udelay(polling_interval_us); } } @@ -2123,6 +2128,15 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + dc_dmub_srv_p_state_delegate(dc, + true, context); + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + dc->clk_mgr->clks.fw_based_mclk_switching = true; + } else { + dc->clk_mgr->clks.fw_based_mclk_switching = false; + } + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, @@ -2707,6 +2721,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct dce_hwseq 
*hws = dc->hwseq; unsigned int k1_div = PIXEL_RATE_DIV_NA; unsigned int k2_div = PIXEL_RATE_DIV_NA; + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { if (dc->hwseq->funcs.setup_hpo_hw_control) @@ -2726,7 +2742,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dto_params.timing = &pipe_ctx->stream->timing; dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - } + } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->enable_symclk_se) + dccg->funcs->enable_symclk_se(dccg, + stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 9f2e24398cd7..d587f807dfd7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1294,7 +1294,7 @@ static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { enum dc_status status = DC_OK; - struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); if (!pipe_ctx) return DC_ERROR_UNEXPECTED; @@ -1948,7 +1948,7 @@ int dcn20_validate_apply_pipe_split_flags( v->ODMCombineEnablePerState[vlevel][pipe_plane]; if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { - if (get_num_mpc_splits(pipe) == 1) { + if (resource_get_num_mpc_splits(pipe) == 1) { /*If need split for mpc but 2 way split already*/ if (split[i] == 4) split[i] = 2; /* 2 -> 4 MPC */ @@ -1956,7 +1956,7 @@ int dcn20_validate_apply_pipe_split_flags( split[i] = 0; /* 2 -> 2 MPC */ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) merge[i] = true; /* 2 -> 1 MPC */ - } else if (get_num_mpc_splits(pipe) == 3) { + } else if (resource_get_num_mpc_splits(pipe) == 3) { /*If need split for mpc but 4 way split already*/ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) || !pipe->bottom_pipe)) { @@ -1965,7 +1965,7 @@ int dcn20_validate_apply_pipe_split_flags( pipe->top_pipe->plane_state == pipe->plane_state) merge[i] = true; /* 4 -> 1 MPC */ split[i] = 0; - } else if (get_num_odm_splits(pipe)) { + } else if (resource_get_num_odm_splits(pipe)) { /* ODM -> MPC transition */ if (pipe->prev_odm_pipe) { split[i] = 0; @@ -1973,7 +1973,7 @@ int dcn20_validate_apply_pipe_split_flags( } } } else { - if (get_num_odm_splits(pipe) == 1) { + if (resource_get_num_odm_splits(pipe) == 1) { /*If need split for odm but 2 way split already*/ if (split[i] == 4) split[i] = 2; /* 2 -> 4 ODM */ @@ -1983,7 +1983,7 @@ int dcn20_validate_apply_pipe_split_flags( ASSERT(0); /* NOT expected yet */ merge[i] = true; /* exit ODM */ } - } else if (get_num_odm_splits(pipe) == 3) { + } else if (resource_get_num_odm_splits(pipe) == 3) { /*If need split for odm but 4 way split already*/ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) || 
!pipe->next_odm_pipe)) { @@ -1993,7 +1993,7 @@ int dcn20_validate_apply_pipe_split_flags( merge[i] = true; /* exit ODM */ } split[i] = 0; - } else if (get_num_mpc_splits(pipe)) { + } else if (resource_get_num_mpc_splits(pipe)) { /* MPC -> ODM transition */ ASSERT(0); /* NOT expected yet */ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { @@ -2147,31 +2147,31 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, return voltage_supported; } -struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( - struct dc_state *state, +struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head) { - struct resource_context *res_ctx = &state->res_ctx; - struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); - struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream); + struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master); - if (!head_pipe) - ASSERT(0); + ASSERT(otg_master); - if (!idle_pipe) + if (!sec_dpp_pipe) return NULL; - idle_pipe->stream = head_pipe->stream; - idle_pipe->stream_res.tg = head_pipe->stream_res.tg; - idle_pipe->stream_res.opp = head_pipe->stream_res.opp; + sec_dpp_pipe->stream = opp_head->stream; + sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg; + sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp; - idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; + sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst; - return idle_pipe; + return sec_dpp_pipe; } bool dcn20_get_dcc_compression_cap(const struct dc *dc, @@ -2216,7 +2216,7 @@ static const struct resource_funcs dcn20_res_pool_funcs = { .link_enc_create = dcn20_link_encoder_create, .panel_cntl_create = dcn20_panel_cntl_create, .validate_bandwidth = dcn20_validate_bandwidth, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index da0241e8c255..6d1a8924e57b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -58,10 +58,11 @@ unsigned int dcn20_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); -struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( - struct dc_state *state, +struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream); + const struct 
pipe_ctx *opp_head_pipe); struct stream_encoder *dcn20_stream_encoder_create( enum engine_id eng_id, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index fdba8a9f5c30..2dc4d2c1410b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -992,14 +992,15 @@ static struct hubp *dcn201_hubp_create( return NULL; } -static struct pipe_ctx *dcn201_acquire_idle_pipe_for_layer( - struct dc_state *context, +static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream) + const struct pipe_ctx *opp_head_pipe) { - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); - struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); + struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); if (!head_pipe) ASSERT(0); @@ -1067,7 +1068,7 @@ static struct resource_funcs dcn201_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .add_dsc_to_stream_resource = NULL, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer, .populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c index 33fc9aa8621b..d07c04458d31 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c @@ -43,7 +43,7 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h index e44a37491c1e..b7efa777ec73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h @@ -32,6 +32,5 @@ struct dccg *dccg21_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); -void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); #endif /* __DCN21_DCCG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index d693ea42d033..d1a25fe6c44f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -660,6 +660,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -854,8 +855,8 @@ bool dcn21_fast_validate_bw(struct dc *dc, /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != 
dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; @@ -1387,7 +1388,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index bf8864bc8a99..6cef62d7a2e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -461,6 +461,11 @@ void dcn30_init_hw(struct dc *dc) REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); } + if (dc->debug.enable_mem_low_power.bits.vga) { + // Power down VGA memory + REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); + } + if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; @@ -949,13 +954,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { + bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. + */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + /* + * enabled -> enabled: do not disable + * enabled -> disabled: disable + * disabled -> enabled: don't care + * disabled -> disabled: don't care + */ + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. 
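+ * (p_state_change_support was forced off above only to keep MCLK switching disabled while the FPO transition completes; restoring the saved value here means the following optimize pass sees the setting that validation produced.)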
*/ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index abe4c12a10b5..88c0b24a3249 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -733,6 +733,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, }; @@ -1705,8 +1706,8 @@ noinline bool dcn30_internal_validate_bw( /* We only support full screen mpo with ODM */ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; @@ -2062,7 +2063,8 @@ bool dcn30_validate_bandwidth(struct dc *dc, } DC_FP_START(); - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); DC_FP_END(); BW_VAL_TRACE_END_WATERMARKS(); @@ -2214,7 +2216,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = { .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 1bee9a4636e6..79d6697d13b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1379,7 +1379,7 @@ static struct resource_funcs dcn301_res_pool_funcs = { .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -1425,9 +1425,9 @@ static bool dcn301_resource_construct( dc->caps.max_cursor_size = 256; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 5ad6a22ee47d..447abcd593be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -103,6 +103,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, }; @@ -1135,7 +1136,7 @@ static struct resource_funcs dcn302_res_pool_funcs = { .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 131b8b82afc0..adf4989177f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -85,6 +85,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, }; @@ -1061,7 +1062,7 @@ static struct resource_funcs dcn303_res_pool_funcs = { .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 01cc679ae418..8664f0c4c9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -285,19 +285,11 @@ void dccg31_enable_symclk32_le( /* select one of the PHYD32CLKs as the source for symclk32_le */ switch (hpo_le_inst) { case 0: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE0_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, SYMCLK32_LE0_EN, 1); break; case 1: - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 1, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, SYMCLK32_LE1_EN, 1); @@ -320,19 +312,38 @@ void dccg31_disable_symclk32_le( REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE0_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, - SYMCLK32_LE1_GATE_DISABLE, 0, - SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = 
TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + return; + + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0); + break; + case 1: + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0, + SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -661,10 +672,8 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_symclk32_se(dccg, 2); dccg31_disable_symclk32_se(dccg, 3); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) { - dccg31_disable_symclk32_le(dccg, 0); - dccg31_disable_symclk32_le(dccg, 1); - } + dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false); + dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { dccg31_disable_dpstreamclk(dccg, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 0902ce5eb8a1..e3caaacf7493 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); +void dccg31_set_symclk32_le_root_clock_gating( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index bd62502380d8..4596f3bac1b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -558,7 +558,7 @@ void dcn31_link_encoder_disable_output( struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; struct dc_link *link; - if (!dcn10_is_dig_enabled(enc)) + if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) return; link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 0278bae50a9d..45143459eedd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank( VID_STREAM_STATUS, 0, 10, 5000); - /* Disable SDP tranmission */ + /* Disable SDP transmission */ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 7445ed27852a..1f4e0b6261ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -1018,8 +1018,8 @@ void hubbub31_init(struct hubbub *hubbub) /*done in hwseq*/ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 0, - DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); } /* diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index fc33b5fcabe1..82de4fe2637f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -96,6 +96,7 @@ #include "dce/dmub_psr.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" +#include "dce/dmub_replay.h" #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" @@ -896,6 +897,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1479,6 +1481,9 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } @@ -1776,8 +1781,8 @@ bool dcn31_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_SKIP(fast); goto validate_out; } - - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -1818,7 +1823,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -2085,6 +2090,14 @@ static bool dcn31_resource_construct( goto create_fail; } + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c index 0746ed31d1d1..ad3f019a784f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c @@ -362,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = { .disable_symclk32_se = dccg31_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, + .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg314_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 6a9024aa3285..1c1fb2fa0822 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -93,6 +93,7 @@ #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn314/display_mode_vba_314.h" @@ -870,6 +871,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_z9_disable_interface = true, .minimum_z8_residency_time = 2000, .psr_skip_crtc_disable = true, + .replay_skip_crtc_disabled = true, .disable_dmcu = true, .force_abm_enable = 
false, .timing_trace = false, @@ -908,15 +910,15 @@ static const struct dc_debug_options debug_defaults_drv = { .root_clock_optimization = { .bits = { .dpp = true, - .dsc = false, - .hdmistream = false, - .hdmichar = false, - .dpstream = false, - .symclk32_se = false, - .symclk32_le = false, - .symclk_fe = false, - .physymclk = false, - .dpiasymclk = false, + .dsc = true, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, } }, @@ -945,6 +947,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1528,6 +1531,9 @@ static void dcn314_resource_destruct(struct dcn314_resource_pool *pool) if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); } @@ -1683,7 +1689,9 @@ static bool filter_modes_for_single_channel_workaround(struct dc *dc, struct dc_state *context) { // Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR - if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) { + if (dc->clk_mgr->bw_params->vram_type == 34 && + dc->clk_mgr->bw_params->num_channels < 2 && + context->stream_count > 1) { int total_phy_pix_clk = 0; for (int i = 0; i < context->stream_count; i++) @@ -1732,8 +1740,8 @@ bool dcn314_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_SKIP(fast); goto validate_out; } - - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -1765,7 +1773,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn314_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -2025,6 +2033,14 @@ static bool dcn314_resource_construct( goto create_fail; } + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index df3a438abda8..127487ea3d7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -895,6 +895,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1659,7 +1660,7 @@ static int dcn315_populate_dml_pipes_from_context( { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = 
&context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); @@ -1817,7 +1818,7 @@ static struct resource_funcs dcn315_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn315_update_soc_for_wm_a, .populate_dml_pipes = dcn315_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 707cf28bbceb..5fe2c61527df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -891,6 +891,7 @@ static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1704,7 +1705,7 @@ static struct resource_funcs dcn316_res_pool_funcs = { .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn316_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index 61ceff6bc0b1..921f58c0c729 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -281,7 +281,8 @@ static void dccg32_set_dpstreamclk( struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); /* set the dtbclk_p source */ - dccg32_set_dtbclk_p_src(dccg, src, otg_inst); + /* always program refclk as DTBCLK. No use-case expected to require DPREFCLK as refclk */ + dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst); /* enabled to select one of the DTBCLKs for pipe */ switch (dp_hpo_inst) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index a18b9c0c5709..8bfef6d095b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -955,8 +955,8 @@ void hubbub32_init(struct hubbub *hubbub) /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 0, - DCFCLK_R_DCHUBBUB_GATE_DIS, 0); + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); } /* ignore the "df_pre_cstate_req" from the SDP port control. 
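The dcn32 hunks that follow (dcn32_hwseq.c, dcn32_resource_helpers.c, and the dce_calcs/dcn32_fpu changes further down) replace open-coded pipe checks such as pipe->stream && !pipe->top_pipe && !pipe->prev_odm_pipe with resource_is_pipe_type(). Below is a minimal sketch of what those predicates test, inferred only from the before/after pairs in this series; the real helper in DC's resource code is authoritative, and the sk_ names and the FREE_PIPE case are illustrative assumptions.

/* Sketch, not the upstream implementation: approximate semantics of
 * resource_is_pipe_type() as implied by the conversions in this series.
 */
enum sk_pipe_type { SK_OTG_MASTER, SK_OPP_HEAD, SK_DPP_PIPE, SK_FREE_PIPE };

static bool sk_is_pipe_type(const struct pipe_ctx *pipe, enum sk_pipe_type type)
{
	switch (type) {
	case SK_OTG_MASTER: /* owns the OTG: not an ODM slice, not an MPC layer */
		return pipe->stream && !pipe->top_pipe && !pipe->prev_odm_pipe;
	case SK_OPP_HEAD: /* top of an MPC blending tree feeding one OPP */
		return pipe->stream && !pipe->top_pipe;
	case SK_DPP_PIPE: /* carries a plane through a DPP for scaling/blending */
		return pipe->stream && pipe->plane_state;
	case SK_FREE_PIPE: /* not yet used for any stream or plane */
		return !pipe->stream && !pipe->plane_state;
	}
	return false;
}

Read this way, every OTG master is also an OPP head, and a pipe may be an OPP head and a DPP pipe at once, which is why the SubVP admissibility checks later in this series test OPP_HEAD and DPP_PIPE together.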
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index d52d5feeb311..680e7fa8d18a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -47,11 +47,9 @@ #include "clk_mgr.h" #include "dsc.h" #include "dcn20/dcn20_optc.h" -#include "dmub_subvp_state.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32_resource.h" #include "link.h" -#include "dmub/inc/dmub_subvp_state.h" #define DC_LOGGER_INIT(logger) @@ -569,7 +567,7 @@ bool dcn32_set_output_transfer_func(struct dc *dc, bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ - if (pipe_ctx->top_pipe == NULL) { + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/ ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { @@ -1204,10 +1202,10 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER)) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) + if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); @@ -1301,7 +1299,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link) if (link->phy_state.symclk_ref_cnts.otg > 0) { for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && pipe_ctx->stream->link == link) { pipe_ctx->clock_source->funcs->program_pix_clk( pipe_ctx->clock_source, &pipe_ctx->stream_res.pix_clk_params, @@ -1384,7 +1382,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) { phantom_pipe->update_flags.raw = 0; if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - if (phantom_pipe->stream && phantom_pipe->plane_state) { + if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { phantom_pipe->update_flags.bits.enable = 1; phantom_pipe->update_flags.bits.mpcc = 1; phantom_pipe->update_flags.bits.dppclk = 1; @@ -1394,7 +1392,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) phantom_pipe->update_flags.bits.scaler = 1; phantom_pipe->update_flags.bits.viewport = 1; phantom_pipe->update_flags.bits.det_size = 1; - if (!phantom_pipe->top_pipe && !phantom_pipe->prev_odm_pipe) { + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { phantom_pipe->update_flags.bits.odm = 1; phantom_pipe->update_flags.bits.global_sync = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index c8041cfd594d..3082da04a63d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -64,7 +64,7 @@ void mpc32_mpc_init(struct mpc *mpc) } } -static void mpc32_power_on_blnd_lut( +void mpc32_power_on_blnd_lut( struct mpc *mpc, uint32_t mpcc_id, bool power_on) @@ -120,7 +120,7 @@ static enum dc_lut_mode mpc32_get_post1dlut_current(struct mpc *mpc, uint32_t mp return mode; } 
-static void mpc32_configure_post1dlut( +void mpc32_configure_post1dlut( struct mpc *mpc, uint32_t mpcc_id, bool is_ram_a) @@ -163,7 +163,7 @@ static void mpc32_post1dlut_get_reg_field( } /*program blnd lut RAM A*/ -static void mpc32_program_post1dluta_settings( +void mpc32_program_post1dluta_settings( struct mpc *mpc, uint32_t mpcc_id, const struct pwl_params *params) @@ -192,7 +192,7 @@ static void mpc32_program_post1dluta_settings( } /*program blnd lut RAM B*/ -static void mpc32_program_post1dlutb_settings( +void mpc32_program_post1dlutb_settings( struct mpc *mpc, uint32_t mpcc_id, const struct pwl_params *params) @@ -220,7 +220,7 @@ static void mpc32_program_post1dlutb_settings( cm_helper_program_gamcor_xfer_func(mpc->ctx, params, &gam_regs); } -static void mpc32_program_post1dlut_pwl( +void mpc32_program_post1dlut_pwl( struct mpc *mpc, uint32_t mpcc_id, const struct pwl_result_data *rgb, @@ -321,7 +321,7 @@ static enum dc_lut_mode mpc32_get_shaper_current(struct mpc *mpc, uint32_t mpcc_ } -static void mpc32_configure_shaper_lut( +void mpc32_configure_shaper_lut( struct mpc *mpc, bool is_ram_a, uint32_t mpcc_id) @@ -336,7 +336,7 @@ static void mpc32_configure_shaper_lut( } -static void mpc32_program_shaper_luta_settings( +void mpc32_program_shaper_luta_settings( struct mpc *mpc, const struct pwl_params *params, uint32_t mpcc_id) @@ -486,7 +486,7 @@ static void mpc32_program_shaper_luta_settings( } -static void mpc32_program_shaper_lutb_settings( +void mpc32_program_shaper_lutb_settings( struct mpc *mpc, const struct pwl_params *params, uint32_t mpcc_id) @@ -637,7 +637,7 @@ static void mpc32_program_shaper_lutb_settings( } -static void mpc32_program_shaper_lut( +void mpc32_program_shaper_lut( struct mpc *mpc, const struct pwl_result_data *rgb, uint32_t num, @@ -671,7 +671,7 @@ static void mpc32_program_shaper_lut( } -static void mpc32_power_on_shaper_3dlut( +void mpc32_power_on_shaper_3dlut( struct mpc *mpc, uint32_t mpcc_id, bool power_on) @@ -789,7 +789,7 @@ static enum dc_lut_mode get3dlut_config( } -static void mpc32_select_3dlut_ram( +void mpc32_select_3dlut_ram( struct mpc *mpc, enum dc_lut_mode mode, bool is_color_channel_12bits, @@ -803,7 +803,7 @@ static void mpc32_select_3dlut_ram( } -static void mpc32_select_3dlut_ram_mask( +void mpc32_select_3dlut_ram_mask( struct mpc *mpc, uint32_t ram_selection_mask, uint32_t mpcc_id) @@ -816,7 +816,7 @@ static void mpc32_select_3dlut_ram_mask( } -static void mpc32_set3dlut_ram12( +void mpc32_set3dlut_ram12( struct mpc *mpc, const struct dc_rgb *lut, uint32_t entries, @@ -848,7 +848,7 @@ static void mpc32_set3dlut_ram12( } -static void mpc32_set3dlut_ram10( +void mpc32_set3dlut_ram10( struct mpc *mpc, const struct dc_rgb *lut, uint32_t entries, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h index 2c2ecd053806..9ac584fa89ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h @@ -332,4 +332,65 @@ void dcn32_mpc_construct(struct dcn30_mpc *mpc30, int num_mpcc, int num_rmu); +void mpc32_power_on_blnd_lut( + struct mpc *mpc, + uint32_t mpcc_id, + bool power_on); +void mpc32_program_post1dlut_pwl( + struct mpc *mpc, + uint32_t mpcc_id, + const struct pwl_result_data *rgb, + uint32_t num); +void mpc32_program_post1dlutb_settings( + struct mpc *mpc, + uint32_t mpcc_id, + const struct pwl_params *params); +void mpc32_program_post1dluta_settings( + struct mpc *mpc, + uint32_t mpcc_id, + const struct pwl_params *params); 
+void mpc32_configure_post1dlut( + struct mpc *mpc, + uint32_t mpcc_id, + bool is_ram_a); +void mpc32_program_shaper_lut( + struct mpc *mpc, + const struct pwl_result_data *rgb, + uint32_t num, + uint32_t mpcc_id); +void mpc32_program_shaper_lutb_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t mpcc_id); +void mpc32_program_shaper_luta_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t mpcc_id); +void mpc32_configure_shaper_lut( + struct mpc *mpc, + bool is_ram_a, + uint32_t mpcc_id); +void mpc32_power_on_shaper_3dlut( + struct mpc *mpc, + uint32_t mpcc_id, + bool power_on); +void mpc32_set3dlut_ram10( + struct mpc *mpc, + const struct dc_rgb *lut, + uint32_t entries, + uint32_t mpcc_id); +void mpc32_set3dlut_ram12( + struct mpc *mpc, + const struct dc_rgb *lut, + uint32_t entries, + uint32_t mpcc_id); +void mpc32_select_3dlut_ram_mask( + struct mpc *mpc, + uint32_t ram_selection_mask, + uint32_t mpcc_id); +void mpc32_select_3dlut_ram( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + uint32_t mpcc_id); #endif //__DC_MPCC_DCN32_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 1cc09799f92d..935cd23e6a01 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1709,8 +1709,8 @@ void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && - pipe->plane_state && pipe->stream && + if (resource_is_pipe_type(pipe, OTG_MASTER) && + resource_is_pipe_type(pipe, DPP_PIPE) && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { phantom_plane = pipe->plane_state; phantom_stream = pipe->stream; @@ -1892,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; bool vsr_odm_support = false; @@ -2038,7 +2038,7 @@ static struct resource_funcs dcn32_res_pool_funcs = { .validate_bandwidth = dcn32_validate_bandwidth, .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, @@ -2485,109 +2485,115 @@ struct resource_pool *dcn32_create_resource_pool( return NULL; } -static struct pipe_ctx *find_idle_secondary_pipe_check_mpo( - struct resource_context *res_ctx, +/* + * Find the most optimal free pipe from res_ctx, which could be used as a + * secondary dpp pipe for input opp head pipe. + * + * a free pipe - a pipe in input res_ctx not yet used for any streams or + * planes. + * secondary dpp pipe - a pipe that gets inserted into a head OPP pipe's MPC blending + * tree. This is typically used for rendering MPO planes or additional offset + * areas in MPCC combine.
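+ * For example, in State 1 below, PIPE1 is a secondary dpp pipe blending an extra plane into OPP head PIPE0's tree.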
+ * + * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe + * ------------------------------------------------------------------------- + * + * PROBLEM: + * + * 1. There is a hardware limitation that a secondary DPP pipe cannot be + * transferred from one MPC blending tree to the other in a single frame. + * Otherwise it could cause glitches on the screen. + * + * For instance, we cannot transition from state 1 to state 2 in one frame. This + * is because PIPE1 is transferred from PIPE0's MPC blending tree over to + * PIPE2's MPC blending tree, which is not supported by hardware. + * To support this transition we need to first remove PIPE1 from PIPE0's MPC + * blending tree in one frame and then insert PIPE1 into PIPE2's MPC blending tree + * in the next frame. This is not optimal as it will delay the flip for two + * frames. + * + * State 1: + * PIPE0 -- secondary DPP pipe --> (PIPE1) + * PIPE2 -- secondary DPP pipe --> NONE + * + * State 2: + * PIPE0 -- secondary DPP pipe --> NONE + * PIPE2 -- secondary DPP pipe --> (PIPE1) + * + * 2. In general, we want to minimize unnecessary changes in pipe topology. + * If a pipe is already added in the current blending tree and there are no changes + * to plane topology, we don't want to swap it with another free pipe + * unnecessarily in every update. Powering up and down a pipe would require a + * full update which delays the flip for 1 frame. If we use the original pipe + * we don't have to toggle its power. So we can flip faster. + */ +static int find_optimal_free_pipe_as_secondary_dpp_pipe( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, const struct resource_pool *pool, - const struct pipe_ctx *primary_pipe) + const struct pipe_ctx *new_opp_head) { - int i; - struct pipe_ctx *secondary_pipe = NULL; - struct pipe_ctx *next_odm_mpo_pipe = NULL; - int primary_index, preferred_pipe_idx; - struct pipe_ctx *old_primary_pipe = NULL; + const struct pipe_ctx *cur_opp_head; + int free_pipe_idx; - /* - * Modified from find_idle_secondary_pipe - * With windowed MPO and ODM, we want to avoid the case where we want a - * free pipe for the left side but the free pipe is being used on the - * right side. - * Add check on current_state if the primary_pipe is the left side, - * to check the right side ( primary_pipe->next_odm_pipe ) to see if - * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe ) - * - If so, then don't use this pipe - * EXCEPTION - 3 plane ( 2 MPO plane ) case - * - in this case, the primary pipe has already gotten a free pipe for the - * MPO window in the left - * - when it tries to get a free pipe for the MPO window on the right, - * it will see that it is already assigned to the right side - * ( primary_pipe->next_odm_pipe ). But in this case, we want this - * free pipe, since it will be for the right side. So add an - * additional condition, that skipping the free pipe on the right only - * applies if the primary pipe has no bottom pipe currently assigned + cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx]; + free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree( + cur_res_ctx, new_res_ctx, cur_opp_head); + + /* Up until here, if we have not found a free secondary pipe, we will + * need to wait for at least one frame to complete the transition + * sequence.
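+ * Reusing the pipe that is already in this OPP head's current blending tree keeps the topology unchanged, so no pipe needs a power toggle (see point 2 above).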
*/ - if (primary_pipe) { - primary_index = primary_pipe->pipe_idx; - old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index]; - if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe) - && (!primary_pipe->bottom_pipe)) - next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe; + if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( + cur_res_ctx, new_res_ctx, pool); - preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx; - if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) && - !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) { - secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; - secondary_pipe->pipe_idx = preferred_pipe_idx; - } - } - - /* - * search backwards for the second pipe to keep pipe - * assignment more consistent + /* Up until here, if we have not found a free secondary pipe, we will + * need to wait for at least two frames to complete the transition + * sequence. It really doesn't matter which pipe we decide to take from + * the currently enabled pipes; swapping one pipe or several won't save + * frame time either way. */ - if (!secondary_pipe) for (i = pool->pipe_count - 1; i >= 0; i--) { - if ((res_ctx->pipe_ctx[i].stream == NULL) && - !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) { - secondary_pipe = &res_ctx->pipe_ctx[i]; - secondary_pipe->pipe_idx = i; - break; - } - } + if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( + cur_res_ctx, new_res_ctx, pool); - return secondary_pipe; + if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool); + + return free_pipe_idx; } -struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( - struct dc_state *state, +struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream, - struct pipe_ctx *head_pipe) + const struct pipe_ctx *opp_head_pipe) { - struct resource_context *res_ctx = &state->res_ctx; - struct pipe_ctx *idle_pipe, *pipe; - struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx; - int head_index; + int free_pipe_idx = + find_optimal_free_pipe_as_secondary_dpp_pipe( + &cur_ctx->res_ctx, &new_ctx->res_ctx, + pool, opp_head_pipe); + struct pipe_ctx *free_pipe; - if (!head_pipe) - ASSERT(0); + if (free_pipe_idx >= 0) { + free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; + free_pipe->pipe_idx = free_pipe_idx; + free_pipe->stream = opp_head_pipe->stream; + free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg; + free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp; - /* - * Modified from dcn20_acquire_idle_pipe_for_layer - * Check if head_pipe in old_context already has bottom_pipe allocated. - * - If so, check if that pipe is available in the current context.
- * -- If so, reuse pipe from old_context - */ - head_index = head_pipe->pipe_idx; - pipe = &old_ctx->pipe_ctx[head_index]; - if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) { - idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx]; - idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx; + free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx]; + free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx]; + free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx]; + free_pipe->plane_res.mpcc_inst = + pool->dpps[free_pipe->pipe_idx]->inst; } else { - idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe); - if (!idle_pipe) - return NULL; + ASSERT(opp_head_pipe); + free_pipe = NULL; } - idle_pipe->stream = head_pipe->stream; - idle_pipe->stream_res.tg = head_pipe->stream_res.tg; - idle_pipe->stream_res.opp = head_pipe->stream_res.opp; - - idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; - - return idle_pipe; + return free_pipe; } unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index 81e443170829..103a2b54d025 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -136,11 +136,11 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); bool dcn32_is_center_timing(struct pipe_ctx *pipe); bool dcn32_is_psr_capable(struct pipe_ctx *pipe); -struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( - struct dc_state *state, +struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream, - struct pipe_ctx *head_pipe); + const struct pipe_ctx *opp_head_pipe); void dcn32_determine_det_override(struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 5be242a1b82c..3ad2b48954e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -641,16 +641,21 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) uint8_t non_subvp_pipes = 0; bool drr_pipe_found = false; bool drr_psr_capable = false; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) - continue; - - if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + } if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; drr_psr_capable = 
(drr_psr_capable || dcn32_is_psr_capable(pipe)); @@ -662,7 +667,8 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) } } - if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable) + if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + ((uint32_t)refresh_rate < 120)) result = true; return result; @@ -693,16 +699,21 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int bool drr_pipe_found = false; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vblank_psr_capable = false; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) - continue; - - if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { subvp_count++; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + } if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { non_subvp_pipes++; vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); @@ -715,7 +726,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int } if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && - vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) + ((uint32_t)refresh_rate < 120) && + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) result = true; return result; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index a53478e15ce3..8d73cceb485b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1588,7 +1588,7 @@ static struct resource_funcs dcn321_res_pool_funcs = { .validate_bandwidth = dcn32_validate_bandwidth, .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c index 0100a6053ab6..f2dfa96f9ef5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -3015,7 +3015,7 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[], int i, num_active_pipes = 0; for (i = 0; i < pipe_count; i++) { - if (!pipe[i].stream || pipe[i].top_pipe) + if (!resource_is_pipe_type(&pipe[i], OPP_HEAD)) continue; active_pipes[num_active_pipes++] = &pipe[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index 
e73f089c84bb..50b0434354f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -1258,7 +1258,7 @@ bool dcn_validate_bandwidth( hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end; } else { /* pipe not split previously needs split */ - hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe); + hsplit_pipe = resource_find_free_secondary_pipe_legacy(&context->res_ctx, pool, pipe); ASSERT(hsplit_pipe); split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 8ae5ddbd1b27..8afda5ecc0cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1305,7 +1305,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc, pipes[pipe_cnt].dout.is_virtual = 0; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; - switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) { + switch (resource_get_num_odm_splits(&res_ctx->pipe_ctx[i])) { case 1: pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index f294f2f8c75b..57cf0358cc43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevels; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double HostVMInefficiencyFactor; double VRatioClamped; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 9af1a43c042b..ad741a723c0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -784,8 +784,7 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o Delay = Delay + 1; // sft Delay = Delay + 1; - } - else { + } else { // sfr Delay = Delay + 2; // dsccif @@ -3489,8 +3488,7 @@ static double TruncToValidBPP( if (Format == dm_n422) { MinDSCBPP = 7; MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0; - } - else { + } else { MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 43016c462251..adea459e7d36 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3505,7 +3505,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { @@ -4135,7 +4135,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } if (v->OutputFormat[k] == dm_420 && 
v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH && v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) { - if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) { + if (v->Output[k] == dm_hdmi) { + FMTBufferExceeded = true; + } else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index ed8ddb75b333..07adb614366e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -31,6 +31,7 @@ #include "dml/dcn20/dcn20_fpu.h" #include "dml/dcn31/dcn31_fpu.h" #include "dml/display_mode_vba.h" +#include "dml/dml_inline_defs.h" struct _vcs_dpi_ip_params_st dcn3_14_ip = { .VBlankNomDefaultUS = 668, @@ -273,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } +/* + * micro_sec_to_vert_lines () - converts time to number of vertical lines for a given timing + * + * @param: num_us: number of microseconds + * @return: number of vertical lines. If exact number of vertical lines is not found then + * it will round up to next number of lines to guarantee num_us + */ +static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing) +{ + unsigned int num_lines = 0; + unsigned int lines_time_in_ns = 1000.0 * + (((float)timing->h_total * 1000.0) / + ((float)timing->pix_clk_100hz / 10.0)); + + num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0); + + return num_lines; +} + int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, bool fast_validate) @@ -289,19 +309,22 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; + unsigned int num_lines = 0; if (!res_ctx->pipe_ctx[i].stream) continue; pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; + num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing); + if (pipe->stream->adjust.v_total_min != 0) pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; else pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; - pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS); + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines); pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width); pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 9010c47476e9..a94aa0f21a7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double 
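/* Worked example for micro_sec_to_vert_lines() above, assuming a timing
 * with h_total = 4400 and pix_clk_100hz = 5,940,000 (594 MHz):
 *   lines_time_in_ns = 1000 * (4400 * 1000) / (5,940,000 / 10) ~= 7407
 *   num_lines        = ceil(1000 * 668 / 7407) = ceil(90.2) = 91
 * so the VBlankNomDefaultUS = 668 us cap now clamps vblank_nom to 91
 * lines for this timing, where the old code compared the line count
 * directly against the raw microsecond value 668.
 */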
ImmediateFlipBW; + double ImmediateFlipBW = 1.0; double LineTime = v->HTotal[k] / v->PixelClock[k]; if (v->GPUVMEnable == true && v->HostVMEnable == true) { @@ -4227,7 +4227,9 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_ } if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH && v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) { - if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) { + if (v->Output[k] == dm_hdmi) { + FMTBufferExceeded = true; + } else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a95034801712..711d4085b33b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -37,7 +37,7 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = { .min_refresh = 120, - .max_refresh = 165, + .max_refresh = 175, .res = { {.width = 3840, .height = 2160, }, {.width = 3440, .height = 1440, }, @@ -756,7 +756,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // Find the minimum pipe split count for non SubVP pipes - if (pipe->stream && !pipe->top_pipe && + if (resource_is_pipe_type(pipe, OPP_HEAD) && pipe->stream->mall_stream_config.type == SUBVP_NONE) { split_cnt = 0; while (pipe) { @@ -886,7 +886,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) - if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; // Find the SubVP pipe @@ -899,7 +900,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) drr_pipe = &context->res_ctx.pipe_ctx[i]; // We check for master pipe only - if (!drr_pipe->stream || !drr_pipe->plane_state || drr_pipe->top_pipe || drr_pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && @@ -980,7 +982,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) - if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !resource_is_pipe_type(pipe, DPP_PIPE)) continue; if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { @@ -1040,7 +1043,7 @@ static bool subvp_subvp_admissable(struct dc *dc, uint32_t i; uint8_t subvp_count = 0; uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0; - uint32_t refresh_rate = 0; + uint64_t refresh_rate = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1050,19 +1053,22 @@ static bool subvp_subvp_admissable(struct dc *dc, if (pipe->plane_state && !pipe->top_pipe && 
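/* A note on the repeated "double ImmediateFlipBW = 1.0;" hunks above
 * (the DCN21/31/314/32 flip-schedule paths all get the same change):
 * when immediate flip is not required, the branches that compute
 * ImmediateFlipBW are skipped, so later reads would otherwise see an
 * indeterminate value; presumably 1.0 was chosen as a harmless non-zero
 * default for the divisions that follow.
 */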
pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) - / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); - if (refresh_rate < min_refresh) - min_refresh = refresh_rate; - if (refresh_rate > max_refresh) - max_refresh = refresh_rate; + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if ((uint32_t)refresh_rate < min_refresh) + min_refresh = (uint32_t)refresh_rate; + if ((uint32_t)refresh_rate > max_refresh) + max_refresh = (uint32_t)refresh_rate; subvp_count++; } } if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) || - (min_refresh >= 120 && max_refresh >= 120))) + (min_refresh >= subvp_high_refresh_list.min_refresh && + max_refresh <= subvp_high_refresh_list.max_refresh))) result = true; return result; @@ -1715,8 +1721,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled && !dc->config.enable_windowed_mpo_odm && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, sizeof(struct rect)) != 0) { ASSERT(mpo_pipe->plane_state != pipe->plane_state); goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index a50e7f4dce42..ecea008f19d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -3459,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule( double TimeForFetchingMetaPTE = 0; double TimeForFetchingRowInVBlank = 0; double LinesToRequestPrefetchPixelData = 0; + double LinesForPrefetchBandwidth = 0; unsigned int HostVMDynamicLevelsTrips; double trip_to_mem; double Tvm_trips; @@ -3888,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule( TimeForFetchingMetaPTE = Tvm_oto; TimeForFetchingRowInVBlank = Tr0_oto; *PrefetchBandwidth = prefetch_bw_oto; + /* Clamp to oto for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_oto; } else { *DestinationLinesForPrefetch = dst_y_prefetch_equ; TimeForFetchingMetaPTE = Tvm_equ; TimeForFetchingRowInVBlank = Tr0_equ; *PrefetchBandwidth = prefetch_bw_equ; + /* Clamp to equ for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_equ; } *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0; @@ -3900,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule( *DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0; - LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - + LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank; #ifdef __DML_VBA_DEBUG__ @@ -4124,7 +4129,7 @@ void dml32_CalculateFlipSchedule( unsigned int HostVMDynamicLevelsTrips; double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; + double ImmediateFlipBW = 1.0; if (GPUVMEnable == true && 
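/* SubVP + SubVP admissibility under the new bounds above
 * (subvp_high_refresh_list: min_refresh = 120, max_refresh = 175):
 *   60 Hz + 60 Hz    -> allowed  (both below 120)
 *   144 Hz + 165 Hz  -> allowed  (both within [120, 175])
 *   60 Hz + 144 Hz   -> rejected (mixed ranges)
 *   144 Hz + 180 Hz  -> rejected (180 exceeds max_refresh)
 * The old check only distinguished "both below 120" from "both at or
 * above 120", with no upper bound on the high-refresh pair.
 */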
HostVMEnable == true) HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 58dd62cce4bb..3966845c7694 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed; static bool dsc_policy_disable_dsc_stream_overhead; +static bool disable_128b_132b_stream_overhead; + #ifndef MAX #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) #endif @@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead; #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #endif +/* Need to account for padding due to pixel-to-symbol packing + * for uncompressed 128b/132b streams. + */ +static uint32_t apply_128b_132b_stream_overhead( + const struct dc_crtc_timing *timing, const uint32_t kbps) +{ + uint32_t total_kbps = kbps; + + if (disable_128b_132b_stream_overhead) + return kbps; + + if (!timing->flags.DSC) { + struct fixed31_32 bpp; + struct fixed31_32 overhead_factor; + + bpp = dc_fixpt_from_int(kbps); + bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10); + + /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size) + * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive + */ + overhead_factor = dc_fixpt_from_int(timing->h_addressable); + overhead_factor = dc_fixpt_mul(overhead_factor, bpp); + overhead_factor = dc_fixpt_div_int(overhead_factor, 128); + overhead_factor = dc_fixpt_div( + dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)), + overhead_factor); + + total_kbps = dc_fixpt_ceil( + dc_fixpt_mul_int(overhead_factor, total_kbps)); + } + + return total_kbps; +} + uint32_t dc_bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) + const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding) { uint32_t bits_per_channel = 0; uint32_t kbps; @@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing( kbps = kbps * 2 / 3; } + if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + kbps = apply_128b_132b_stream_overhead(timing, kbps); + return kbps; } @@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range); static uint32_t compute_bpp_x16_from_target_bandwidth( @@ -133,6 +175,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) @@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range( uint32_t max_bpp_x16, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; @@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range( if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, &config); + &options, link_encoding, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, - config.num_slices_h, &dsc_common_caps, timing, range); + config.num_slices_h, &dsc_common_caps, timing, link_encoding, range); return is_dsc_possible; } @@ -557,6 +601,7 @@ 
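/* Worked example for apply_128b_132b_stream_overhead() above, using
 * plain floating point in place of fixed31_32 (illustrative only):
 *   bpp = kbps / (pix_clk_100hz / 10)          e.g. 24.0
 *   Symbols_per_HActive = 1366 * 24 / 128      = 256.125
 *   Overhead_factor = ceil(256.125) / 256.125  = 257 / 256.125 ~= 1.0034
 * i.e. an uncompressed 1366-pixel line is padded by ~0.34% so it ends on
 * a whole 4-lane 32-bit symbol boundary. Callers opt in per link:
 *
 *   kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
 *                                           DC_LINK_ENCODING_DP_128b_132b);
 */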
static bool decide_dsc_bandwidth_range( const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_bw_range *range) { uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; @@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range( /* populate output structure */ if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding); /* max dsc target bpp */ range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, @@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16( const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const int num_slices_h, + const enum dc_link_encoding_format link_encoding, int *target_bpp_x16) { struct dc_dsc_bw_range range; @@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16( *target_bpp_x16 = 0; if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - num_slices_h, dsc_common_caps, timing, &range)) { + num_slices_h, dsc_common_caps, timing, link_encoding, &range)) { if (target_bandwidth_kbps >= range.stream_kbps) { if (policy->enable_dsc_when_not_needed) /* enable max bpp even dsc is not needed */ @@ -796,6 +842,7 @@ static bool setup_dsc_config( int target_bandwidth_kbps, const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -995,6 +1042,7 @@ static bool setup_dsc_config( target_bandwidth_kbps, timing, num_slices_h, + link_encoding, &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } @@ -1023,6 +1071,7 @@ bool dc_dsc_compute_config( const struct dc_dsc_config_options *options, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const enum dc_link_encoding_format link_encoding, struct dc_dsc_config *dsc_cfg) { bool is_dsc_possible = false; @@ -1032,7 +1081,7 @@ bool dc_dsc_compute_config( is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, dsc_cfg); + timing, options, link_encoding, dsc_cfg); return is_dsc_possible; } @@ -1165,6 +1214,11 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable) dsc_policy_disable_dsc_stream_overhead = disable; } +void dc_set_disable_128b_132b_stream_overhead(bool disable) +{ + disable_128b_132b_stream_overhead = disable; +} + void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options) { options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index 59884ef651b3..4a2bf81286d8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -31,21 +31,21 @@ /****************************** new register headers */ /*** following in header */ -#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \ +#define DDC_GPIO_REG_LIST_ENTRY(type, cd, id) \ .type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\ .type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT -#define 
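/* The new link_encoding parameter threads straight down the DSC sizing
 * call chain shown in the hunks above:
 *
 *   dc_dsc_compute_config(..., link_encoding, dsc_cfg)
 *     -> setup_dsc_config(..., link_encoding, dsc_cfg)
 *       -> decide_dsc_target_bpp_x16(..., link_encoding, &target_bpp)
 *         -> decide_dsc_bandwidth_range(..., link_encoding, &range)
 *           -> dc_bandwidth_in_kbps_from_timing(timing, link_encoding)
 *
 * so only the leaf bandwidth helper needs to know about 128b/132b
 * symbol padding.
 */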
DDC_GPIO_REG_LIST(cd,id) \ +#define DDC_GPIO_REG_LIST(cd, id) \ {\ - DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\ - DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\ - DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\ - DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\ + DDC_GPIO_REG_LIST_ENTRY(MASK, cd, id),\ + DDC_GPIO_REG_LIST_ENTRY(A, cd, id),\ + DDC_GPIO_REG_LIST_ENTRY(EN, cd, id),\ + DDC_GPIO_REG_LIST_ENTRY(Y, cd, id)\ } -#define DDC_REG_LIST(cd,id) \ - DDC_GPIO_REG_LIST(cd,id),\ +#define DDC_REG_LIST(cd, id) \ + DDC_GPIO_REG_LIST(cd, id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP) #define DDC_REG_LIST_DCN2(cd, id) \ @@ -54,34 +54,34 @@ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\ +#define DDC_GPIO_VGA_REG_LIST_ENTRY(type, cd)\ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\ .type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT #define DDC_GPIO_VGA_REG_LIST(cd) \ {\ - DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\ - DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\ - DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\ - DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\ + DDC_GPIO_VGA_REG_LIST_ENTRY(MASK, cd),\ + DDC_GPIO_VGA_REG_LIST_ENTRY(A, cd),\ + DDC_GPIO_VGA_REG_LIST_ENTRY(EN, cd),\ + DDC_GPIO_VGA_REG_LIST_ENTRY(Y, cd)\ } #define DDC_VGA_REG_LIST(cd) \ DDC_GPIO_VGA_REG_LIST(cd),\ .ddc_setup = mmDC_I2C_DDCVGA_SETUP -#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \ +#define DDC_GPIO_I2C_REG_LIST_ENTRY(type, cd) \ .type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\ .type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT #define DDC_GPIO_I2C_REG_LIST(cd) \ {\ - DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\ - DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\ - DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\ - DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\ + DDC_GPIO_I2C_REG_LIST_ENTRY(MASK, cd),\ + DDC_GPIO_I2C_REG_LIST_ENTRY(A, cd),\ + DDC_GPIO_I2C_REG_LIST_ENTRY(EN, cd),\ + DDC_GPIO_I2C_REG_LIST_ENTRY(Y, cd)\ } #define DDC_I2C_REG_LIST(cd) \ @@ -150,12 +150,12 @@ struct ddc_sh_mask { #define ddc_data_regs(id) \ {\ - DDC_REG_LIST(DATA,id)\ + DDC_REG_LIST(DATA, id)\ } #define ddc_clk_regs(id) \ {\ - DDC_REG_LIST(CLK,id)\ + DDC_REG_LIST(CLK, id)\ } #define ddc_vga_data_regs \ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h index dcfdd71b2304..debb363cfcf4 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h @@ -36,17 +36,17 @@ #define ONE_MORE_5 6 -#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \ +#define HPD_GPIO_REG_LIST_ENTRY(type, cd, id) \ .type ## _reg = REG(DC_GPIO_HPD_## type),\ .type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\ .type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT #define HPD_GPIO_REG_LIST(id) \ {\ - HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\ - HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\ - HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\ - HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\ + HPD_GPIO_REG_LIST_ENTRY(MASK, cd, id),\ + HPD_GPIO_REG_LIST_ENTRY(A, cd, id),\ + HPD_GPIO_REG_LIST_ENTRY(EN, cd, id),\ + HPD_GPIO_REG_LIST_ENTRY(Y, cd, id)\ } #define HPD_REG_LIST(id) \ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 034610b74a37..027aec70c070 100644 --- 
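/* For reference, one entry of the token-pasting macros reformatted
 * above: DDC_GPIO_REG_LIST_ENTRY(A, DATA, 1) expands to
 *
 *   .A_reg   = REG(DC_GPIO_DDC1_A),
 *   .A_mask  = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK,
 *   .A_shift = DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT
 *
 * so adding a space after each macro argument is purely a checkpatch
 * cleanup and cannot change the generated initializers.
 */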
a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -125,39 +125,15 @@ struct resource_funcs { struct dc *dc, struct dc_state *context); - /* - * Acquires a free pipe for the head pipe. - * The head pipe is first pipe in the current context that matches the stream - * and does not have a top pipe or prev_odm_pipe. - */ - struct pipe_ctx *(*acquire_idle_pipe_for_layer)( - struct dc_state *context, + struct pipe_ctx *(*acquire_free_pipe_as_secondary_dpp_pipe)( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, const struct resource_pool *pool, - struct dc_stream_state *stream); + const struct pipe_ctx *opp_head_pipe); - /* - * Acquires a free pipe for the head pipe with some additional checks for odm. - * The head pipe is passed in as an argument unlike acquire_idle_pipe_for_layer - * where it is read from the context. So this allows us look for different - * idle_pipe if the head_pipes are different ( ex. in odm 2:1 when we have - * a left and right pipe ). - * - * It also checks the old context to see if: - * - * 1. a pipe has already been allocated for the head pipe. If so, it will - * try to select that pipe as the idle pipe if it is available in the current - * context. - * 2. if the head_pipe is on the left, it will check if the right pipe has - * a pipe already allocated. If so, it will not use that pipe if it is - * selected as the idle pipe. - */ - struct pipe_ctx *(*acquire_idle_pipe_for_head_pipe_in_layer)( - struct dc_state *context, - const struct resource_pool *pool, - struct dc_stream_state *stream, - struct pipe_ctx *head_pipe); - - enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps); + enum dc_status (*validate_plane)( + const struct dc_plane_state *plane_state, + struct dc_caps *caps); enum dc_status (*add_stream_to_ctx)( struct dc *dc, @@ -304,6 +280,8 @@ struct resource_pool { struct dmcu *dmcu; struct dmub_psr *psr; + struct dmub_replay *replay; + struct abm *multiple_abms[MAX_PIPES]; const struct resource_funcs *funcs; @@ -572,6 +550,23 @@ struct dc_state { } perf_params; }; +struct replay_context { + /* ddc line */ + enum channel_id aux_inst; + /* Transmitter id */ + enum transmitter digbe_inst; + /* Engine Id is used for Dig Be source select */ + enum engine_id digfe_inst; + /* Controller Id used for Dig Fe source select */ + enum controller_id controllerId; + unsigned int line_time_in_ns; +}; + +enum dc_replay_enable { + DC_REPLAY_DISABLE = 0, + DC_REPLAY_ENABLE = 1, +}; + struct dc_bounding_box_max_clk { int max_dcfclk_mhz; int max_dispclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index d2190a3320f6..33db15d69f23 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -27,6 +27,8 @@ #include "dm_services_types.h" +struct abm_save_restore; + struct abm { struct dc_context *ctx; const struct abm_funcs *funcs; @@ -55,6 +57,10 @@ struct abm_funcs { unsigned int bytes, unsigned int inst); bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); + bool (*save_restore)( + struct abm *abm, + unsigned int panel_inst, + struct abm_save_restore *pData); bool (*set_pipe_ex)(struct abm *abm, unsigned int otg_inst, unsigned int option, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index 7254182b7c72..af6b9509d09d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -172,8 +172,6 @@ struct aux_engine_funcs { struct aux_engine *engine, uint8_t *returned_bytes); bool (*is_engine_available)(struct aux_engine *engine); - enum i2caux_engine_type (*get_engine_type)( - const struct aux_engine *engine); bool (*acquire)( struct aux_engine *engine, struct ddc *ddc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 8dc804bbe98b..3e2f0f64c98c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -123,6 +123,11 @@ struct dccg_funcs { struct dccg *dccg, int hpo_le_inst); + void (*set_symclk32_le_root_clock_gating)( + struct dccg *dccg, + int hpo_le_inst, + bool enable); + void (*set_physymclk)( struct dccg *dccg, int phy_inst, @@ -167,6 +172,16 @@ struct dccg_funcs { struct dccg *dccg, unsigned int dpp_inst, bool clock_on); + + void (*enable_symclk_se)( + struct dccg *dccg, + uint32_t stream_enc_inst, + uint32_t link_enc_inst); + + void (*disable_symclk_se)( + struct dccg *dccg, + uint32_t stream_enc_inst, + uint32_t link_enc_inst); }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index aaa293613846..f5677dbb4e7d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -49,6 +49,8 @@ struct dcn_hubbub_wm_set { uint32_t dram_clk_change; uint32_t usr_retrain; uint32_t fclk_pstate_change; + uint32_t sr_enter_exit_Z8; + uint32_t sr_enter_Z8; }; struct dcn_hubbub_wm { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c4fbbf08ef86..a6dedf3c7d74 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -269,6 +269,7 @@ struct stream_encoder_funcs { struct stream_encoder *enc, unsigned int pix_per_container); void (*enable_fifo)(struct stream_encoder *enc); void (*disable_fifo)(struct stream_encoder *enc); + void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst); }; struct hpo_dp_stream_encoder_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index f839494d59d8..e3e8c76c17cf 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -179,6 +179,10 @@ struct link_service { int (*aux_transfer_raw)(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result); + bool (*configure_fixed_vs_pe_retimer)( + struct ddc_service *ddc, + const uint8_t *data, + uint32_t len); bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc, struct aux_payload *payload); bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc); @@ -269,6 +273,20 @@ struct link_service { uint16_t psr_vtotal_su); void (*edp_get_psr_residency)( const struct dc_link *link, uint32_t *residency); + + bool (*edp_get_replay_state)( + const struct dc_link *link, uint64_t *state); + bool (*edp_set_replay_allow_active)(struct dc_link *dc_link, + const bool *enable, bool wait, bool force_static, + const unsigned int *power_opts); + bool (*edp_setup_replay)(struct dc_link *link, + const struct dc_stream_state *stream); + bool (*edp_set_coasting_vtotal)( + struct dc_link *link, uint16_t 
coasting_vtotal); + bool (*edp_replay_residency)(const struct dc_link *link, + unsigned int *residency, const bool is_start, + const bool is_alpm); + bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index eaeb684c8a48..e546b9c506c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -142,10 +142,6 @@ struct clock_source *dc_resource_find_first_free_pll( struct resource_context *res_ctx, const struct resource_pool *pool); -struct pipe_ctx *resource_get_head_pipe_for_stream( - struct resource_context *res_ctx, - struct dc_stream_state *stream); - bool resource_attach_surfaces_to_context( struct dc_plane_state *const *plane_state, int surface_count, @@ -153,11 +149,232 @@ bool resource_attach_surfaces_to_context( struct dc_state *context, const struct resource_pool *pool); -struct pipe_ctx *find_idle_secondary_pipe( +#define FREE_PIPE_INDEX_NOT_FOUND -1 + +/* + * pipe types are identified based on MUXes in DCN front end that are capable + * of taking input from one DCN pipeline to another DCN pipeline. The name is + * in a form of XXXX_YYYY, where XXXX is the DCN front end hardware block the + * pipeline ends with and YYYY is the rendering role that the pipe is in. + * + * For instance OTG_MASTER is a pipe ending with OTG hardware block in its + * pipeline and it is in a role of a master pipe for timing generation. + * + * For quick reference a diagram of each pipe type's areas of responsibility + * for outputting timings on the screen is shown below: + * + * Timing Active for Stream 0 + * __________________________________________________ + * |OTG master 0 (OPP head 0)|OPP head 2 (DPP pipe 2) | + * | (DPP pipe 0)| | + * | Top Plane 0 | | + * | ______________|____ | + * | |DPP pipe 1 |DPP | | + * | | |pipe| | + * | | Bottom |3 | | + * | | Plane 1 | | | + * | | | | | + * | |______________|____| | + * | | | + * | | | + * | ODM slice 0 | ODM slice 1 | + * |_________________________|________________________| + * + * Timing Active for Stream 1 + * __________________________________________________ + * |OTG master 4 (OPP head 4) | + * | | + * | | + * | | + * | | + * | | + * | Blank Pixel Data | + * | (generated by DPG4) | + * | | + * | | + * | | + * | | + * | | + * |__________________________________________________| + * + * Inter-pipe Relation + * __________________________________________________ + * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER | + * | | plane 0 | slice 0 | | + * | 0 | -------------MPC---------ODM----------- | + * | | plane 1 | | | | | + * | 1 | ------------- | | | | + * | | plane 0 | slice 1 | | | + * | 2 | -------------MPC--------- | | + * | | plane 1 | | | | + * | 3 | ------------- | | | + * | | | blank | | + * | 4 | | ----------------------- | + * | | | | | + * | 5 | (FREE) | | | + * |________|_______________|___________|_____________| + */ +enum pipe_type { + /* free pipe - free pipe is an uninitialized pipe without a stream + * associated with it. It is a free DCN pipe resource. It can be + * acquired as any type of pipe. + */ + FREE_PIPE, + + /* OTG master pipe - the master pipe of its OPP head pipes with a + * functional OTG. It merges all its OPP head pipes pixel data in ODM + * block and output to backend DIG. OTG master pipe is responsible for + * generating entire crtc timing to backend DIG. 
An OTG master pipe may + * or may not have a plane. If it has a plane it blends it as the left + * most MPC slice of the top most layer. If it doesn't have a plane it + * can output pixel data from its OPP head pipes' test pattern + * generators (DPG) such as solid black pixel data to blank the screen. + */ + OTG_MASTER, + + /* OPP head pipe - the head pipe of an MPC blending tree with a + * functional OPP outputting to an OTG. OPP head pipe is responsible for + * processing output pixels in its own ODM slice. It may or may not have + * a plane. If it has a plane it blends it as the top most layer within + * its own ODM slice. If it doesn't have a plane it can output pixel + * data from its DPG such as solid black pixel data to blank the pixel + * data in its own ODM slice. OTG master pipe is also an OPP head pipe + * but with more responsibility. + */ + OPP_HEAD, + + /* DPP pipe - the pipe with a functional DPP outputting to an OPP head + * pipe's MPC. DPP pipe is responsible for processing pixel data from + * its own MPC slice of a plane. It must be connected to an OPP head + * pipe and it must have a plane associated with it. + */ + DPP_PIPE, +}; + +/* + * Determine if the input pipe ctx is of a pipe type. + * return - true if pipe ctx is of the input type. + */ +bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type); + +/* + * Determine if the input pipe ctx is used for rendering a plane with MPCC + * combine. MPCC combine is a hardware feature to combine multiple DPP pipes + * into a single plane. It is typically used for bypassing pipe bandwidth + * limitation for rendering a very large plane or saving power by reducing UCLK + * and DPPCLK speeds. + * + * For instance in the Inter-pipe Relation diagram shown below, both PIPE 0 and + * 1 are for MPCC combine for plane 0 + * + * Inter-pipe Relation + * __________________________________________________ + * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER | + * | | plane 0 | | | + * | 0 | -------------MPC----------------------- | + * | | plane 0 | | | | + * | 1 | ------------- | | | + * |________|_______________|___________|_____________| + * + * return - true if pipe ctx is used for mpcc combine. + */ +bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx); + +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in MPC blending tree associated with input OPP head pipe. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_in_cur_mpc_blending_tree( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct pipe_ctx *cur_opp_head); + +/* + * Look for a free pipe in new resource context that is not used in current + * resource context. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int recource_find_free_pipe_not_used_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in any MPCC combine in current resource context. 
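/* A sketch of what resource_is_pipe_type() tests, consistent with the
 * open-coded checks it replaces elsewhere in this series (the actual
 * implementation lives in resource.c):
 */
bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
{
	switch (type) {
	case FREE_PIPE:
		return !pipe_ctx->stream && !pipe_ctx->plane_state;
	case OTG_MASTER:
		return pipe_ctx->stream && !pipe_ctx->top_pipe &&
				!pipe_ctx->prev_odm_pipe;
	case OPP_HEAD:
		return pipe_ctx->stream && !pipe_ctx->top_pipe;
	case DPP_PIPE:
		return pipe_ctx->stream && pipe_ctx->plane_state;
	default:
		return false;
	}
}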
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Look for any free pipe in new resource context. + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_any_free_pipe(struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Legacy find free secondary pipe logic deprecated for newer DCNs as it doesn't + * find the most optimal free pipe to prevent from time consuming hardware state + * transitions. + */ +struct pipe_ctx *resource_find_free_secondary_pipe_legacy( struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe); +/* + * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice + * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it + * will have 4 pieces of slice. + * return - 0 if pipe is not used for a plane with MPCC combine. otherwise + * the number of MPC "cuts" for the plane. + */ +int resource_get_num_mpc_splits(const struct pipe_ctx *pipe); + +/* + * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice + * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it + * will have 4 pieces of slice. + * return - 0 if pipe is not used for ODM combine. otherwise + * the number of ODM "cuts" for the timing. + */ +int resource_get_num_odm_splits(const struct pipe_ctx *pipe); + +/* + * Get the OTG master pipe in resource context associated with the stream. + * return - NULL if not found. Otherwise the OTG master pipe associated with the + * stream. + */ +struct pipe_ctx *resource_get_otg_master_for_stream( + struct resource_context *res_ctx, + struct dc_stream_state *stream); + +/* + * Get the OTG master pipe for the input pipe context. + * return - the OTG master pipe for the input pipe + * context. + */ +struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx); + +/* + * Get the OPP head pipe for the input pipe context. + * return - the OPP head pipe for the input pipe + * context. 
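/* A plausible acquire order built from the four searches declared above,
 * from least to most disruptive; each FREE_PIPE_INDEX_NOT_FOUND result
 * cascades to the next search. cur_ctx, new_ctx, pool and opp_head_pipe
 * stand for the caller's state; the DCN-specific
 * acquire_free_pipe_as_secondary_dpp_pipe callbacks decide the order:
 */
int free_pipe_idx;

free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
		&cur_ctx->res_ctx, &new_ctx->res_ctx, opp_head_pipe);
if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
	free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
			&cur_ctx->res_ctx, &new_ctx->res_ctx, pool); /* sic: declared as "recource_" above */
if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
	free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
			&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
	free_pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);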
+ */ +struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx); + + bool resource_validate_attach_surfaces( const struct dc_validation_set set[], int set_count, @@ -193,10 +410,6 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); -int get_num_mpc_splits(struct pipe_ctx *pipe); - -int get_num_odm_splits(struct pipe_ctx *pipe); - bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c index c923b2af8510..37bc98faa7a0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c @@ -38,10 +38,9 @@ #define DCN_BASE__INST0_SEG2 0x000034C0 -static enum dc_irq_source to_dal_irq_source_dcn314( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile index a52b56e2859e..6af8a97d4a77 100644 --- a/drivers/gpu/drm/amd/display/dc/link/Makefile +++ b/drivers/gpu/drm/amd/display/dc/link/Makefile @@ -42,7 +42,8 @@ AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES) ############################################################################### # hwss ############################################################################### -LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o +LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \ +link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \ $(LINK_HWSS)) diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index db9f1baa27e5..fe4282771cd0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link, stream->timing.display_color_depth; struct bit_depth_reduction_params params; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; - int width = pipe_ctx->stream->timing.h_addressable + + struct pipe_ctx *odm_pipe; + int odm_cnt = 1; + int h_active = pipe_ctx->stream->timing.h_addressable + pipe_ctx->stream->timing.h_border_left + pipe_ctx->stream->timing.h_border_right; - int height = pipe_ctx->stream->timing.v_addressable + + int v_active = pipe_ctx->stream->timing.v_addressable + pipe_ctx->stream->timing.v_border_bottom + pipe_ctx->stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; memset(&params, 0, sizeof(params)); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_cnt++; + + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); + switch (test_pattern) { case DP_TEST_PATTERN_COLOR_SQUARES: controller_test_pattern = @@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link, { /* disable bit depth reduction */ pipe_ctx->stream->bit_depth_params = params; -
opp->funcs->opp_program_bit_depth_reduction(opp, &params); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, &params); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, controller_test_pattern, color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; + } else if (link->dc->hwss.set_disp_pattern_generator) { enum controller_dp_color_space controller_color_space; - int opp_cnt = 1; - int offset = 0; - int dpg_width = width; + struct output_pixel_processor *odm_opp; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -502,24 +508,9 @@ static void set_crtc_test_pattern(struct dc_link *link, break; } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - dpg_width = width / opp_cnt; - offset = dpg_width; - - link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - dpg_width, - height, - 0); - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); link->dc->hwss.set_disp_pattern_generator(link->dc, odm_pipe, @@ -527,11 +518,23 @@ static void set_crtc_test_pattern(struct dc_link *link, controller_color_space, color_depth, NULL, - dpg_width, - height, + odm_slice_width, + v_active, offset); - offset += offset; + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + last_odm_slice_width, + v_active, + offset); } } break; @@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link, /* restore bitdepth reduction */ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params); pipe_ctx->stream->bit_depth_params = params; - opp->funcs->opp_program_bit_depth_reduction(opp, &params); - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, &params); pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - color_depth); - else if (link->dc->hwss.set_disp_pattern_generator) { - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - int dpg_width; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - dpg_width = width / opp_cnt; - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { - struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + color_depth); + } else if (link->dc->hwss.set_disp_pattern_generator) { + struct output_pixel_processor *odm_opp; + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); link->dc->hwss.set_disp_pattern_generator(link->dc, odm_pipe, @@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
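/* Slice geometry for the pattern-generator loops above, e.g. with
 * h_active = 4098 and odm_cnt = 4:
 *   odm_slice_width      = 4098 / 4        = 1024
 *   last_odm_slice_width = 4098 - 3 * 1024 = 1026
 *   per-slice offsets    = 0, 1024, 2048, 3072
 * Each ODM slice is now programmed with its own width and a cumulative
 * offset; the old "offset += offset" doubled the offset every iteration
 * instead of advancing it by one slice width.
 */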
CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); link->dc->hwss.set_disp_pattern_generator(link->dc, - pipe_ctx, + odm_pipe, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - dpg_width, - height, - 0); + last_odm_slice_width, + v_active, + offset); } } break; @@ -674,7 +675,8 @@ bool dp_set_test_pattern( if (pipes[i].stream == NULL) continue; - if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) { + if (resource_is_pipe_type(&pipes[i], OTG_MASTER) && + pipes[i].stream->link == link) { pipe_ctx = &pipes[i]; break; } @@ -702,6 +704,7 @@ bool dp_set_test_pattern( /* Reset Test Pattern state */ link->test_pattern_enabled = false; + link->current_test_pattern = test_pattern; return true; } @@ -739,6 +742,7 @@ bool dp_set_test_pattern( if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { /* Set Test Pattern state */ link->test_pattern_enabled = true; + link->current_test_pattern = test_pattern; if (p_link_settings != NULL) dpcd_set_link_settings(link, p_link_settings); @@ -937,6 +941,7 @@ bool dp_set_test_pattern( /* Set Test Pattern state */ link->test_pattern_enabled = true; + link->current_test_pattern = test_pattern; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index bebf9c4c8702..1328a0ade342 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -46,6 +46,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); + if (stream_enc->funcs->map_stream_to_link) + stream_enc->funcs->map_stream_to_link(stream_enc, + stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); if (stream_enc->funcs->enable_fifo) stream_enc->funcs->enable_fifo(stream_enc); } @@ -163,7 +166,7 @@ void set_dio_dp_lane_settings(struct dc_link *link, link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings); } -static void update_dio_stream_allocation_table(struct dc_link *link, +void update_dio_stream_allocation_table(struct dc_link *link, const struct link_resource *link_res, const struct link_mst_stream_allocation_table *table) { diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index 8b8a099feeb0..f4633d3cf9b9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -55,5 +55,8 @@ void setup_dio_audio_output(struct pipe_ctx *pipe_ctx, struct audio_output *audio_output, uint32_t audio_inst); void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx); void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx); +void update_dio_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table); #endif /* __LINK_HWSS_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
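/* In setup_dio_stream_encoder() above, the link-encoder index passed to
 * the new map_stream_to_link() hook is derived from the transmitter
 * enum, e.g. TRANSMITTER_UNIPHY_C - TRANSMITTER_UNIPHY_A = 2; this
 * assumes the UNIPHY values are contiguous, and mirrors how the HPO
 * path passes link_enc->inst directly.
 */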
new file mode 100644 index 000000000000..b659baa23147 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c @@ -0,0 +1,200 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_dio.h" +#include "link_hwss_dio_fixed_vs_pe_retimer.h" +#include "link_enc_cfg.h" + +uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link) +{ + // TODO: Get USB-C cable orientation + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + return 0xF2; + else + return 0x12; +} + +void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link) +{ + const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link); + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + const uint8_t vendor_lttpr_exit_manual_automation_1[4] = {0x1, 0x50, dp_type, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_2[4] = {0x1, 0x50, 0x50, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_3[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_4[4] = {0x1, 0x10, 0x58, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_5[4] = {0x1, 0x10, 0x59, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_6[4] = {0x1, 0x30, 0x51, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_7[4] = {0x1, 0x30, 0x52, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_8[4] = {0x1, 0x30, 0x54, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_9[4] = {0x1, 0x30, 0x55, 0x0}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_1[0], sizeof(vendor_lttpr_exit_manual_automation_1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_2[0], sizeof(vendor_lttpr_exit_manual_automation_2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_3[0], sizeof(vendor_lttpr_exit_manual_automation_3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_4[0], sizeof(vendor_lttpr_exit_manual_automation_4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + 
&vendor_lttpr_exit_manual_automation_5[0], sizeof(vendor_lttpr_exit_manual_automation_5)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_6[0], sizeof(vendor_lttpr_exit_manual_automation_6)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_7[0], sizeof(vendor_lttpr_exit_manual_automation_7)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_8[0], sizeof(vendor_lttpr_exit_manual_automation_8)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_9[0], sizeof(vendor_lttpr_exit_manual_automation_9)); +} + +static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_link *link, + const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params, + const struct link_hwss *link_hwss) +{ + struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; + const uint8_t pltpat_custom[10] = {0x1F, 0x7C, 0xF0, 0xC1, 0x07, 0x1F, 0x7C, 0xF0, 0xC1, 0x07}; + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + + + if (tp_params == NULL) + return false; + + if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && + link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) { + // Deprogram overrides from previous test pattern + dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); + } + + switch (tp_params->dp_phy_pattern) { + case DP_TEST_PATTERN_80BIT_CUSTOM: + if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern, + pltpat_custom, tp_params->custom_pattern_size) != 0) + return false; + break; + case DP_TEST_PATTERN_D102: + break; + default: + if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || + link->current_test_pattern == DP_TEST_PATTERN_D102) + // Deprogram overrides from previous test pattern + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], + sizeof(vendor_lttpr_exit_manual_automation_0)); + + return false; + } + + hw_tp_params.dp_phy_pattern = tp_params->dp_phy_pattern; + hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode; + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0)); + + return true; +} + +static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override( + link, link_res, tp_params, get_dio_link_hwss())) { + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link) +{ + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t 
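/* The unrolled configure_fixed_vs_pe_retimer() sequences above all use
 * the same 4-byte payload shape; a table-driven sketch of the identical
 * behavior (entries abbreviated, and the runtime dp_type byte would
 * still need to be patched in, which is presumably why the driver
 * keeps the calls unrolled):
 */
static const uint8_t exit_seq[][4] = {
	{0x1, 0x11, 0x0, 0x06},
	{0x1, 0x50, 0x50, 0x0},
	/* ... remaining payloads exactly as in the function above ... */
};
unsigned int i;

for (i = 0; i < ARRAY_SIZE(exit_seq); i++)
	link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
			exit_seq[i], sizeof(exit_seq[i]));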
vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); +} + +static void enable_dio_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link_settings->lane_count == LANE_COUNT_FOUR) + enable_dio_fixed_vs_pe_retimer_program_4lane_output(link); + + enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings); +} + +static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + .reset_stream_encoder = reset_dio_stream_encoder, + .setup_stream_attribute = setup_dio_stream_attribute, + .disable_link_output = disable_dio_link_output, + .setup_audio_output = setup_dio_audio_output, + .enable_audio_packet = enable_dio_audio_packet, + .disable_audio_packet = disable_dio_audio_packet, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + .enable_dp_link_output = enable_dio_fixed_vs_pe_retimer_dp_link_output, + .set_dp_link_test_pattern = set_dio_fixed_vs_pe_retimer_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + .update_stream_allocation_table = update_dio_stream_allocation_table, + }, +}; + +bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link) +{ + if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + return false; + + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + + return true; +} + +const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void) +{ + return &dio_fixed_vs_pe_retimer_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h new file mode 100644 index 000000000000..9ac08a332540 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h @@ -0,0 +1,37 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
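/* How the new variant is presumably picked up (sketch; the actual
 * dispatch lives in the link_hwss factory, not in this file):
 */
const struct link_hwss *hwss =
		requires_fixed_vs_pe_retimer_dio_link_hwss(link) ?
		get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss();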
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ +#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ + +#include "link.h" + +uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link); +uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link); +void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link); +void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link); +bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link); +const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void); + +#endif /* __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index 586fe25c1702..e1257404357b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -28,7 +28,7 @@ #include "dccg.h" #include "clk_mgr.h" -static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, +void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) { struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = @@ -41,7 +41,7 @@ static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, throttled_vcp_size); } -static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, const struct dc_link_settings *link_settings, struct fixed31_32 throttled_vcp_size) { @@ -69,7 +69,7 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, hblank_min_symbol_width); } -static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) { struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; @@ -78,14 +78,14 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } -static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) { struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; stream_enc->funcs->disable(stream_enc); } -static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) +void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) { struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct dc_stream_state *stream = pipe_ctx->stream; @@ -102,12 +102,17 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); } -static void enable_hpo_dp_link_output(struct dc_link *link, +void enable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal, enum clock_source_id 
clock_source, const struct dc_link_settings *link_settings) { + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + true); link_res->hpo_dp_link_enc->funcs->enable_link_phy( link_res->hpo_dp_link_enc, link_settings, @@ -115,13 +120,18 @@ static void enable_hpo_dp_link_output(struct dc_link *link, link->link_enc->hpd_source); } -static void disable_hpo_dp_link_output(struct dc_link *link, +void disable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + false); } static void set_hpo_dp_link_test_pattern(struct dc_link *link, @@ -144,7 +154,7 @@ static void set_hpo_dp_lane_settings(struct dc_link *link, lane_settings[0].FFE_PRESET.raw); } -static void update_hpo_dp_stream_allocation_table(struct dc_link *link, +void update_hpo_dp_stream_allocation_table(struct dc_link *link, const struct link_resource *link_res, const struct link_mst_stream_allocation_table *table) { @@ -153,7 +163,7 @@ static void update_hpo_dp_stream_allocation_table(struct dc_link *link, table); } -static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, +void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, struct audio_output *audio_output, uint32_t audio_inst) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( @@ -162,13 +172,13 @@ static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, &pipe_ctx->stream->audio_info); } -static void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable( pipe_ctx->stream_res.hpo_dp_stream_enc); } -static void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->stream_res.audio) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h index 3cbb94b41a23..1d3ed8ca83b5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -28,9 +28,32 @@ #include "link_hwss.h" #include "link.h" +void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size); +void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx); +void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx); +void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx); +void enable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings
*link_settings); +void disable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); +void update_hpo_dp_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table); +void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output, uint32_t audio_inst); +void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx); +void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx); +const struct link_hwss *get_hpo_dp_link_hwss(void); bool can_use_hpo_dp_link_hwss(const struct dc_link *link, const struct link_resource *link_res); -const struct link_hwss *get_hpo_dp_link_hwss(void); #endif /* __LINK_HWSS_HPO_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c new file mode 100644 index 000000000000..b621b97711b6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -0,0 +1,229 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_hpo_dp.h" +#include "link_hwss_hpo_fixed_vs_pe_retimer_dp.h" +#include "link_hwss_dio_fixed_vs_pe_retimer.h" + +static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, + const struct dc_lane_settings *hw_lane_settings) +{ + const uint8_t vendor_ffe_preset_table[16] = { + 0x01, 0x41, 0x61, 0x81, + 0xB1, 0x05, 0x35, 0x65, + 0x85, 0xA5, 0x09, 0x39, + 0x59, 0x89, 0x0F, 0x24}; + + const uint8_t ffe_mask[4] = { + (hw_lane_settings[0].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[0].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[1].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[1].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[2].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[2].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[3].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[3].FFE_PRESET.settings.no_preshoot != 0 ? 
0xF1 : 0xFF)}; + + const uint8_t ffe_cfg[4] = { + vendor_ffe_preset_table[hw_lane_settings[0].FFE_PRESET.settings.level] & ffe_mask[0], + vendor_ffe_preset_table[hw_lane_settings[1].FFE_PRESET.settings.level] & ffe_mask[1], + vendor_ffe_preset_table[hw_lane_settings[2].FFE_PRESET.settings.level] & ffe_mask[2], + vendor_ffe_preset_table[hw_lane_settings[3].FFE_PRESET.settings.level] & ffe_mask[3]}; + + const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link); + + const uint8_t vendor_lttpr_write_data_ffe1[4] = {0x01, 0x50, dp_type, 0x0F}; + const uint8_t vendor_lttpr_write_data_ffe2[4] = {0x01, 0x55, dp_type, ffe_cfg[0]}; + const uint8_t vendor_lttpr_write_data_ffe3[4] = {0x01, 0x56, dp_type, ffe_cfg[1]}; + const uint8_t vendor_lttpr_write_data_ffe4[4] = {0x01, 0x57, dp_type, ffe_cfg[2]}; + const uint8_t vendor_lttpr_write_data_ffe5[4] = {0x01, 0x58, dp_type, ffe_cfg[3]}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe1[0], sizeof(vendor_lttpr_write_data_ffe1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe2[0], sizeof(vendor_lttpr_write_data_ffe2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe3[0], sizeof(vendor_lttpr_write_data_ffe3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe4[0], sizeof(vendor_lttpr_write_data_ffe4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe5[0], sizeof(vendor_lttpr_write_data_ffe5)); +} + +static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; + const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21}; + const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21}; + const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20}; + const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20}; + const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20}; + const uint8_t vendor_lttpr_write_data_pg10[4] = {0x1, 0x30, 0x55, 0x20}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg1[0], sizeof(vendor_lttpr_write_data_pg1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg2[0], sizeof(vendor_lttpr_write_data_pg2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg3[0], sizeof(vendor_lttpr_write_data_pg3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg4[0], sizeof(vendor_lttpr_write_data_pg4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg5[0], sizeof(vendor_lttpr_write_data_pg5)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg6[0], sizeof(vendor_lttpr_write_data_pg6)); + + if 
(link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg7[0], sizeof(vendor_lttpr_write_data_pg7)); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg8[0], sizeof(vendor_lttpr_write_data_pg8)); + + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg9[0], sizeof(vendor_lttpr_write_data_pg9)); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg10[0], sizeof(vendor_lttpr_write_data_pg10)); +} + +static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link *link, + const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params, + const struct link_hwss *link_hwss) +{ + struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + + if (tp_params == NULL) + return false; + + if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN || + tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) { + // Deprogram overrides from previously set square wave override + if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || + link->current_test_pattern == DP_TEST_PATTERN_D102) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], + sizeof(vendor_lttpr_exit_manual_automation_0)); + else + dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); + + return false; + } + + hw_tp_params.dp_phy_pattern = DP_TEST_PATTERN_PRBS31; + hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode; + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params); + + dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params); + + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]); + + return true; +} + +static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + if (!dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern( + link, link_res, tp_params, get_hpo_dp_link_hwss())) { + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, tp_params); + } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); + + // FFE is programmed when retimer is programmed for SQ128, but explicit + // programming needed here as well in case FFE-only update is requested + if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && + link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); +} + +static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link_settings->lane_count == 
LANE_COUNT_FOUR) + enable_dio_fixed_vs_pe_retimer_program_4lane_output(link); + + enable_hpo_dp_link_output(link, link_res, signal, clock_source, link_settings); +} + +static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = { + .setup_stream_encoder = setup_hpo_dp_stream_encoder, + .reset_stream_encoder = reset_hpo_dp_stream_encoder, + .setup_stream_attribute = setup_hpo_dp_stream_attribute, + .disable_link_output = disable_hpo_dp_link_output, + .setup_audio_output = setup_hpo_dp_audio_output, + .enable_audio_packet = enable_hpo_dp_audio_packet, + .disable_audio_packet = disable_hpo_dp_audio_packet, + .ext = { + .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size, + .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width, + .enable_dp_link_output = enable_hpo_fixed_vs_pe_retimer_dp_link_output, + .set_dp_link_test_pattern = set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern, + .set_dp_lane_settings = set_hpo_fixed_vs_pe_retimer_dp_lane_settings, + .update_stream_allocation_table = update_hpo_dp_stream_allocation_table, + }, +}; + +bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link) +{ + if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + return false; + + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + + return true; +} + +const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void) +{ + return &hpo_fixed_vs_pe_retimer_dp_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h new file mode 100644 index 000000000000..82301187bc7c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ +#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ + +#include "link.h" + +bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link); +const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void); + +#endif /* __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 8041b8369e45..c9b6676eaf53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -876,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); - set_default_brightness_aux(link); - //TODO: use cached + set_cached_brightness_aux(link); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 1a7b93e41e35..79aef205598b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -182,11 +182,8 @@ void link_resume(struct dc_link *link) static bool is_master_pipe_for_link(const struct dc_link *link, const struct pipe_ctx *pipe) { - return (pipe->stream && - pipe->stream->link && - pipe->stream->link == link && - pipe->top_pipe == NULL && - pipe->prev_odm_pipe == NULL); + return resource_is_pipe_type(pipe, OTG_MASTER) && + pipe->stream->link == link; } /* @@ -1079,8 +1076,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { uint64_t kbps; + enum dc_link_encoding_format link_encoding; - kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); + if (dp_is_128b_132b_signal(pipe_ctx)) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + else + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding); return get_pbn_from_bw_in_kbps(kbps); } @@ -1538,7 +1541,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); struct fixed31_32 timing_bw = dc_fixpt_from_int( - dc_bandwidth_in_kbps_from_timing(&stream->timing)); + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(link))); struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_div(timing_bw, timeslot_bw_effective); @@ -1971,6 +1975,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) bool is_vga_mode = (stream->timing.h_addressable == 640) && (stream->timing.v_addressable == 480); struct dc *dc = pipe_ctx->stream->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); if (stream->phy_pix_clk == 0) stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; @@ -2010,6 +2015,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) display_color_depth = COLOR_DEPTH_888; + /* We need to enable the stream encoder for TMDS first so that the 1/4 TMDS + * character clock is applied when the pixel clock is beyond 340MHz. 
+ */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link_hwss->setup_stream_encoder(pipe_ctx); + dc->hwss.enable_tmds_link_output( link, &pipe_ctx->link_res, @@ -2129,7 +2140,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_default_brightness_aux(link); // TODO: use cached if known + set_cached_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) msleep(bl_oled_enable_delay); edp_backlight_enable_aux(link, true); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index ac1c3e2e7c1d..195ca9e52eda 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -132,6 +132,7 @@ static void construct_link_service_ddc(struct link_service *link_srv) link_srv->destroy_ddc_service = link_destroy_ddc_service; link_srv->query_ddc_data = link_query_ddc_data; link_srv->aux_transfer_raw = link_aux_transfer_raw; + link_srv->configure_fixed_vs_pe_retimer = link_configure_fixed_vs_pe_retimer; link_srv->aux_transfer_with_retries_no_mutex = link_aux_transfer_with_retries_no_mutex; link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode; @@ -207,6 +208,13 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s link_srv->edp_set_sink_vtotal_in_psr_active = edp_set_sink_vtotal_in_psr_active; link_srv->edp_get_psr_residency = edp_get_psr_residency; + + link_srv->edp_get_replay_state = edp_get_replay_state; + link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; + link_srv->edp_setup_replay = edp_setup_replay; + link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; + link_srv->edp_replay_residency = edp_replay_residency; + link_srv->edp_wait_for_t12 = edp_wait_for_t12; link_srv->edp_is_ilr_optimization_required = edp_is_ilr_optimization_required; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index e8b2fc4002a5..b45fda96eaf6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing( /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; #endif - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > + dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; } else { // DP to HDMI TMDS converter if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) @@ -285,7 +286,7 @@ static bool dp_validate_mode_timing( link_setting = &link->verified_link_cap; */ - req_bw = dc_bandwidth_in_kbps_from_timing(timing); + req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); if (req_bw <= max_bw) { @@ -357,7 +358,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un for (uint8_t i = 0; i < num_streams; ++i) { link[i] = stream[i].link; - bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); + bw_needed[i] = 
dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(link[i])); } ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 0fa1228bc178..ecfd83299e75 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -412,6 +412,88 @@ int link_aux_transfer_raw(struct ddc_service *ddc, } } +uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link) +{ + uint32_t vendor_lttpr_write_address = 0xF004F; + uint8_t offset; + + switch (link->dpcd_caps.lttpr_caps.phy_repeater_cnt) { + case 0x80: // 1 lttpr repeater + offset = 1; + break; + case 0x40: // 2 lttpr repeaters + offset = 2; + break; + case 0x20: // 3 lttpr repeaters + offset = 3; + break; + case 0x10: // 4 lttpr repeaters + offset = 4; + break; + case 0x08: // 5 lttpr repeaters + offset = 5; + break; + case 0x04: // 6 lttpr repeaters + offset = 6; + break; + case 0x02: // 7 lttpr repeaters + offset = 7; + break; + case 0x01: // 8 lttpr repeaters + offset = 8; + break; + default: + offset = 0xFF; + } + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + return vendor_lttpr_write_address; +} + +uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link) +{ + return link_get_fixed_vs_pe_retimer_write_address(link) + 4; +} + +bool link_configure_fixed_vs_pe_retimer(struct ddc_service *ddc, const uint8_t *data, uint32_t length) +{ + struct aux_payload write_payload = { + .i2c_over_aux = false, + .write = true, + .address = link_get_fixed_vs_pe_retimer_write_address(ddc->link), + .length = length, + .data = (uint8_t *) data, + .reply = NULL, + .mot = I2C_MOT_UNDEF, + .write_status_update = false, + .defer_delay = 0, + }; + + return link_aux_transfer_with_retries_no_mutex(ddc, + &write_payload); +} + +bool link_query_fixed_vs_pe_retimer(struct ddc_service *ddc, uint8_t *data, uint32_t length) +{ + struct aux_payload read_payload = { + .i2c_over_aux = false, + .write = false, + .address = link_get_fixed_vs_pe_retimer_read_address(ddc->link), + .length = length, + .data = data, + .reply = NULL, + .mot = I2C_MOT_UNDEF, + .write_status_update = false, + .defer_delay = 0, + }; + + return link_aux_transfer_with_retries_no_mutex(ddc, + &read_payload); +} + bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload) { @@ -427,7 +509,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc, if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa && - ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) { + ddc->ctx->dce_version == DCN_VERSION_3_1) { /* Fixed VS workaround for AUX timeout */ const uint32_t fixed_vs_address = 0xF004F; const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc}; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h index 860ef15d7f1b..a3e25e55bed6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -72,6 +72,20 @@ bool link_query_ddc_data( bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, struct aux_payload *payload); +bool link_configure_fixed_vs_pe_retimer( + struct ddc_service *ddc, + const 
uint8_t *data, + uint32_t length); + +bool link_query_fixed_vs_pe_retimer( + struct ddc_service *ddc, + uint8_t *data, + uint32_t length); + +uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link); +uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link); + + void write_scdc_data( struct ddc_service *ddc_service, uint32_t pix_clk, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 3a5e80b57711..237e0ff955f3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -906,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream, struct dc_link_settings *link_setting) { struct dc_link *link = stream->link; - uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link)); memset(link_setting, 0, sizeof(*link_setting)); @@ -939,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream, tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; tmp_timing.flags.DSC = 0; - orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing, + dc_link_get_highest_encoding_format(link)); edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw); max_link_rate = tmp_link_setting.link_rate; } @@ -2008,6 +2009,16 @@ void detect_edp_sink_caps(struct dc_link *link) core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, &link->dpcd_caps.alpm_caps.raw, sizeof(link->dpcd_caps.alpm_caps.raw)); + + /* + * Read REPLAY info + */ + core_link_read_dpcd(link, DP_SINK_PR_PIXEL_DEVIATION_PER_LINE, + &link->dpcd_caps.pr_info.pixel_deviation_per_line, + sizeof(link->dpcd_caps.pr_info.pixel_deviation_per_line)); + core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, + &link->dpcd_caps.pr_info.max_deviation_line, + sizeof(link->dpcd_caps.pr_info.max_deviation_line)); } bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) @@ -2165,7 +2176,9 @@ static bool dp_verify_link_cap( link, &irq_data)) (*fail_count)++; - + } else if (status == LINK_TRAINING_LINK_LOSS) { + success = true; + (*fail_count)++; } else { (*fail_count)++; } @@ -2188,6 +2201,7 @@ bool dp_verify_link_cap_with_retries( int i = 0; bool success = false; int fail_count = 0; + struct dc_link_settings last_verified_link_cap = fail_safe_link_settings; dp_trace_detect_lt_init(link); @@ -2204,10 +2218,14 @@ bool dp_verify_link_cap_with_retries( if (!link_detect_connection_type(link, &type) || type == dc_connection_none) { link->verified_link_cap = fail_safe_link_settings; break; - } else if (dp_verify_link_cap(link, known_limit_link_setting, - &fail_count) && fail_count == 0) { - success = true; - break; + } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) { + last_verified_link_cap = link->verified_link_cap; + if (fail_count == 0) { + success = true; + break; + } + } else { + link->verified_link_cap = last_verified_link_cap; } fsleep(10 * 1000); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index ef8739df91bc..e047bbeaa49a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ 
b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -182,6 +182,69 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) return false; } +static bool handle_hpd_irq_replay_sink(struct dc_link *link) +{ + union dpcd_replay_configuration replay_configuration; + /* The AMD Replay version reuses DP_PSR_ERROR_STATUS for the REPLAY_ERROR status. */ + union psr_error_status replay_error_status; + + if (!link->replay_settings.replay_feature_enabled) + return false; + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_SINK_PR_REPLAY_STATUS, + &replay_configuration.raw, + sizeof(replay_configuration.raw)); + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_PSR_ERROR_STATUS, + &replay_error_status.raw, + sizeof(replay_error_status.raw)); + + link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = + replay_error_status.bits.LINK_CRC_ERROR; + link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = + replay_configuration.bits.DESYNC_ERROR_STATUS; + link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR = + replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS; + + if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR || + link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR || + link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { + bool allow_active; + + /* Acknowledge and clear configuration bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_SINK_PR_REPLAY_STATUS, + &replay_configuration.raw, + sizeof(replay_configuration.raw)); + + /* Acknowledge and clear error bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_PSR_ERROR_STATUS, /* DpcdAddress_REPLAY_Error_Status */ + &replay_error_status.raw, + sizeof(replay_error_status.raw)); + + /* Replay error, disable and re-enable Replay */ + if (link->replay_settings.replay_allow_active) { + allow_active = false; + edp_set_replay_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + edp_set_replay_allow_active(link, &allow_active, true, false, NULL); + } + return true; + } + return false; +} + void dp_handle_link_loss(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; @@ -360,6 +423,10 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, /* PSR-related error was detected and handled */ return true; + if (handle_hpd_irq_replay_sink(link)) + /* Replay-related error was detected and handled */ + return true; + /* If PSR-related error handled, Main link may be off, * so do not handle as a normal sink status change interrupt. */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index e011df4bdaf2..90339c2dfd84 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1699,13 +1699,20 @@ bool perform_link_training_with_retries( } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. 
*/ uint32_t req_bw; uint32_t link_bw; + enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED; decide_fallback_link_setting(link, &max_link_settings, &cur_link_settings, status); + + if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to * minimum link bandwidth. */ - req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding); link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings); is_link_bw_low = (req_bw > link_bw); is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 15faaf645b14..fd8f6f198146 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -36,6 +36,7 @@ #include "link_dpcd.h" #include "link_dp_phy.h" #include "link_dp_capability.h" +#include "link_ddc.h" #define DC_LOGGER \ link->ctx->logger @@ -46,42 +47,20 @@ void dp_fixed_vs_pe_read_lane_adjust( { const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; - const uint8_t offset = dp_parse_lttpr_repeater_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - uint32_t vendor_lttpr_write_address = 0xF004F; - uint32_t vendor_lttpr_read_address = 0xF0053; uint8_t dprx_vs = 0; uint8_t dprx_pe = 0; uint8_t lane; - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - vendor_lttpr_read_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - /* W/A to read lane settings requested by DPRX */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_vs, - 1); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); - core_link_read_dpcd( - link, - vendor_lttpr_read_address, - &dprx_pe, - 1); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + + link_query_fixed_vs_pe_retimer(link->ddc, &dprx_vs, 1); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + link_query_fixed_vs_pe_retimer(link->ddc, &dprx_pe, 1); for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; @@ -95,19 +74,11 @@ void dp_fixed_vs_pe_set_retimer_lane_settings( const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], uint8_t lane_count) { - const uint8_t offset = dp_parse_lttpr_repeater_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - uint32_t vendor_lttpr_write_address = 0xF004F; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t 
vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; uint8_t lane = 0; - if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - } - for (lane = 0; lane < lane_count; lane++) { vendor_lttpr_write_data_vs[3] |= dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); @@ -116,21 +87,14 @@ void dp_fixed_vs_pe_set_retimer_lane_settings( } /* Force LTTPR to output desired VS and PE */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); } static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( @@ -236,7 +200,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -244,10 +212,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( uint8_t toggle_rate; uint8_t rate; - if (link->local_sink) - pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -258,37 +222,27 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( } if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - 
sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* Vendor specific: Enable intercept */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); + /* 1. set link rate, lane count and spread. */ @@ -339,6 +293,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. Perform link training */ /* Perform Clock Recovery Sequence */ @@ -351,7 +318,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - enum dc_status dpcd_status = DC_OK; uint8_t i = 0; retries_cr = 0; @@ -386,18 +352,12 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( for (i = 0; i < max_vendor_dpcd_retries; i++) { if (pre_disable_intercept_delay_ms != 0) msleep(pre_disable_intercept_delay_ms); - dpcd_status = core_link_write_dpcd( - link, - vendor_lttpr_write_address, + if (link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis)); - - if (dpcd_status == DC_OK) + sizeof(vendor_lttpr_write_data_intercept_dis))) break; - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); } @@ -413,16 +373,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); dpcd_set_lane_settings( link, @@ -518,16 +472,10 @@ enum link_training_result 
dp_perform_fixed_vs_pe_training_sequence_legacy( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 2. update DPCD*/ if (!retries_ch_eq) @@ -596,10 +544,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; + uint32_t pre_disable_intercept_delay_ms = 0; uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - uint32_t pre_disable_intercept_delay_ms = 0; - uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -607,10 +559,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( uint8_t toggle_rate; uint8_t rate; - if (link->local_sink) - pre_disable_intercept_delay_ms = - link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms; - /* Only 8b/10b is supported */ ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING); @@ -621,37 +569,26 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( } if (offset != 0xFF) { - vendor_lttpr_write_address += - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; /* Certain display and cable configuration require extra delay */ - if (offset > 2) - pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2; + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } } /* Vendor specific: Reset lane settings */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_reset[0], - sizeof(vendor_lttpr_write_data_reset)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 
Vendor specific: Enable intercept */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); /* 1. set link rate, lane count and spread. */ @@ -702,6 +639,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); + } + /* 2. Perform link training */ /* Perform Clock Recovery Sequence */ @@ -714,7 +664,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - enum dc_status dpcd_status = DC_OK; uint8_t i = 0; retries_cr = 0; @@ -749,18 +698,12 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( for (i = 0; i < max_vendor_dpcd_retries; i++) { if (pre_disable_intercept_delay_ms != 0) msleep(pre_disable_intercept_delay_ms); - dpcd_status = core_link_write_dpcd( - link, - vendor_lttpr_write_address, + if (link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis)); - - if (dpcd_status == DC_OK) + sizeof(vendor_lttpr_write_data_intercept_dis))) break; - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); } @@ -776,16 +719,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); dpcd_set_lane_settings( link, @@ -858,17 +795,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq1[0], sizeof(vendor_lttpr_write_data_adicora_eq1)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, + 
link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_adicora_eq2[0], sizeof(vendor_lttpr_write_data_adicora_eq2)); + /* Note: also check that TPS4 is a supported feature*/ tr_pattern = lt_settings->pattern_for_eq; @@ -892,16 +826,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( } /* Vendor specific: Update VS and PE to DPRX requested value */ - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_vs[0], - sizeof(vendor_lttpr_write_data_vs)); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_pe[0], - sizeof(vendor_lttpr_write_data_pe)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); /* 2. update DPCD*/ if (!retries_ch_eq) { @@ -914,11 +842,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( lt_settings, tr_pattern, 0); - core_link_write_dpcd( - link, - vendor_lttpr_write_address, - &vendor_lttpr_write_data_adicora_eq3[0], - sizeof(vendor_lttpr_write_data_adicora_eq3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_adicora_eq3[0], + sizeof(vendor_lttpr_write_data_adicora_eq3)); + } else dpcd_set_lane_settings(link, lt_settings, 0); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 2039a345f23a..98e715aa6d8e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -34,9 +34,13 @@ #include "dm_helpers.h" #include "dal_asic_id.h" #include "dce/dmub_psr.h" +#include "dc/dc_dmub_srv.h" +#include "dce/dmub_replay.h" #include "abm.h" #define DC_LOGGER_INIT(logger) +#define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B + /* Travis */ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; /* Nutmeg */ @@ -46,43 +50,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) { union dpcd_edp_config edp_config_set; bool panel_mode_edp = false; + enum dc_status result; memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); - if (panel_mode != DP_PANEL_MODE_DEFAULT) { + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + case DP_PANEL_MODE_SPECIAL: + panel_mode_edp = true; + break; - switch (panel_mode) { - case DP_PANEL_MODE_EDP: - case DP_PANEL_MODE_SPECIAL: - panel_mode_edp = true; - break; + default: + break; + } - default: - break; - } + /*set edp panel mode in receiver*/ + result = core_link_read_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); - /*set edp panel mode in receiver*/ - core_link_read_dpcd( + if (result == DC_OK && + edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { + + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; + result = core_link_write_dpcd( link, DP_EDP_CONFIGURATION_SET, &edp_config_set.raw, sizeof(edp_config_set.raw)); - if (edp_config_set.bits.PANEL_MODE_EDP - != panel_mode_edp) { - enum dc_status result; - - edp_config_set.bits.PANEL_MODE_EDP = - panel_mode_edp; - result = core_link_write_dpcd( - link, - DP_EDP_CONFIGURATION_SET, - &edp_config_set.raw, - sizeof(edp_config_set.raw)); - - ASSERT(result == DC_OK); - } + ASSERT(result == DC_OK); } + link->panel_mode = panel_mode; DC_LOG_DETECTION_DP_CAPS("Link: %d eDP 
panel mode supported: %d " "eDP panel mode enabled: %d \n", @@ -164,6 +167,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + link->backlight_settings.backlight_millinits = backlight_millinits; if (!link->dpcd_caps.panel_luminance_control) { if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, @@ -251,10 +255,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, - (uint8_t *) backlight_millinits, - sizeof(uint32_t))) - return false; + if (!link->dpcd_caps.panel_luminance_control) { + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)backlight_millinits, + sizeof(uint32_t))) + return false; + } else { + //setting to 0 as a precaution, since target_luminance_value is 3 bytes + memset(backlight_millinits, 0, sizeof(uint32_t)); + + if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)backlight_millinits, + sizeof(struct target_luminance_value))) + return false; + } return true; } @@ -276,6 +290,15 @@ bool set_default_brightness_aux(struct dc_link *link) return false; } +bool set_cached_brightness_aux(struct dc_link *link) +{ + if (link->backlight_settings.backlight_millinits) + return edp_set_backlight_level_nits(link, true, + link->backlight_settings.backlight_millinits, 0); + else + return set_default_brightness_aux(link); +} + bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { @@ -309,7 +332,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link, core_link_read_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, sizeof(lane_count_set)); - req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); + req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link)); if (!crtc_timing->flags.DSC) edp_decide_link_settings(link, &link_setting, req_bw); @@ -807,6 +830,167 @@ bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_ return true; } +bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (replay == NULL && force_static) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + /* Set power optimization flag */ + if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { + if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { + replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); + link->replay_settings.replay_power_opt_active = *power_opts; + } + } + + /* Activate or deactivate Replay */ + if (allow_active && link->replay_settings.replay_allow_active != *allow_active) { + // TODO: Handle mux change case if force_static is set + // If force_static is set, just change the replay_allow_active state directly + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); + link->replay_settings.replay_allow_active = *allow_active; + } + + return true; +} +
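+/* Illustrative sketch, not part of this change: allow_active and power_opts + * above are applied only when the pointer is non-NULL and the value differs + * from the cached state, so a caller can update one without touching the + * other. Assuming a valid eDP link: + * + *	bool active = true; + *	unsigned int opts = 0; + * + *	edp_set_replay_allow_active(link, &active, false, false, &opts); + *	edp_set_replay_allow_active(link, NULL, false, false, &opts); // power opt only + */ +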
+bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + enum replay_state pr_state = REPLAY_STATE_0; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_get_state(replay, &pr_state, panel_inst); + *state = pr_state; + + return true; +} + +bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + /* To-do: Setup Replay */ + struct dc *dc; + struct dmub_replay *replay; + int i; + unsigned int panel_inst; + struct replay_context replay_context = { 0 }; + unsigned int lineTimeInNs = 0; + union replay_enable_and_configuration replay_config; + union dpcd_alpm_configuration alpm_config; + + if (!link) + return false; + + dc = link->ctx->dc; + replay = dc->res_pool->replay; + + if (!replay) + return false; + + replay_context.controllerId = CONTROLLER_ID_UNDEFINED; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel; + replay_context.digbe_inst = link->link_enc->transmitter; + replay_context.digfe_inst = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + replay_context.controllerId = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + lineTimeInNs = + ((stream->timing.h_total * 1000000) / + (stream->timing.pix_clk_100hz / 10)) + 1; + + replay_context.line_time_in_ns = lineTimeInNs; + + if (replay) + link->replay_settings.replay_feature_enabled = + replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); + if (link->replay_settings.replay_feature_enabled) { + + replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1; + replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION = + link->replay_settings.config.replay_timing_sync_supported; + replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1; + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_SINK_PR_ENABLE_AND_CONFIGURATION, + (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); + + memset(&alpm_config, 0, sizeof(alpm_config)); + alpm_config.bits.ENABLE = 1; + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_RECEIVER_ALPM_CONFIG, + &alpm_config.raw, + sizeof(alpm_config.raw)); + } + return true; +} +
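+/* Worked example for the line-time computation above, a sketch rather than + * part of this change: pix_clk_100hz is in 100 Hz units, so pix_clk_100hz / 10 + * is the pixel clock in kHz, (h_total * 1000000) / kHz is the line time in ns, + * and the +1 rounds up. For h_total = 2200 and pix_clk_100hz = 1485000 + * (148.5 MHz): + * + *	(2200 * 1000000) / (1485000 / 10) + 1 = 14815 ns per line + */ +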
+bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!replay) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst); + link->replay_settings.coasting_vtotal = coasting_vtotal; + } + + return true; +} + +bool edp_replay_residency(const struct dc_link *link, + unsigned int *residency, const bool is_start, const bool is_alpm) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); + else + *residency = 0; + + return true; +} + static struct abm *get_abm_from_stream_res(const struct dc_link *link) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 28f552080558..0a5bbda8c739 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool set_cached_brightness_aux(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, @@ -52,6 +53,14 @@ bool edp_setup_psr(struct dc_link *link, bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency); +bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts); +bool edp_setup_replay(struct dc_link *link, + const struct dc_stream_state *stream); +bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_replay_residency(const struct dc_link *link, + unsigned int *residency, const bool is_start, const bool is_alpm); +bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 4585e0419da6..2d995c87fbb9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -378,6 +378,7 @@ struct dmub_srv_hw_funcs { union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub); + union dmub_fw_boot_options (*get_fw_boot_option)(struct dmub_srv *dmub); void (*set_gpint)(struct dmub_srv *dmub, union dmub_gpint_data_register reg); @@ -778,9 +779,15 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb); enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, union dmub_fw_boot_status *status); +enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, + union dmub_fw_boot_options *option); + enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); +enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, + bool skip); + bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry); bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index af1f50742371..7afa78b918b5 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -170,6 +170,95 @@ extern "C" { #endif #pragma pack(push, 1) +#define ABM_NUM_OF_ACE_SEGMENTS 5 + +union abm_flags { + struct { + /** + * @abm_enabled: Indicates if ABM is enabled.
+ */ + unsigned int abm_enabled : 1; + + /** + * @disable_abm_requested: Indicates if driver has requested ABM to be disabled. + */ + unsigned int disable_abm_requested : 1; + + /** + * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled + * immediately. + */ + unsigned int disable_abm_immediately : 1; + + /** + * @disable_abm_immediate_keep_gain: Indicates if driver has requested ABM + * to be disabled immediately and keep gain. + */ + unsigned int disable_abm_immediate_keep_gain : 1; + + /** + * @fractional_pwm: Indicates if fractional duty cycle for backlight PWM is enabled. + */ + unsigned int fractional_pwm : 1; + + /** + * @abm_gradual_bl_change: Indicates if algorithm has completed gradual adjustment + * of user backlight level. + */ + unsigned int abm_gradual_bl_change : 1; + } bitfields; + + unsigned int u32All; +}; + +struct abm_save_restore { + /** + * @flags: Misc. ABM flags. + */ + union abm_flags flags; + + /** + * @pause: true: pause ABM and get state + * false: unpause ABM after setting state + */ + uint32_t pause; + + /** + * @next_ace_slope: Next ACE slopes to be programmed in HW (u3.13) + */ + uint32_t next_ace_slope[ABM_NUM_OF_ACE_SEGMENTS]; + + /** + * @next_ace_thresh: Next ACE thresholds to be programmed in HW (u10.6) + */ + uint32_t next_ace_thresh[ABM_NUM_OF_ACE_SEGMENTS]; + + /** + * @next_ace_offset: Next ACE offsets to be programmed in HW (u10.6) + */ + uint32_t next_ace_offset[ABM_NUM_OF_ACE_SEGMENTS]; + + + /** + * @knee_threshold: Current x-position of ACE knee (u0.16). + */ + uint32_t knee_threshold; + /** + * @current_gain: Current backlight reduction (u16.16). + */ + uint32_t current_gain; + /** + * @curr_bl_level: Current actual backlight level converging to target backlight level. + */ + uint16_t curr_bl_level; + + /** + * @curr_user_bl_level: Current nominal backlight level converging to level requested by user. + */ + uint16_t curr_user_bl_level; + +}; + /** * union dmub_addr - DMUB physical/virtual 64-bit address. */ @@ -248,6 +337,112 @@ union dmub_psr_debug_flags { uint32_t u32All; }; +/** + * Flags that can be set by driver to change some Replay behaviour. + */ +union replay_debug_flags { + struct { + /** + * Enable visual confirm in FW. + */ + uint32_t visual_confirm : 1; + + /** + * @skip_crc: Set if need to skip CRC. 
+ */ + uint32_t skip_crc : 1; + + /** + * @force_link_power_on: Force disable ALPM control + */ + uint32_t force_link_power_on : 1; + + /** + * @force_phy_power_on: Force phy power on + */ + uint32_t force_phy_power_on : 1; + + /** + * @timing_resync_disabled: Disabled Replay normal sleep mode timing resync + */ + uint32_t timing_resync_disabled : 1; + + /** + * @skip_crtc_disabled: CRTC disable skipped + */ + uint32_t skip_crtc_disabled : 1; + + /** + * @force_defer_one_frame_update: Force defer one frame update in ultra sleep mode + */ + uint32_t force_defer_one_frame_update : 1; + /** + * @disable_delay_alpm_on: Force disable delay alpm on + */ + uint32_t disable_delay_alpm_on : 1; + /** + * @disable_desync_error_check: Force disable desync error check + */ + uint32_t disable_desync_error_check : 1; + /** + * @disable_dmub_save_restore: Force disable DMUB save/restore + */ + uint32_t disable_dmub_save_restore : 1; + + uint32_t reserved : 22; + } bitfields; + + uint32_t u32All; +}; + +union replay_hw_flags { + struct { + /** + * @allow_alpm_fw_standby_mode: To indicate whether the + * ALPM FW standby mode is allowed + */ + uint32_t allow_alpm_fw_standby_mode : 1; + + /* + * @dsc_enable_status: DSC enable status in driver + */ + uint32_t dsc_enable_status : 1; + + /** + * @fec_enable_status: receive fec enable/disable status from driver + */ + uint32_t fec_enable_status : 1; + + /* + * @smu_optimizations_en: SMU power optimization. + * Only when active display is Replay capable and display enters Replay. + * Trigger interrupt to SMU to powerup/down. + */ + uint32_t smu_optimizations_en : 1; + + /** + * @otg_powered_down: Flag to keep track of OTG power state. + */ + uint32_t otg_powered_down : 1; + + /** + * @phy_power_state: Indicates current phy power state + */ + uint32_t phy_power_state : 1; + + /** + * @link_power_state: Indicates current link power state + */ + uint32_t link_power_state : 1; + /** + * Use TPS3 signal when restoring the main link. + */ + uint32_t force_wakeup_by_tps3 : 1; + } bitfields; + + uint32_t u32All; +}; + /** * DMUB visual confirm color */ @@ -565,10 +760,43 @@ enum dmub_gpint_command { */ DMUB_GPINT__PSR_RESIDENCY = 9, + /** + * DESC: Get REPLAY state from FW. + * RETURN: REPLAY state enum. This enum may need to be converted to the legacy REPLAY state value. + */ + DMUB_GPINT__GET_REPLAY_STATE = 13, + + /** + * DESC: Start REPLAY residency counter. Stop REPLAY residency counter and get value. + * ARGS: We can measure residency from various points. The argument will specify the residency mode. + * By default, it is measured from after we powerdown the PHY, to just before we powerup the PHY. + * RETURN: REPLAY residency in milli-percent. + */ + DMUB_GPINT__REPLAY_RESIDENCY = 14, + + /** * DESC: Notifies DMCUB detection is done so detection required can be cleared. */ DMUB_GPINT__NOTIFY_DETECTION_DONE = 12, + /** + * DESC: Updates the trace buffer lower 32-bit mask. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101, + /** + * DESC: Updates the trace buffer mask bit0~bit15. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102, + /** + * DESC: Updates the trace buffer mask bit16~bit31. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103, }; /** @@ -763,6 +991,11 @@ enum dmub_cmd_type { * Command type used for all VBIOS interface commands. */ + /** + * Command type used for all REPLAY commands. + */ + DMUB_CMD__REPLAY = 83, + /** * Command type used for all SECURE_DISPLAY commands. */ @@ -1911,6 +2144,10 @@ enum dmub_phy_fsm_state { DMUB_PHY_FSM_PLL_EN, DMUB_PHY_FSM_TX_EN, DMUB_PHY_FSM_FAST_LP, + DMUB_PHY_FSM_P2_PLL_OFF_CPM, + DMUB_PHY_FSM_P2_PLL_OFF_PG, + DMUB_PHY_FSM_P2_PLL_OFF, + DMUB_PHY_FSM_P2_PLL_ON, }; /** @@ -2496,6 +2733,272 @@ struct dmub_cmd_psr_set_power_opt_data { uint32_t power_opt; }; +#define REPLAY_RESIDENCY_MODE_SHIFT (0) +#define REPLAY_RESIDENCY_ENABLE_SHIFT (1) + +#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) + +#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) +# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) +# define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) +
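+/* Illustrative sketch, not part of this change: the DMUB_GPINT__REPLAY_RESIDENCY + * argument is composed from the mode and enable fields above, e.g.: + * + *	uint32_t start = REPLAY_RESIDENCY_MODE_PHY | REPLAY_RESIDENCY_ENABLE; + *	uint32_t stop = REPLAY_RESIDENCY_MODE_PHY | REPLAY_RESIDENCY_DISABLE; + * + * start begins counting PHY-off residency; stop halts the counter, and the + * GPINT response carries the residency in milli-percent, per the + * DMUB_GPINT__REPLAY_RESIDENCY description above. + */ +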
+enum replay_state { + REPLAY_STATE_0 = 0x0, + REPLAY_STATE_1 = 0x10, + REPLAY_STATE_1A = 0x11, + REPLAY_STATE_2 = 0x20, + REPLAY_STATE_3 = 0x30, + REPLAY_STATE_3INIT = 0x31, + REPLAY_STATE_4 = 0x40, + REPLAY_STATE_4A = 0x41, + REPLAY_STATE_4B = 0x42, + REPLAY_STATE_4C = 0x43, + REPLAY_STATE_4D = 0x44, + REPLAY_STATE_4B_LOCKED = 0x4A, + REPLAY_STATE_4C_UNLOCKED = 0x4B, + REPLAY_STATE_5 = 0x50, + REPLAY_STATE_5A = 0x51, + REPLAY_STATE_5B = 0x52, + REPLAY_STATE_5A_LOCKED = 0x5A, + REPLAY_STATE_5B_UNLOCKED = 0x5B, + REPLAY_STATE_6 = 0x60, + REPLAY_STATE_6A = 0x61, + REPLAY_STATE_6B = 0x62, + REPLAY_STATE_INVALID = 0xFF, +}; + +/** + * Replay command sub-types. + */ +enum dmub_cmd_replay_type { + /** + * Copy driver-calculated parameters to REPLAY state. + */ + DMUB_CMD__REPLAY_COPY_SETTINGS = 0, + /** + * Enable REPLAY. + */ + DMUB_CMD__REPLAY_ENABLE = 1, + /** + * Set Replay power option. + */ + DMUB_CMD__SET_REPLAY_POWER_OPT = 2, + /** + * Set coasting vtotal. + */ + DMUB_CMD__REPLAY_SET_COASTING_VTOTAL = 3, +}; +
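+/* Illustrative sketch, not part of this change: these sub-types are carried + * in the command header alongside the DMUB_CMD__REPLAY main type, with the + * payload structures defined below, e.g.: + * + *	union dmub_rb_cmd cmd = { 0 }; + * + *	cmd.replay_enable.header.type = DMUB_CMD__REPLAY; + *	cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; + *	cmd.replay_enable.data.enable = REPLAY_ENABLE; + */ +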
+/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ +struct dmub_cmd_replay_copy_settings_data { + /** + * Flags that can be set by driver to change some replay behaviour. + */ + union replay_debug_flags debug; + + /** + * @flags: Flags used to determine feature functionality. + */ + union replay_hw_flags flags; + + /** + * DPP HW instance. + */ + uint8_t dpp_inst; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + /** + * AUX HW instance. + */ + uint8_t aux_inst; + /** + * Panel Instance. + * Panel instance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * @pixel_deviation_per_line: Indicate the maximum pixel deviation per line compared + * to Source timing when Sink maintains coasting vtotal during the Replay normal sleep mode + */ + uint8_t pixel_deviation_per_line; + /** + * @max_deviation_line: The max number of deviation lines that can keep the timing + * synchronized between the Source and Sink during Replay normal sleep mode. + */ + uint8_t max_deviation_line; + /** + * Length of each horizontal line in ns. + */ + uint32_t line_time_in_ns; + /** + * PHY instance. + */ + uint8_t dpphy_inst; + /** + * Determines if SMU optimizations are enabled/disabled. + */ + uint8_t smu_optimizations_en; + /** + * Determines if timing sync is enabled/disabled. + */ + uint8_t replay_timing_sync_supported; + /* + * Use FSM state for Replay power up/down + */ + uint8_t use_phy_fsm; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ +struct dmub_rb_cmd_replay_copy_settings { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ + struct dmub_cmd_replay_copy_settings_data replay_copy_settings_data; +}; + +/** + * Replay disable / enable state for dmub_rb_cmd_replay_enable_data.enable + */ +enum replay_enable { + /** + * Disable REPLAY. + */ + REPLAY_DISABLE = 0, + /** + * Enable REPLAY. + */ + REPLAY_ENABLE = 1, +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command. + */ +struct dmub_rb_cmd_replay_enable_data { + /** + * Replay enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Phy state to enter. + * Values to use are defined in dmub_phy_fsm_state + */ + uint8_t phy_fsm_state; + /** + * Phy rate for DP - RBR/HBR/HBR2/HBR3. + * Set this using enum phy_link_rate. + * This does not support HDMI/DP2 for now. + */ + uint8_t phy_rate; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_ENABLE command. + * Replay enable/disable is controlled using action in data. + */ +struct dmub_rb_cmd_replay_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_rb_cmd_replay_enable_data data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__SET_REPLAY_POWER_OPT command. + */ +struct dmub_cmd_replay_set_power_opt_data { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[3]; + /** + * REPLAY power option + */ + uint32_t power_opt; +}; + +/** + * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. + */ +struct dmub_rb_cmd_replay_set_power_opt { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. + */ + struct dmub_cmd_replay_set_power_opt_data replay_set_power_opt_data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command. + */ +struct dmub_cmd_replay_set_coasting_vtotal_data { + /** + * 16-bit value dictated by driver that indicates the coasting vtotal. + */ + uint16_t coasting_vtotal; + /** + * REPLAY control version. + */ + uint8_t cmd_version; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command. + */ +struct dmub_rb_cmd_replay_set_coasting_vtotal { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command. + */ + struct dmub_cmd_replay_set_coasting_vtotal_data replay_set_coasting_vtotal_data; +}; +
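+/* Illustrative sketch, not part of this change: a direct driver-side fill of + * the coasting-vtotal command; in this patch the same data is produced through + * replay->funcs->replay_set_coasting_vtotal() via edp_set_coasting_vtotal(): + * + *	struct dmub_rb_cmd_replay_set_coasting_vtotal cmd = { 0 }; + * + *	cmd.header.type = DMUB_CMD__REPLAY; + *	cmd.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; + *	cmd.replay_set_coasting_vtotal_data.coasting_vtotal = 2250; + *	cmd.replay_set_coasting_vtotal_data.panel_inst = 0; + */ +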
 /** * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command. */ @@ -2586,6 +3089,10 @@ enum hw_lock_client { * PSR SU is the client of HW Lock Manager. */ HW_LOCK_CLIENT_PSR_SU = 1, + /** + * Replay is the client of HW Lock Manager. + */ + HW_LOCK_CLIENT_REPLAY = 4, /** * Invalid client. */ @@ -2672,6 +3179,12 @@ enum dmub_cmd_abm_type { * unregister vertical interrupt after steady state is reached */ DMUB_CMD__ABM_PAUSE = 6, + + /** + * Save and Restore ABM state. On save we save parameters, and + * on restore we update state with passed in data. + */ + DMUB_CMD__ABM_SAVE_RESTORE = 7, }; /** @@ -3056,6 +3569,7 @@ struct dmub_cmd_abm_pause_data { uint8_t pad[1]; }; + /** * Definition of a DMUB_CMD__ABM_PAUSE command. */ @@ -3071,6 +3585,36 @@ struct dmub_rb_cmd_abm_pause { struct dmub_cmd_abm_pause_data abm_pause_data; }; +/** + * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command. + */ +struct dmub_rb_cmd_abm_save_restore { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * OTG hw instance + */ + uint8_t otg_inst; + + /** + * Enable or disable ABM pause + */ + uint8_t freeze; + + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t debug; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_SAVE_RESTORE command. + */ + struct dmub_cmd_abm_init_config_data abm_init_config_data; +}; + /** * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command. */ @@ -3508,6 +4052,11 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_abm_pause abm_pause; + /** + * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command. + */ + struct dmub_rb_cmd_abm_save_restore abm_save_restore; + /** + * Definition of a DMUB_CMD__DP_AUX_ACCESS command. */ @@ -3576,6 +4125,22 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command. */ struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle; + /* + * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command. + */ + struct dmub_rb_cmd_replay_copy_settings replay_copy_settings; + /** + * Definition of a DMUB_CMD__REPLAY_ENABLE command. + */ + struct dmub_rb_cmd_replay_enable replay_enable; + /** + * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. + */ + struct dmub_rb_cmd_replay_set_power_opt replay_set_power_opt; + /** + * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command. + */ + struct dmub_rb_cmd_replay_set_coasting_vtotal replay_set_coasting_vtotal; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h deleted file mode 100644 index 21b02bad696f..000000000000 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE.
- * - * Authors: AMD - * - */ - -#ifndef DMUB_SUBVP_STATE_H -#define DMUB_SUBVP_STATE_H - -#include "dmub_cmd.h" - -#define DMUB_SUBVP_INST0 0 -#define DMUB_SUBVP_INST1 1 -#define SUBVP_MAX_WATERMARK 0xFFFF - -struct dmub_subvp_hubp_state { - uint32_t CURSOR0_0_CURSOR_POSITION; - uint32_t CURSOR0_0_CURSOR_HOT_SPOT; - uint32_t CURSOR0_0_CURSOR_DST_OFFSET; - uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; - uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; - uint32_t CURSOR0_0_CURSOR_SIZE; - uint32_t CURSOR0_0_CURSOR_CONTROL; - uint32_t HUBPREQ0_CURSOR_SETTINGS; - uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH; - uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; - uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; -}; - -enum subvp_error_code { - DMUB_SUBVP_INVALID_STATE, - DMUB_SUBVP_INVALID_TRANSITION, -}; - -enum subvp_state { - DMUB_SUBVP_DISABLED, - DMUB_SUBVP_IDLE, - DMUB_SUBVP_TRY_ACQUIRE_LOCKS, - DMUB_SUBVP_WAIT_FOR_LOCKS, - DMUB_SUBVP_PRECONFIGURE, - DMUB_SUBVP_PREPARE, - DMUB_SUBVP_ENABLE, - DMUB_SUBVP_SWITCHING, - DMUB_SUBVP_END, - DMUB_SUBVP_RESTORE, -}; - -/* Defines information for SUBVP to handle vertical interrupts. */ -struct dmub_subvp_vertical_interrupt_event { - /** - * @inst: Hardware instance of vertical interrupt. - */ - uint8_t otg_inst; - - /** - * @pad: Align structure to 4 byte boundary. - */ - uint8_t pad[3]; - - enum subvp_state curr_state; -}; - -struct dmub_subvp_vertical_interrupt_state { - /** - * @events: Event list. - */ - struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS]; -}; - -struct dmub_subvp_vline_interrupt_event { - - uint8_t hubp_inst; - uint8_t pad[3]; -}; - -struct dmub_subvp_vline_interrupt_state { - struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES]; -}; - -struct dmub_subvp_interrupt_ctx { - struct dmub_subvp_vertical_interrupt_state vertical_int; - struct dmub_subvp_vline_interrupt_state vline_int; -}; - -struct dmub_subvp_pipe_state { - uint32_t pix_clk_100hz; - uint16_t main_vblank_start; - uint16_t main_vblank_end; - uint16_t mall_region_lines; - uint16_t prefetch_lines; - uint16_t prefetch_to_mall_start_lines; - uint16_t processing_delay_lines; - uint8_t main_pipe_index; - uint8_t phantom_pipe_index; - uint16_t htotal; // htotal for main / phantom pipe - uint16_t vtotal; - uint16_t optc_underflow_count; - uint16_t hubp_underflow_count; - uint8_t pad[2]; -}; - -/** - * struct dmub_subvp_vblank_drr_info - Store DRR state when handling - * SubVP + VBLANK with DRR multi-display case. - * - * The info stored in this struct is only valid if drr_in_use = 1. 
- */ -struct dmub_subvp_vblank_drr_info { - uint8_t drr_in_use; - uint8_t drr_window_size_ms; // DRR window size -- indicates largest VMIN/VMAX adjustment per frame - uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK - uint16_t max_vtotal_supported; // Max VTOTAL that can still support SubVP static scheduling requirements - uint16_t prev_vmin; // Store VMIN value before MCLK switch (used to restore after MCLK end) - uint16_t prev_vmax; // Store VMAX value before MCLK switch (used to restore after MCLK end) - uint8_t use_ramping; // Use ramping or not - uint8_t pad[1]; -}; - -struct dmub_subvp_vblank_pipe_info { - uint32_t pix_clk_100hz; - uint16_t vblank_start; - uint16_t vblank_end; - uint16_t vstartup_start; - uint16_t vtotal; - uint16_t htotal; - uint8_t pipe_index; - uint8_t pad[1]; - struct dmub_subvp_vblank_drr_info drr_info; // DRR considered as part of SubVP + VBLANK case -}; - -enum subvp_switch_type { - DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE - DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays - DMUB_SUBVP_AND_VBLANK, - DMUB_SUBVP_AND_FPO, -}; - -/* SubVP state. */ -struct dmub_subvp_state { - struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS]; - struct dmub_subvp_interrupt_ctx int_ctx; - struct dmub_subvp_vblank_pipe_info vblank_info; - enum subvp_state state; // current state - enum subvp_switch_type switch_type; // enum take up 4 bytes (?) - uint8_t mclk_pending; - uint8_t num_subvp_streams; - uint8_t vertical_int_margin_us; - uint8_t pstate_allow_width_us; - uint32_t subvp_mclk_switch_count; - uint32_t subvp_wait_lock_count; - uint32_t driver_wait_lock_count; - uint32_t subvp_vblank_frame_count; - uint16_t watermark_a_cache; - uint8_t pad[2]; -}; - -#endif /* _DMUB_SUBVP_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 5e952541e72d..094e9f864557 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -352,6 +352,14 @@ union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub) return status; } +union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub) +{ + union dmub_fw_boot_options option; + + option.all = REG_READ(DMCUB_SCRATCH14); + return option; +} + void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) { union dmub_fw_boot_options boot_options = {0}; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 89c5a948b67d..4d520a893c7b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -239,6 +239,8 @@ void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip) union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub); +union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub); + void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub, const struct dmub_region *outbox0); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index bdaf43892f47..93624ffe4eb8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -255,6 +255,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_gpint_response = dmub_dcn31_get_gpint_response; 
funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout; funcs->get_fw_status = dmub_dcn31_get_fw_boot_status; + funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option; funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options; funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence; //outbox0 call stacks @@ -639,11 +640,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, if (dmub->hw_funcs.enable_dmub_boot_options) dmub->hw_funcs.enable_dmub_boot_options(dmub, params); - if (dmub->hw_funcs.skip_dmub_panel_power_sequence) + if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, params->skip_panel_power_sequence); - if (dmub->hw_funcs.reset_release) + if (dmub->hw_funcs.reset_release && !dmub->is_virtual) dmub->hw_funcs.reset_release(dmub); dmub->hw_init = true; @@ -846,6 +847,32 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, + union dmub_fw_boot_options *option) +{ + option->all = 0; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_fw_boot_option) + *option = dmub->hw_funcs.get_fw_boot_option(dmub); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, + bool skip) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual) + dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip); + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index f843fc497855..68dfc7968017 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -40,6 +40,7 @@ #define DP_BRANCH_HW_REV_20 0x20 #define DP_DEVICE_ID_38EC11 0x38EC11 +#define DP_DEVICE_ID_BA4159 0xBA4159 #define DP_FORCE_PSRSU_CAPABILITY 0x40F #define DP_SINK_PSR_ACTIVE_VTOTAL 0x373 diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index c062a44db078..914f28e9f224 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -172,6 +172,9 @@ enum dpcd_psr_sink_states { #define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326 #define DP_SOURCE_BACKLIGHT_CONTROL 0x32E #define DP_SOURCE_BACKLIGHT_ENABLE 0x32F -#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340 +#define DP_SINK_PR_REPLAY_STATUS 0x378 +#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 +#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A #endif /* __DAL_DPCD_DEFS_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index cd870af5fd25..1b8ab20f1715 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -53,7 +53,7 @@ enum { BITS_PER_DP_BYTE = 10, DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */ DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */ - DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */ + 
DATA_EFFICIENCY_128b_132b_x10000 = 9641, /* 96.71% data efficiency x 99.7% downspread factor */ }; enum lttpr_mode { diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 67a062af3ab0..ff8e5708735d 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -359,7 +359,7 @@ static struct fixed31_32 translate_from_linear_space( scratch_1 = dc_fixpt_add(one, args->a3); /* In the first region (first 16 points) and in the * region delimited by START/END we calculate with - * full precision to avoid error accumulation. + * full precision to avoid error accumulation. */ if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START && cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) || @@ -379,8 +379,7 @@ static struct fixed31_32 translate_from_linear_space( scratch_1 = dc_fixpt_sub(scratch_1, args->a2); return scratch_1; - } - else + } else return dc_fixpt_mul(args->arg, args->a1); } diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index ec64f19e1786..84f9b412a4f1 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -149,6 +149,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->replay_settings.config.replay_supported) + vsc_packet_revision = vsc_packet_rev4; else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) vsc_packet_revision = vsc_packet_rev2; @@ -536,6 +538,9 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, case FREESYNC_TYPE_PCON_IN_WHITELIST: mod_build_adaptive_sync_infopacket_v1(info_packet); break; + case ADAPTIVE_SYNC_TYPE_EDP: + mod_build_adaptive_sync_infopacket_v1(info_packet); + break; case ADAPTIVE_SYNC_TYPE_NONE: case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST: default: diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 30349881a283..73a2b37fbbd7 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -926,6 +926,11 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config, !link->dpcd_caps.psr_info.psr_dpcd_caps.bits.LINK_TRAINING_ON_EXIT_NOT_REQUIRED; } +void init_replay_config(struct dc_link *link, struct replay_config *pr_config) +{ + link->replay_settings.config = *pr_config; +} + bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_state *stream) { return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal); diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index ffc924c9991b..d9e0d67d67f7 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -53,6 +53,8 @@ bool dmub_init_abm_config(struct resource_pool *res_pool, struct dmcu_iram_parameters params, unsigned int inst); +void init_replay_config(struct dc_link *link, struct replay_config *pr_config); + bool is_psr_su_specific_panel(struct dc_link *link); void 
mod_power_calc_psr_configs(struct psr_config *psr_config, struct dc_link *link, diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f175e65b853a..abe829bbd54a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -250,6 +250,7 @@ enum DC_DEBUG_MASK { DC_DISABLE_PSR = 0x10, DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, DC_DISABLE_MPO = 0x40, + DC_ENABLE_DPIA_TRACE = 0x80, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h index 537aee0536d3..f2f8f9b39c6b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h @@ -15805,6 +15805,11 @@ #define mmDME6_DME_MEMORY_CONTROL 0x093d #define mmDME6_DME_MEMORY_CONTROL_BASE_IDX 3 +// addressBlock: dce_dc_hpo_hpo_top_dispdec +// base address: 0x0 +#define mmHPO_TOP_CLOCK_CONTROL 0x0e43 +#define mmHPO_TOP_CLOCK_CONTROL_BASE_IDX 3 + // base address: 0x1a698 #define mmDC_PERFMON29_PERFCOUNTER_CNTL 0x0e66 #define mmDC_PERFMON29_PERFCOUNTER_CNTL_BASE_IDX 3 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h index f9d90b098519..e0a447351623 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h @@ -60666,7 +60666,12 @@ #define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L #define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L +// addressBlock: dce_dc_hpo_hpo_top_dispdec +//HPO_TOP_CLOCK_CONTROL +#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS__SHIFT 0x9 +#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS_MASK 0x00000200L +// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec //DC_PERFMON29_PERFCOUNTER_CNTL #define DC_PERFMON29_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0 #define DC_PERFMON29_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h index 476469d41d73..b45a35aae241 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h @@ -14205,6 +14205,10 @@ +// addressBlock: dce_dc_hpo_hpo_top_dispdec +// base address: 0x0 +#define mmHPO_TOP_CLOCK_CONTROL 0x0e43 +#define mmHPO_TOP_CLOCK_CONTROL_BASE_IDX 3 // base address: 0x1a698 #define mmDC_PERFMON26_PERFCOUNTER_CNTL 0x0e66 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h index b9de0ebc8b03..3dae29f9581e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h @@ -52401,7 +52401,10 @@ #define DC_PERFMON25_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0 #define DC_PERFMON25_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL - +// addressBlock: dce_dc_hpo_hpo_top_dispdec +//HPO_TOP_CLOCK_CONTROL +#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS__SHIFT 0x9 +#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS_MASK 0x00000200L // addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec //DC_PERFMON26_PERFCOUNTER_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h 
b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h index a22481e7bcdb..e0c28c29ddb0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h @@ -38896,5 +38896,13 @@ #define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L #define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +//PCIE_PERF_CNTL_TXCLK3 +#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL + +//PCIE_PERF_CNTL_TXCLK7 +#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL_MASK 0x000000FFL + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h new file mode 100644 index 000000000000..a5e7ba5d99ca --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h @@ -0,0 +1,279 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _osssys_6_1_0_OFFSET_HEADER +#define _osssys_6_1_0_OFFSET_HEADER + + + +// addressBlock: osssys_osssysdec +// base address: 0x4280 +#define regIH_VMID_0_LUT 0x0000 +#define regIH_VMID_0_LUT_BASE_IDX 0 +#define regIH_VMID_1_LUT 0x0001 +#define regIH_VMID_1_LUT_BASE_IDX 0 +#define regIH_VMID_2_LUT 0x0002 +#define regIH_VMID_2_LUT_BASE_IDX 0 +#define regIH_VMID_3_LUT 0x0003 +#define regIH_VMID_3_LUT_BASE_IDX 0 +#define regIH_VMID_4_LUT 0x0004 +#define regIH_VMID_4_LUT_BASE_IDX 0 +#define regIH_VMID_5_LUT 0x0005 +#define regIH_VMID_5_LUT_BASE_IDX 0 +#define regIH_VMID_6_LUT 0x0006 +#define regIH_VMID_6_LUT_BASE_IDX 0 +#define regIH_VMID_7_LUT 0x0007 +#define regIH_VMID_7_LUT_BASE_IDX 0 +#define regIH_VMID_8_LUT 0x0008 +#define regIH_VMID_8_LUT_BASE_IDX 0 +#define regIH_VMID_9_LUT 0x0009 +#define regIH_VMID_9_LUT_BASE_IDX 0 +#define regIH_VMID_10_LUT 0x000a +#define regIH_VMID_10_LUT_BASE_IDX 0 +#define regIH_VMID_11_LUT 0x000b +#define regIH_VMID_11_LUT_BASE_IDX 0 +#define regIH_VMID_12_LUT 0x000c +#define regIH_VMID_12_LUT_BASE_IDX 0 +#define regIH_VMID_13_LUT 0x000d +#define regIH_VMID_13_LUT_BASE_IDX 0 +#define regIH_VMID_14_LUT 0x000e +#define regIH_VMID_14_LUT_BASE_IDX 0 +#define regIH_VMID_15_LUT 0x000f +#define regIH_VMID_15_LUT_BASE_IDX 0 +#define regIH_VMID_0_LUT_MM 0x0010 +#define regIH_VMID_0_LUT_MM_BASE_IDX 0 +#define regIH_VMID_1_LUT_MM 0x0011 +#define regIH_VMID_1_LUT_MM_BASE_IDX 0 +#define regIH_VMID_2_LUT_MM 0x0012 +#define regIH_VMID_2_LUT_MM_BASE_IDX 0 +#define regIH_VMID_3_LUT_MM 0x0013 +#define regIH_VMID_3_LUT_MM_BASE_IDX 0 +#define regIH_VMID_4_LUT_MM 0x0014 +#define regIH_VMID_4_LUT_MM_BASE_IDX 0 +#define regIH_VMID_5_LUT_MM 0x0015 +#define regIH_VMID_5_LUT_MM_BASE_IDX 0 +#define regIH_VMID_6_LUT_MM 0x0016 +#define regIH_VMID_6_LUT_MM_BASE_IDX 0 +#define regIH_VMID_7_LUT_MM 0x0017 +#define regIH_VMID_7_LUT_MM_BASE_IDX 0 +#define regIH_VMID_8_LUT_MM 0x0018 +#define regIH_VMID_8_LUT_MM_BASE_IDX 0 +#define regIH_VMID_9_LUT_MM 0x0019 +#define regIH_VMID_9_LUT_MM_BASE_IDX 0 +#define regIH_VMID_10_LUT_MM 0x001a +#define regIH_VMID_10_LUT_MM_BASE_IDX 0 +#define regIH_VMID_11_LUT_MM 0x001b +#define regIH_VMID_11_LUT_MM_BASE_IDX 0 +#define regIH_VMID_12_LUT_MM 0x001c +#define regIH_VMID_12_LUT_MM_BASE_IDX 0 +#define regIH_VMID_13_LUT_MM 0x001d +#define regIH_VMID_13_LUT_MM_BASE_IDX 0 +#define regIH_VMID_14_LUT_MM 0x001e +#define regIH_VMID_14_LUT_MM_BASE_IDX 0 +#define regIH_VMID_15_LUT_MM 0x001f +#define regIH_VMID_15_LUT_MM_BASE_IDX 0 +#define regIH_COOKIE_0 0x0020 +#define regIH_COOKIE_0_BASE_IDX 0 +#define regIH_COOKIE_1 0x0021 +#define regIH_COOKIE_1_BASE_IDX 0 +#define regIH_COOKIE_2 0x0022 +#define regIH_COOKIE_2_BASE_IDX 0 +#define regIH_COOKIE_3 0x0023 +#define regIH_COOKIE_3_BASE_IDX 0 +#define regIH_COOKIE_4 0x0024 +#define regIH_COOKIE_4_BASE_IDX 0 +#define regIH_COOKIE_5 0x0025 +#define regIH_COOKIE_5_BASE_IDX 0 +#define regIH_COOKIE_6 0x0026 +#define regIH_COOKIE_6_BASE_IDX 0 +#define regIH_COOKIE_7 0x0027 +#define regIH_COOKIE_7_BASE_IDX 0 +#define regIH_REGISTER_LAST_PART0 0x003f +#define regIH_REGISTER_LAST_PART0_BASE_IDX 0 +#define regIH_RB_CNTL 0x0080 +#define regIH_RB_CNTL_BASE_IDX 0 +#define regIH_RB_RPTR 0x0081 +#define regIH_RB_RPTR_BASE_IDX 0 +#define regIH_RB_WPTR 0x0082 +#define regIH_RB_WPTR_BASE_IDX 0 +#define regIH_RB_BASE 0x0083 +#define regIH_RB_BASE_BASE_IDX 0 +#define regIH_RB_BASE_HI 0x0084 +#define regIH_RB_BASE_HI_BASE_IDX 0 +#define regIH_RB_WPTR_ADDR_HI 0x0085 +#define regIH_RB_WPTR_ADDR_HI_BASE_IDX 0 +#define 
regIH_RB_WPTR_ADDR_LO 0x0086 +#define regIH_RB_WPTR_ADDR_LO_BASE_IDX 0 +#define regIH_DOORBELL_RPTR 0x0087 +#define regIH_DOORBELL_RPTR_BASE_IDX 0 +#define regIH_DOORBELL_RETRY_CAM 0x0088 +#define regIH_DOORBELL_RETRY_CAM_BASE_IDX 0 +#define regIH_RB_CNTL_RING1 0x008c +#define regIH_RB_CNTL_RING1_BASE_IDX 0 +#define regIH_RB_RPTR_RING1 0x008d +#define regIH_RB_RPTR_RING1_BASE_IDX 0 +#define regIH_RB_WPTR_RING1 0x008e +#define regIH_RB_WPTR_RING1_BASE_IDX 0 +#define regIH_RB_BASE_RING1 0x008f +#define regIH_RB_BASE_RING1_BASE_IDX 0 +#define regIH_RB_BASE_HI_RING1 0x0090 +#define regIH_RB_BASE_HI_RING1_BASE_IDX 0 +#define regIH_DOORBELL_RPTR_RING1 0x0093 +#define regIH_DOORBELL_RPTR_RING1_BASE_IDX 0 +#define regIH_RETRY_CAM_ACK 0x00a4 +#define regIH_RETRY_CAM_ACK_BASE_IDX 0 +#define regIH_VERSION 0x00a5 +#define regIH_VERSION_BASE_IDX 0 +#define regIH_CNTL 0x00a8 +#define regIH_CNTL_BASE_IDX 0 +#define regIH_CLK_CTRL 0x00a9 +#define regIH_CLK_CTRL_BASE_IDX 0 +#define regIH_STORM_CLIENT_LIST_CNTL 0x00aa +#define regIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0 +#define regIH_LIMIT_INT_RATE_CNTL 0x00ab +#define regIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0 +#define regIH_RETRY_INT_CAM_CNTL 0x00ac +#define regIH_RETRY_INT_CAM_CNTL_BASE_IDX 0 +#define regIH_MEM_POWER_CTRL 0x00ad +#define regIH_MEM_POWER_CTRL_BASE_IDX 0 +#define regIH_MEM_POWER_CTRL2 0x00ae +#define regIH_MEM_POWER_CTRL2_BASE_IDX 0 +#define regIH_CNTL2 0x00c1 +#define regIH_CNTL2_BASE_IDX 0 +#define regIH_STATUS 0x00c2 +#define regIH_STATUS_BASE_IDX 0 +#define regIH_PERFMON_CNTL 0x00c3 +#define regIH_PERFMON_CNTL_BASE_IDX 0 +#define regIH_PERFCOUNTER0_RESULT 0x00c4 +#define regIH_PERFCOUNTER0_RESULT_BASE_IDX 0 +#define regIH_PERFCOUNTER1_RESULT 0x00c5 +#define regIH_PERFCOUNTER1_RESULT_BASE_IDX 0 +#define regIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7 +#define regIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0 +#define regIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8 +#define regIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0 +#define regIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9 +#define regIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0 +#define regIH_DSM_MATCH_FIELD_CONTROL 0x00ca +#define regIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0 +#define regIH_DSM_MATCH_DATA_CONTROL 0x00cb +#define regIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0 +#define regIH_DSM_MATCH_FCN_ID 0x00cc +#define regIH_DSM_MATCH_FCN_ID_BASE_IDX 0 +#define regIH_VF_RB_STATUS 0x00ce +#define regIH_VF_RB_STATUS_BASE_IDX 0 +#define regIH_VF_RB_STATUS2 0x00cf +#define regIH_VF_RB_STATUS2_BASE_IDX 0 +#define regIH_VF_RB1_STATUS 0x00d0 +#define regIH_VF_RB1_STATUS_BASE_IDX 0 +#define regIH_VF_RB1_STATUS2 0x00d1 +#define regIH_VF_RB1_STATUS2_BASE_IDX 0 +#define regIH_RB_STATUS 0x00d4 +#define regIH_RB_STATUS_BASE_IDX 0 +#define regIH_INT_FLOOD_CNTL 0x00d5 +#define regIH_INT_FLOOD_CNTL_BASE_IDX 0 +#define regIH_RB0_INT_FLOOD_STATUS 0x00d6 +#define regIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0 +#define regIH_RB1_INT_FLOOD_STATUS 0x00d7 +#define regIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0 +#define regIH_INT_FLOOD_STATUS 0x00d9 +#define regIH_INT_FLOOD_STATUS_BASE_IDX 0 +#define regIH_INT_FLAGS 0x00dc +#define regIH_INT_FLAGS_BASE_IDX 0 +#define regIH_LAST_INT_INFO0 0x00dd +#define regIH_LAST_INT_INFO0_BASE_IDX 0 +#define regIH_LAST_INT_INFO1 0x00de +#define regIH_LAST_INT_INFO1_BASE_IDX 0 +#define regIH_LAST_INT_INFO2 0x00df +#define regIH_LAST_INT_INFO2_BASE_IDX 0 +#define regIH_SCRATCH 0x00e0 +#define regIH_SCRATCH_BASE_IDX 0 +#define regIH_CLIENT_CREDIT_ERROR 0x00e1 +#define regIH_CLIENT_CREDIT_ERROR_BASE_IDX 0 +#define regIH_GPU_IOV_VIOLATION_LOG 0x00e2 
+#define regIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define regIH_GPU_IOV_VIOLATION_LOG2 0x00e3 +#define regIH_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0 +#define regIH_COOKIE_REC_VIOLATION_LOG 0x00e4 +#define regIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0 +#define regIH_CREDIT_STATUS 0x00e5 +#define regIH_CREDIT_STATUS_BASE_IDX 0 +#define regIH_MMHUB_ERROR 0x00e6 +#define regIH_MMHUB_ERROR_BASE_IDX 0 +#define regIH_VF_RB_STATUS3 0x00ea +#define regIH_VF_RB_STATUS3_BASE_IDX 0 +#define regIH_VF_RB_STATUS4 0x00eb +#define regIH_VF_RB_STATUS4_BASE_IDX 0 +#define regIH_VF_RB1_STATUS3 0x00ec +#define regIH_VF_RB1_STATUS3_BASE_IDX 0 +#define regIH_MSI_STORM_CTRL 0x00f1 +#define regIH_MSI_STORM_CTRL_BASE_IDX 0 +#define regIH_MSI_STORM_CLIENT_INDEX 0x00f2 +#define regIH_MSI_STORM_CLIENT_INDEX_BASE_IDX 0 +#define regIH_MSI_STORM_CLIENT_DATA 0x00f3 +#define regIH_MSI_STORM_CLIENT_DATA_BASE_IDX 0 +#define regIH_REGISTER_LAST_PART2 0x00ff +#define regIH_REGISTER_LAST_PART2_BASE_IDX 0 +#define regSEM_MAILBOX 0x010a +#define regSEM_MAILBOX_BASE_IDX 0 +#define regSEM_MAILBOX_CLEAR 0x010b +#define regSEM_MAILBOX_CLEAR_BASE_IDX 0 +#define regSEM_REGISTER_LAST_PART2 0x017f +#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0 +#define regIH_ACTIVE_FCN_ID 0x0180 +#define regIH_ACTIVE_FCN_ID_BASE_IDX 0 +#define regIH_VIRT_RESET_REQ 0x0181 +#define regIH_VIRT_RESET_REQ_BASE_IDX 0 +#define regIH_CLIENT_CFG 0x0184 +#define regIH_CLIENT_CFG_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_INDEX 0x0185 +#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_DATA 0x0186 +#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0 +#define regIH_CLIENT_CFG_INDEX 0x0188 +#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_CLIENT_CFG_DATA 0x0189 +#define regIH_CLIENT_CFG_DATA_BASE_IDX 0 +#define regIH_CLIENT_CFG_DATA2 0x018a +#define regIH_CLIENT_CFG_DATA2_BASE_IDX 0 +#define regIH_CID_REMAP_INDEX 0x018b +#define regIH_CID_REMAP_INDEX_BASE_IDX 0 +#define regIH_CID_REMAP_DATA 0x018c +#define regIH_CID_REMAP_DATA_BASE_IDX 0 +#define regIH_CHICKEN 0x018d +#define regIH_CHICKEN_BASE_IDX 0 +#define regIH_MMHUB_CNTL 0x018e +#define regIH_MMHUB_CNTL_BASE_IDX 0 +#define regIH_INT_DROP_CNTL 0x018f +#define regIH_INT_DROP_CNTL_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_VALUE0 0x0190 +#define regIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_VALUE1 0x0191 +#define regIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_MASK0 0x0192 +#define regIH_INT_DROP_MATCH_MASK0_BASE_IDX 0 +#define regIH_INT_DROP_MATCH_MASK1 0x0193 +#define regIH_INT_DROP_MATCH_MASK1_BASE_IDX 0 +#define regIH_REGISTER_LAST_PART1 0x019f +#define regIH_REGISTER_LAST_PART1_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h new file mode 100644 index 000000000000..15d5689dde65 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h @@ -0,0 +1,1019 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _osssys_6_1_0_SH_MASK_HEADER +#define _osssys_6_1_0_SH_MASK_HEADER + + +// addressBlock: osssys_osssysdec +//IH_VMID_0_LUT +#define IH_VMID_0_LUT__PASID__SHIFT 0x0 +#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_1_LUT +#define IH_VMID_1_LUT__PASID__SHIFT 0x0 +#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_2_LUT +#define IH_VMID_2_LUT__PASID__SHIFT 0x0 +#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_3_LUT +#define IH_VMID_3_LUT__PASID__SHIFT 0x0 +#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_4_LUT +#define IH_VMID_4_LUT__PASID__SHIFT 0x0 +#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_5_LUT +#define IH_VMID_5_LUT__PASID__SHIFT 0x0 +#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_6_LUT +#define IH_VMID_6_LUT__PASID__SHIFT 0x0 +#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_7_LUT +#define IH_VMID_7_LUT__PASID__SHIFT 0x0 +#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_8_LUT +#define IH_VMID_8_LUT__PASID__SHIFT 0x0 +#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_9_LUT +#define IH_VMID_9_LUT__PASID__SHIFT 0x0 +#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_10_LUT +#define IH_VMID_10_LUT__PASID__SHIFT 0x0 +#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_11_LUT +#define IH_VMID_11_LUT__PASID__SHIFT 0x0 +#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_12_LUT +#define IH_VMID_12_LUT__PASID__SHIFT 0x0 +#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_13_LUT +#define IH_VMID_13_LUT__PASID__SHIFT 0x0 +#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_14_LUT +#define IH_VMID_14_LUT__PASID__SHIFT 0x0 +#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_15_LUT +#define IH_VMID_15_LUT__PASID__SHIFT 0x0 +#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL +//IH_VMID_0_LUT_MM +#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_1_LUT_MM +#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_2_LUT_MM +#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_3_LUT_MM +#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_4_LUT_MM +#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_5_LUT_MM +#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0 
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_6_LUT_MM +#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_7_LUT_MM +#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_8_LUT_MM +#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_9_LUT_MM +#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_10_LUT_MM +#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_11_LUT_MM +#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_12_LUT_MM +#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_13_LUT_MM +#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_14_LUT_MM +#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_VMID_15_LUT_MM +#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0 +#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL +//IH_COOKIE_0 +#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0 +#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8 +#define IH_COOKIE_0__RING_ID__SHIFT 0x10 +#define IH_COOKIE_0__VM_ID__SHIFT 0x18 +#define IH_COOKIE_0__RESERVED__SHIFT 0x1c +#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f +#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL +#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L +#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L +#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L +#define IH_COOKIE_0__RESERVED_MASK 0x70000000L +#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L +//IH_COOKIE_1 +#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0 +#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL +//IH_COOKIE_2 +#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0 +#define IH_COOKIE_2__RESERVED__SHIFT 0x10 +#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f +#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL +#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L +#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L +//IH_COOKIE_3 +#define IH_COOKIE_3__PAS_ID__SHIFT 0x0 +#define IH_COOKIE_3__RESERVED__SHIFT 0x10 +#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f +#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL +#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L +#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L +//IH_COOKIE_4 +#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0 +#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL +//IH_COOKIE_5 +#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0 +#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL +//IH_COOKIE_6 +#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0 +#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL +//IH_COOKIE_7 +#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0 +#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL +//IH_REGISTER_LAST_PART0 +#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0 +#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL +//IH_RB_CNTL +#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8 +#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9 +#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa +#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb +#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc +#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10 +#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11 
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12 +#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14 +#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15 +#define IH_RB_CNTL__MC_RO__SHIFT 0x16 +#define IH_RB_CNTL__MC_VMID__SHIFT 0x18 +#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c +#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f +#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L +#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L +#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L +#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L +#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L +#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L +#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L +#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L +#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L +#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L +#define IH_RB_CNTL__MC_RO_MASK 0x00400000L +#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L +#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L +#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L +//IH_RB_RPTR +#define IH_RB_RPTR__OFFSET__SHIFT 0x2 +#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL +//IH_RB_WPTR +#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0 +#define IH_RB_WPTR__OFFSET__SHIFT 0x2 +#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12 +#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13 +#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L +#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL +#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L +#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L +//IH_RB_BASE +#define IH_RB_BASE__ADDR__SHIFT 0x0 +#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//IH_RB_BASE_HI +#define IH_RB_BASE_HI__ADDR__SHIFT 0x0 +#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL +//IH_RB_WPTR_ADDR_HI +#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL +//IH_RB_WPTR_ADDR_LO +#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//IH_DOORBELL_RPTR +#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0 +#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c +#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL +#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L +//IH_DOORBELL_RETRY_CAM +#define IH_DOORBELL_RETRY_CAM__OFFSET__SHIFT 0x0 +#define IH_DOORBELL_RETRY_CAM__ENABLE__SHIFT 0x1c +#define IH_DOORBELL_RETRY_CAM__OFFSET_MASK 0x03FFFFFFL +#define IH_DOORBELL_RETRY_CAM__ENABLE_MASK 0x10000000L +//IH_RB_CNTL_RING1 +#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0 +#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1 +#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9 +#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa +#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb +#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc +#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10 +#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12 +#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14 +#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16 +#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18 +#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c +#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f +#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L +#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL +#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L +#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L +#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L +#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L 
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L +#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L +#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L +#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L +#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L +#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L +#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L +//IH_RB_RPTR_RING1 +#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2 +#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL +//IH_RB_WPTR_RING1 +#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0 +#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2 +#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12 +#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13 +#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L +#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL +#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L +#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L +//IH_RB_BASE_RING1 +#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0 +#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL +//IH_RB_BASE_HI_RING1 +#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0 +#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL +//IH_DOORBELL_RPTR_RING1 +#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0 +#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c +#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL +#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L +//IH_RETRY_CAM_ACK +#define IH_RETRY_CAM_ACK__INDEX__SHIFT 0x0 +#define IH_RETRY_CAM_ACK__INDEX_MASK 0x000003FFL +//IH_VERSION +#define IH_VERSION__MINVER__SHIFT 0x0 +#define IH_VERSION__MAJVER__SHIFT 0x8 +#define IH_VERSION__REV__SHIFT 0x10 +#define IH_VERSION__MINVER_MASK 0x0000007FL +#define IH_VERSION__MAJVER_MASK 0x00007F00L +#define IH_VERSION__REV_MASK 0x003F0000L +//IH_CNTL +#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0 +#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6 +#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8 +#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14 +#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL +#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L +#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L +#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L +//IH_CLK_CTRL +#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x17 +#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE__SHIFT 0x18 +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19 +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a +#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b +#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c +#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d +#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e +#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f +#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE_MASK 0x00800000L +#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE_MASK 0x01000000L +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L +#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L +#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L +#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L +#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L +#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L +//IH_STORM_CLIENT_LIST_CNTL +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1 +#define 
IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19 +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L 
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L +#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L +//IH_LIMIT_INT_RATE_CNTL +#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0 +#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1 +#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5 +#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11 +#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15 +#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L +#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL +#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L +#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L +#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L +//IH_RETRY_INT_CAM_CNTL +#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0 +#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8 +#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14 +#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL +#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00003F00L +#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L +//IH_MEM_POWER_CTRL +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8 +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN__SHIFT 0x10 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN__SHIFT 0x11 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN__SHIFT 0x12 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN__SHIFT 0x13 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS__SHIFT 0x14 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18 +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L +#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L +#define 
IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN_MASK 0x00010000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN_MASK 0x00020000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN_MASK 0x00040000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN_MASK 0x00080000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS_MASK 0x00700000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L +#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L +//IH_MEM_POWER_CTRL2 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN__SHIFT 0x0 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN__SHIFT 0x1 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN__SHIFT 0x2 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN__SHIFT 0x3 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS__SHIFT 0x4 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8 +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN_MASK 0x00000001L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN_MASK 0x00000002L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN_MASK 0x00000004L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN_MASK 0x00000008L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS_MASK 0x00000070L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L +#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L +//IH_CNTL2 +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0 +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8 +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL +#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L +//IH_STATUS +#define IH_STATUS__IDLE__SHIFT 0x0 +#define IH_STATUS__INPUT_IDLE__SHIFT 0x1 +#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2 +#define IH_STATUS__RB_FULL__SHIFT 0x3 +#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4 +#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5 +#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6 +#define IH_STATUS__MC_WR_STALL__SHIFT 0x7 +#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8 +#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9 +#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa +#define IH_STATUS__SWITCH_READY__SHIFT 0xb +#define IH_STATUS__RB1_FULL__SHIFT 0xc +#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd +#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe +#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12 +#define IH_STATUS__RETRY_INT_CAM_IDLE__SHIFT 0x13 +#define IH_STATUS__ZSTATES_FENCE__SHIFT 0x14 +#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED__SHIFT 0x15 +#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED__SHIFT 0x16 +#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED__SHIFT 0x17 +#define IH_STATUS__IDLE_MASK 0x00000001L +#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L +#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L +#define IH_STATUS__RB_FULL_MASK 0x00000008L +#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L +#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L +#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L +#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L +#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L +#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L +#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 
0x00000400L +#define IH_STATUS__SWITCH_READY_MASK 0x00000800L +#define IH_STATUS__RB1_FULL_MASK 0x00001000L +#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L +#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L +#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L +#define IH_STATUS__RETRY_INT_CAM_IDLE_MASK 0x00080000L +#define IH_STATUS__ZSTATES_FENCE_MASK 0x00100000L +#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED_MASK 0x00200000L +#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED_MASK 0x00400000L +#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED_MASK 0x00800000L +//IH_PERFMON_CNTL +#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0 +#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1 +#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10 +#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11 +#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12 +#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L +#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L +#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x00000FFCL +#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L +#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L +#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0FFC0000L +//IH_PERFCOUNTER0_RESULT +#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//IH_PERFCOUNTER1_RESULT +#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_VALUE_BIT_31_0 +#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_VALUE_BIT_63_32 +#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_VALUE_BIT_95_64 +#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL +//IH_DSM_MATCH_FIELD_CONTROL +#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0 +#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1 +#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2 +#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3 +#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4 +#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5 +#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6 +#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L +#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L +#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L +#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L +#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L +#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L +#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L +//IH_DSM_MATCH_DATA_CONTROL +#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0 +#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL +//IH_DSM_MATCH_FCN_ID +#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x0 +#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x7 +#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000000FL +#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000080L +//IH_VF_RB_STATUS +#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL +//IH_VF_RB_STATUS2 +#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL +//IH_VF_RB1_STATUS +#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0 +#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL 
+//IH_VF_RB1_STATUS2 +#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0 +#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL +//IH_RB_STATUS +#define IH_RB_STATUS__RB_FULL__SHIFT 0x0 +#define IH_RB_STATUS__RB_FULL_DRAIN__SHIFT 0x1 +#define IH_RB_STATUS__RB_OVERFLOW__SHIFT 0x2 +#define IH_RB_STATUS__RB1_FULL__SHIFT 0x4 +#define IH_RB_STATUS__RB1_FULL_DRAIN__SHIFT 0x5 +#define IH_RB_STATUS__RB1_OVERFLOW__SHIFT 0x6 +#define IH_RB_STATUS__RB_FULL_MASK 0x00000001L +#define IH_RB_STATUS__RB_FULL_DRAIN_MASK 0x00000002L +#define IH_RB_STATUS__RB_OVERFLOW_MASK 0x00000004L +#define IH_RB_STATUS__RB1_FULL_MASK 0x00000010L +#define IH_RB_STATUS__RB1_FULL_DRAIN_MASK 0x00000020L +#define IH_RB_STATUS__RB1_OVERFLOW_MASK 0x00000040L +//IH_INT_FLOOD_CNTL +#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0 +#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3 +#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4 +#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L +#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L +#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L +//IH_RB0_INT_FLOOD_STATUS +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0 +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL +#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L +//IH_RB1_INT_FLOOD_STATUS +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0 +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL +#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L +//IH_INT_FLOOD_STATUS +#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18 +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1d +#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e +#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x0F000000L +#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x20000000L +#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L +//IH_INT_FLAGS +#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0 +#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1 +#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2 +#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3 +#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4 +#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5 +#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6 +#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7 +#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8 +#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9 +#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa +#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb +#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc +#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd +#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe +#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf +#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10 +#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11 +#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12 +#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13 +#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14 +#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15 
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16 +#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17 +#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18 +#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19 +#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a +#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b +#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c +#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d +#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e +#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f +#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L +#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L +#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L +#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L +#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L +#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L +#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L +#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L +#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L +#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L +#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L +#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L +#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L +#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L +#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L +#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L +#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L +#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L +#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L +#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L +#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L +#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L +#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L +#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L +#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L +#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L +#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L +#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L +#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L +#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L +#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L +#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L +//IH_LAST_INT_INFO0 +#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0 +#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8 +#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10 +#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18 +#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f +#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL +#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L +#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L +#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L +#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L +//IH_LAST_INT_INFO1 +#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0 +#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL +//IH_LAST_INT_INFO2 +#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0 +#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10 +#define IH_LAST_INT_INFO2__VF__SHIFT 0x17 +#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL +#define IH_LAST_INT_INFO2__VF_ID_MASK 0x000F0000L +#define IH_LAST_INT_INFO2__VF_MASK 0x00800000L +//IH_SCRATCH +#define IH_SCRATCH__DATA__SHIFT 0x0 +#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL +//IH_CLIENT_CREDIT_ERROR +#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2 +#define 
IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa +#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb +#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc +#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd +#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe +#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf +#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19 +#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a +#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b +#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c +#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d +#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e +#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f +#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L +#define 
IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L +#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L +//IH_GPU_IOV_VIOLATION_LOG +#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x16 +#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x17 +#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x18 +#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00400000L +#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00800000L +#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x0F000000L +//IH_GPU_IOV_VIOLATION_LOG2 +#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL +//IH_COOKIE_REC_VIOLATION_LOG +#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x8 +#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x10 +#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x0000FF00L +#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0x03FF0000L +//IH_CREDIT_STATUS +#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1 +#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2 +#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3 +#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4 +#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5 +#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6 +#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7 +#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8 +#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9 +#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa +#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb +#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc +#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd +#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe +#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf +#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10 +#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11 +#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12 +#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13 +#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14 +#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15 +#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16 +#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17 +#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18 +#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19 +#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a +#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b +#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c +#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d +#define 
IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e +#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f +#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L +#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L +#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L +#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L +#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L +#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L +#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L +#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L +#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L +#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L +#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L +#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L +#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L +#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L +#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L +#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L +#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L +#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L +#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L +#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L +#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L +#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L +#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L +#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L +#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L +#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L +#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L +#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L +#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L +#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L +#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L +//IH_MMHUB_ERROR +#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1 +#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2 +#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3 +#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5 +#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6 +#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7 +#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L +#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L +#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L +#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L +#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L +#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L +//IH_VF_RB_STATUS3 +#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF_MASK 0x0000FFFFL +//IH_VF_RB_STATUS4 +#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF__SHIFT 0x0 +#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF_MASK 0x0000FFFFL +//IH_VF_RB1_STATUS3 +#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0 +#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF_MASK 0x0000FFFFL +//IH_MSI_STORM_CTRL +#define IH_MSI_STORM_CTRL__DELAY__SHIFT 0x0 +#define IH_MSI_STORM_CTRL__DELAY_MASK 0x00000FFFL +//IH_MSI_STORM_CLIENT_INDEX +#define IH_MSI_STORM_CLIENT_INDEX__INDEX__SHIFT 0x0 
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX_MASK 0x00000007L +//IH_MSI_STORM_CLIENT_DATA +#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE__SHIFT 0x11 +#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID__SHIFT 0x1f +#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L +#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE_MASK 0x00020000L +#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID_MASK 0x80000000L +//IH_REGISTER_LAST_PART2 +#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0 +#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL +//SEM_MAILBOX +#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0 +#define SEM_MAILBOX__RESERVED__SHIFT 0x10 +#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL +#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L +//SEM_MAILBOX_CLEAR +#define SEM_MAILBOX_CLEAR__CLEAR__SHIFT 0x0 +#define SEM_MAILBOX_CLEAR__RESERVED__SHIFT 0x10 +#define SEM_MAILBOX_CLEAR__CLEAR_MASK 0x0000FFFFL +#define SEM_MAILBOX_CLEAR__RESERVED_MASK 0xFFFF0000L +//SEM_REGISTER_LAST_PART2 +#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0 +#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL +//IH_ACTIVE_FCN_ID +#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0 +#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f +#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL +#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L +//IH_VIRT_RESET_REQ +#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define IH_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L +//IH_CLIENT_CFG +#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0 +#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL +//IH_RING1_CLIENT_CFG_INDEX +#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L +//IH_RING1_CLIENT_CFG_DATA +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L +//IH_CLIENT_CFG_INDEX +#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL +//IH_CLIENT_CFG_DATA +#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12 +#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16 +#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18 +#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE__SHIFT 0x19 +#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L +#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L +#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L +#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE_MASK 0x02000000L +//IH_CLIENT_CFG_DATA2 +#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR__SHIFT 0x0 +#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR_MASK 0xFFFFFFFFL +//IH_CID_REMAP_INDEX +#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0 +#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L 
+//IH_CID_REMAP_DATA +#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8 +#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x18 +#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0003FF00L +#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0xFF000000L +//IH_CHICKEN +#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0 +#define IH_CHICKEN__DBGU_TRIGGER_ENABLE__SHIFT 0x1 +#define IH_CHICKEN__CROSS_TRIGGER_ENABLE__SHIFT 0x2 +#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3 +#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4 +#define IH_CHICKEN__REG_FIREWALL_ENABLE__SHIFT 0x5 +#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L +#define IH_CHICKEN__DBGU_TRIGGER_ENABLE_MASK 0x00000002L +#define IH_CHICKEN__CROSS_TRIGGER_ENABLE_MASK 0x00000004L +#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L +#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L +#define IH_CHICKEN__REG_FIREWALL_ENABLE_MASK 0x00000020L +//IH_MMHUB_CNTL +#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0 +#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8 +#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc +#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL +#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000F00L +#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x0000F000L +//IH_INT_DROP_CNTL +#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0 +#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1 +#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2 +#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3 +#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4 +#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5 +#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6 +#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8 +#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10 +#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L +#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L +#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L +#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L +#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L +#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L +#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L +#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L +#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L +//IH_INT_DROP_MATCH_VALUE0 +#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0 +#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8 +#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10 +#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17 +#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18 +#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL +#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L +#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x001F0000L +#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L +#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L +//IH_INT_DROP_MATCH_VALUE1 +#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0 +#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL +//IH_INT_DROP_MATCH_MASK0 +#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0 +#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8 +#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10 +#define 
IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17 +#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18 +#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL +#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L +#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x001F0000L +#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L +#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L +//IH_INT_DROP_MATCH_MASK1 +#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0 +#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL +//IH_REGISTER_LAST_PART1 +#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0 +#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index d0df3381539f..8433f99f6667 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -315,16 +315,19 @@ struct kfd2kgd_calls { uint32_t watch_address_mask, uint32_t watch_id, uint32_t watch_mode, - uint32_t debug_vmid); + uint32_t debug_vmid, + uint32_t inst); uint32_t (*clear_address_watch)(struct amdgpu_device *adev, uint32_t watch_id); void (*get_iq_wait_times)(struct amdgpu_device *adev, - uint32_t *wait_times); + uint32_t *wait_times, + uint32_t inst); void (*build_grace_period_packet_info)(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data); + uint32_t *reg_data, + uint32_t inst); void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, int *wave_cnt, int *max_waves_per_cu, uint32_t inst); void (*program_trap_handler_settings)(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f542f6e19ed..84c5224d994c 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -132,7 +132,8 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_MEM_TEMP, AMDGPU_PP_SENSOR_VCE_POWER, AMDGPU_PP_SENSOR_UVD_POWER, - AMDGPU_PP_SENSOR_GPU_POWER, + AMDGPU_PP_SENSOR_GPU_AVG_POWER, + AMDGPU_PP_SENSOR_GPU_INPUT_POWER, AMDGPU_PP_SENSOR_SS_APU_SHARE, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, @@ -892,4 +893,73 @@ struct gpu_metrics_v2_3 { uint16_t average_temperature_core[8]; // average CPU core temperature on APUs uint16_t average_temperature_l3[2]; }; + +struct gpu_metrics_v2_4 { + struct metrics_table_header common_header; + + /* Temperature (unit: centi-Celsius) */ + uint16_t temperature_gfx; + uint16_t temperature_soc; + uint16_t temperature_core[8]; + uint16_t temperature_l3[2]; + + /* Utilization (unit: centi) */ + uint16_t average_gfx_activity; + uint16_t average_mm_activity; + + /* Driver attached timestamp (in ns) */ + uint64_t system_clock_counter; + + /* Power/Energy (unit: mW) */ + uint16_t average_socket_power; + uint16_t average_cpu_power; + uint16_t average_soc_power; + uint16_t average_gfx_power; + uint16_t average_core_power[8]; + + /* Average clocks (unit: MHz) */ + uint16_t average_gfxclk_frequency; + uint16_t average_socclk_frequency; + uint16_t average_uclk_frequency; + uint16_t average_fclk_frequency; + uint16_t average_vclk_frequency; + uint16_t average_dclk_frequency; + + /* Current clocks (unit: MHz) */ + uint16_t current_gfxclk; + uint16_t current_socclk; + uint16_t current_uclk; + uint16_t 
current_fclk; + uint16_t current_vclk; + uint16_t current_dclk; + uint16_t current_coreclk[8]; + uint16_t current_l3clk[2]; + + /* Throttle status (ASIC dependent) */ + uint32_t throttle_status; + + /* Fans */ + uint16_t fan_pwm; + + uint16_t padding[3]; + + /* Throttle status (ASIC independent) */ + uint64_t indep_throttle_status; + + /* Average Temperature (unit: centi-Celsius) */ + uint16_t average_temperature_gfx; + uint16_t average_temperature_soc; + uint16_t average_temperature_core[8]; + uint16_t average_temperature_l3[2]; + + /* Power/Voltage (unit: mV) */ + uint16_t average_cpu_voltage; + uint16_t average_soc_voltage; + uint16_t average_gfx_voltage; + + /* Power/Current (unit: mA) */ + uint16_t average_cpu_current; + uint16_t average_soc_current; + uint16_t average_gfx_current; +}; #endif diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index 0997e999416a..b1db2b190187 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -275,7 +275,9 @@ union MESAPI__ADD_QUEUE { uint32_t trap_en : 1; uint32_t is_aql_queue : 1; uint32_t skip_process_ctx_clear : 1; - uint32_t reserved : 19; + uint32_t map_legacy_kq : 1; + uint32_t exclusively_scheduled : 1; + uint32_t reserved : 17; }; struct MES_API_STATUS api_status; uint64_t tma_addr; diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h index 0fea6a746611..a2c8dca2425e 100644 --- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h +++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h @@ -7,13 +7,11 @@ #define MAX_SEGMENT 6 -struct IP_BASE_INSTANCE -{ +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; } __maybe_unused; -struct IP_BASE -{ +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; } __maybe_unused; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 9ef88a0b1b57..5b1d73b00ef7 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -743,7 +743,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, type = PP_OD_EDIT_CCLK_VDDC_TABLE; else if (*buf == 'm') type = PP_OD_EDIT_MCLK_VDDC_TABLE; - else if(*buf == 'r') + else if (*buf == 'r') type = PP_OD_RESTORE_DEFAULT_TABLE; else if (*buf == 'c') type = PP_OD_COMMIT_DPM_TABLE; @@ -1467,6 +1467,32 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, return -EINVAL; } +static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, + enum amd_pp_sensors sensor, + void *query) +{ + int r, size = sizeof(uint32_t); + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + /* get the sensor value */ + r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return r; +} + /** * DOC: gpu_busy_percent * @@ -1481,26 +1507,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - int r, value, size = sizeof(value); - - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(ddev->dev); - if (r < 0) { - 
pm_runtime_put_autosuspend(ddev->dev); - return r; - } - - /* read the IP busy sensor */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, - (void *)&value, &size); - - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + unsigned int value; + int r; + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); if (r) return r; @@ -1521,26 +1531,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - int r, value, size = sizeof(value); - - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(ddev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return r; - } - - /* read the IP busy sensor */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, - (void *)&value, &size); - - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); + unsigned int value; + int r; + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); if (r) return r; @@ -1814,45 +1808,15 @@ out: return size; } -static int amdgpu_device_read_powershift(struct amdgpu_device *adev, - uint32_t *ss_power, bool dgpu_share) -{ - struct drm_device *ddev = adev_to_drm(adev); - uint32_t size; - int r = 0; - - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(ddev->dev); - if (r < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return r; - } - - if (dgpu_share) - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, - (void *)ss_power, &size); - else - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, - (void *)ss_power, &size); - - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return r; -} - static int amdgpu_show_powershift_percent(struct device *dev, - char *buf, bool dgpu_share) + char *buf, enum amd_pp_sensors sensor) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t ss_power; int r = 0, i; - r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share); + r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); if (r == -EOPNOTSUPP) { /* sensor not available on dGPU, try to read from APU */ adev = NULL; @@ -1865,14 +1829,15 @@ static int amdgpu_show_powershift_percent(struct device *dev, } mutex_unlock(&mgpu_info.mutex); if (adev) - r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share); + r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); } - if (!r) - r = sysfs_emit(buf, "%u%%\n", ss_power); + if (r) + return r; - return r; + return sysfs_emit(buf, "%u%%\n", ss_power); } + /** * DOC: smartshift_apu_power * @@ -1886,7 +1851,7 @@ static int amdgpu_show_powershift_percent(struct device *dev, static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr, char *buf) { - return amdgpu_show_powershift_percent(dev, buf, false); + return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE); } /** @@ -1902,7 +1867,7 @@ static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr, char *buf) { - return amdgpu_show_powershift_percent(dev, buf, true); + return amdgpu_show_powershift_percent(dev, buf, 
AMDGPU_PP_SENSOR_SS_DGPU_SHARE); } /** @@ -1965,7 +1930,6 @@ out: return r; } - static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states) { @@ -1978,15 +1942,15 @@ static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states) { - uint32_t ss_power, size; + uint32_t ss_power; if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) *states = ATTR_STATE_UNSUPPORTED; - else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, - (void *)&ss_power, &size)) + else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, + (void *)&ss_power)) *states = ATTR_STATE_UNSUPPORTED; - else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, - (void *)&ss_power, &size)) + else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, + (void *)&ss_power)) *states = ATTR_STATE_UNSUPPORTED; return 0; @@ -2049,8 +2013,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { if (gc_ver < IP_VERSION(9, 0, 0) || - gc_ver == IP_VERSION(9, 4, 1) || - gc_ver == IP_VERSION(9, 4, 2)) + !amdgpu_device_has_display_hardware(adev)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { if (mp1_ver < IP_VERSION(10, 0, 0)) @@ -2096,7 +2059,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ gc_ver == IP_VERSION(10, 1, 2) || gc_ver == IP_VERSION(11, 0, 0) || gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3))) + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(9, 4, 3))) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { if (!((gc_ver == IP_VERSION(10, 3, 1) || @@ -2110,7 +2074,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ gc_ver == IP_VERSION(10, 1, 2) || gc_ver == IP_VERSION(11, 0, 0) || gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3))) + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(9, 4, 3))) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { if (!((gc_ver == IP_VERSION(10, 3, 1) || @@ -2172,15 +2137,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev, uint32_t mask, struct list_head *attr_list) { int ret = 0; - struct device_attribute *dev_attr = &attr->dev_attr; - const char *name = dev_attr->attr.name; enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; struct amdgpu_device_attr_entry *attr_entry; + struct device_attribute *dev_attr; + const char *name; int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; - BUG_ON(!attr); + if (!attr) + return -EINVAL; + + dev_attr = &attr->dev_attr; + name = dev_attr->attr.name; attr_update = attr->attr_update ? 
attr->attr_update : default_attr_update; @@ -2266,46 +2235,32 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); int channel = to_sensor_dev_attr(attr)->index; - int r, temp = 0, size = sizeof(temp); - - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; + int r, temp = 0; if (channel >= PP_TEMP_MAX) return -EINVAL; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } - switch (channel) { case PP_TEMP_JUNCTION: /* get current junction temperature */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, - (void *)&temp, &size); + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, + (void *)&temp); break; case PP_TEMP_EDGE: /* get current edge temperature */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, - (void *)&temp, &size); + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, + (void *)&temp); break; case PP_TEMP_MEM: /* get current memory temperature */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, - (void *)&temp, &size); + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, + (void *)&temp); break; default: r = -EINVAL; break; } - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - if (r) return r; @@ -2589,25 +2544,10 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 min_rpm = 0; - u32 size = sizeof(min_rpm); int r; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } - - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, - (void *)&min_rpm, &size); - - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, + (void *)&min_rpm); if (r) return r; @@ -2621,25 +2561,10 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 max_rpm = 0; - u32 size = sizeof(max_rpm); int r; - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } - - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, - (void *)&max_rpm, &size); - - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + (void *)&max_rpm); if (r) return r; @@ -2801,26 +2726,11 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 vddgfx; - int r, size = sizeof(vddgfx); - - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + int r; /* get the voltage */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, - (void *)&vddgfx, &size); - - 
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
+					    (void *)&vddgfx);
 	if (r)
 		return r;
 
@@ -2840,30 +2750,15 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 vddnb;
-	int r, size = sizeof(vddnb);
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
+	int r;
 
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU))
 		return -EINVAL;
 
-	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return r;
-	}
-
 	/* get the voltage */
-	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
-				   (void *)&vddnb, &size);
-
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
+					    (void *)&vddnb);
 	if (r)
 		return r;
 
@@ -2877,40 +2772,48 @@ static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
 	return sysfs_emit(buf, "vddnb\n");
 }
 
-static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
+static ssize_t amdgpu_hwmon_get_power(struct device *dev,
+				      enum amd_pp_sensors sensor)
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	unsigned int uw;
 	u32 query = 0;
-	int r, size = sizeof(u32);
-	unsigned uw;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return r;
-	}
-
-	/* get the voltage */
-	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
-				   (void *)&query, &size);
-
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+	int r;
+	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
 	if (r)
 		return r;
 
 	/* convert to microwatts */
 	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
 
-	return sysfs_emit(buf, "%u\n", uw);
+	return uw;
+}
+
+static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	ssize_t val;
+
+	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
+	if (val < 0)
+		return val;
+
+	return sysfs_emit(buf, "%zd\n", val);
+}
+
+static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	ssize_t val;
+
+	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
+	if (val < 0)
+		return val;
+
+	return sysfs_emit(buf, "%zd\n", val);
 }
 
 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -3045,26 +2948,11 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	uint32_t sclk;
-	int r, size = sizeof(sclk);
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-	if (adev->in_suspend && !adev->in_runpm)
-		return -EPERM;
-
-	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-		return r;
-	}
+	int r;
 
 	/* get the sclk */
-	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
-				   (void *)&sclk, &size);
-
-	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+	r = amdgpu_hwmon_get_sensor_generic(adev, 
AMDGPU_PP_SENSOR_GFX_SCLK, + (void *)&sclk); if (r) return r; @@ -3084,26 +2972,11 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); uint32_t mclk; - int r, size = sizeof(mclk); - - if (amdgpu_in_reset(adev)) - return -EPERM; - if (adev->in_suspend && !adev->in_runpm) - return -EPERM; - - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return r; - } + int r; /* get the sclk */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, - (void *)&mclk, &size); - - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, + (void *)&mclk); if (r) return r; @@ -3163,6 +3036,8 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU. * + * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU. + * * - power1_cap_min: minimum cap supported in microWatts * * - power1_cap_max: maximum cap supported in microWatts @@ -3231,6 +3106,7 @@ static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NU static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); +static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); @@ -3277,6 +3153,7 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_label.dev_attr.attr, &sensor_dev_attr_power1_average.dev_attr.attr, + &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power1_cap_max.dev_attr.attr, &sensor_dev_attr_power1_cap_min.dev_attr.attr, &sensor_dev_attr_power1_cap.dev_attr.attr, @@ -3302,6 +3179,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; + uint32_t tmp; /* under multi-vf mode, the hwmon attributes are all not supported */ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) @@ -3387,6 +3265,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) return 0; + /* not all products support both average and instantaneous */ + if (attr == &sensor_dev_attr_power1_average.dev_attr.attr && + amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP) + return 0; + if (attr == &sensor_dev_attr_power1_input.dev_attr.attr && + amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP) + return 0; + /* hide max/min values if we can't both query and manage the fan */ if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && @@ -3533,7 +3419,8 @@ void 
amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) #if defined(CONFIG_DEBUG_FS) static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m, - struct amdgpu_device *adev) { + struct amdgpu_device *adev) +{ uint16_t *p_val; uint32_t size; int i; @@ -3582,7 +3469,7 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) seq_printf(m, "\t%u mV (VDDNB)\n", value); size = sizeof(uint32_t); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); size = sizeof(value); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index 52045ad59bed..eec816f0cbf9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -24,8 +24,7 @@ #ifndef __AMDGPU_PM_H__ #define __AMDGPU_PM_H__ -struct cg_flag_name -{ +struct cg_flag_name { u64 flag; const char *name; }; diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h index 1dc7a065a6d4..251ed011b3b0 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h @@ -41,8 +41,7 @@ #define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2 #define SMU_13_0_0_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 -enum SMU_13_0_0_ODFEATURE_CAP -{ +enum SMU_13_0_0_ODFEATURE_CAP { SMU_13_0_0_ODCAP_GFXCLK_LIMITS = 0, SMU_13_0_0_ODCAP_UCLK_LIMITS, SMU_13_0_0_ODCAP_POWER_LIMIT, @@ -62,8 +61,7 @@ enum SMU_13_0_0_ODFEATURE_CAP SMU_13_0_0_ODCAP_COUNT, }; -enum SMU_13_0_0_ODFEATURE_ID -{ +enum SMU_13_0_0_ODFEATURE_ID { SMU_13_0_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature SMU_13_0_0_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_UCLK_LIMITS, //UCLK Limit feature SMU_13_0_0_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_0_ODCAP_POWER_LIMIT, //Power Limit feature @@ -85,8 +83,7 @@ enum SMU_13_0_0_ODFEATURE_ID #define SMU_13_0_0_MAX_ODFEATURE 32 //Maximum Number of OD Features -enum SMU_13_0_0_ODSETTING_ID -{ +enum SMU_13_0_0_ODSETTING_ID { SMU_13_0_0_ODSETTING_GFXCLKFMAX = 0, SMU_13_0_0_ODSETTING_GFXCLKFMIN, SMU_13_0_0_ODSETTING_UCLKFMIN, @@ -123,8 +120,7 @@ enum SMU_13_0_0_ODSETTING_ID }; #define SMU_13_0_0_MAX_ODSETTING 64 //Maximum Number of ODSettings -enum SMU_13_0_0_PWRMODE_SETTING -{ +enum SMU_13_0_0_PWRMODE_SETTING { SMU_13_0_0_PMSETTING_POWER_LIMIT_QUIET = 0, SMU_13_0_0_PMSETTING_POWER_LIMIT_BALANCE, SMU_13_0_0_PMSETTING_POWER_LIMIT_TURBO, @@ -144,8 +140,7 @@ enum SMU_13_0_0_PWRMODE_SETTING }; #define SMU_13_0_0_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings -struct smu_13_0_0_overdrive_table -{ +struct smu_13_0_0_overdrive_table { uint8_t revision; //Revision = SMU_13_0_0_PP_OVERDRIVE_VERSION uint8_t reserve[3]; //Zero filled field reserved for future use uint32_t feature_count; //Total number of supported features @@ -156,8 +151,7 @@ struct smu_13_0_0_overdrive_table int16_t pm_setting[SMU_13_0_0_MAX_PMSETTING]; //Optimized power mode feature settings }; -enum SMU_13_0_0_PPCLOCK_ID -{ +enum SMU_13_0_0_PPCLOCK_ID { SMU_13_0_0_PPCLOCK_GFXCLK = 0, SMU_13_0_0_PPCLOCK_SOCCLK, SMU_13_0_0_PPCLOCK_UCLK, @@ -175,8 +169,7 @@ enum SMU_13_0_0_PPCLOCK_ID }; #define SMU_13_0_0_MAX_PPCLOCK 16 
//Maximum Number of PP Clocks -struct smu_13_0_0_powerplay_table -{ +struct smu_13_0_0_powerplay_table { struct atom_common_table_header header; //For SMU13, header.format_revision = 15, header.content_revision = 0 uint8_t table_revision; //For SMU13, table_revision = 2 uint8_t padding; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 36c831b280ed..5d28c951a319 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -191,8 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, } #if 0 -static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = -{ +static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { { 0, 4, 1 }, { 1, 4, 1 }, { 2, 5, 1 }, @@ -204,32 +203,27 @@ static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { 0xffffffff } }; -static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = -{ +static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = { { 0, 4, 1 }, { 0xffffffff } }; -static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = -{ +static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = { { 0, 4, 1 }, { 0xffffffff } }; -static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = -{ +static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = { { 0, 4, 1 }, { 0xffffffff } }; -static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = -{ +static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = { { 0, 4, 1 }, { 0xffffffff } }; -static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = -{ +static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = { { 0, 4, 1 }, { 1, 4, 1 }, { 2, 5, 1 }, @@ -260,39 +254,32 @@ static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = { 0xffffffff } }; -static const struct kv_lcac_config_reg sx0_cac_config_reg[] = -{ +static const struct kv_lcac_config_reg sx0_cac_config_reg[] = { { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; -static const struct kv_lcac_config_reg mc0_cac_config_reg[] = -{ +static const struct kv_lcac_config_reg mc0_cac_config_reg[] = { { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; -static const struct kv_lcac_config_reg mc1_cac_config_reg[] = -{ +static const struct kv_lcac_config_reg mc1_cac_config_reg[] = { { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; -static const struct kv_lcac_config_reg mc2_cac_config_reg[] = -{ +static const struct kv_lcac_config_reg mc2_cac_config_reg[] = { { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; -static const struct kv_lcac_config_reg mc3_cac_config_reg[] = -{ +static const struct kv_lcac_config_reg mc3_cac_config_reg[] = { { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; -static const struct kv_lcac_config_reg cpl_cac_config_reg[] = -{ +static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } }; #endif -static const struct kv_pt_config_reg didt_config_kv[] = -{ +static const struct kv_pt_config_reg didt_config_kv[] = { { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, @@ -1173,9 +1160,9 @@ static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 
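The comma-spacing fixes in this kv_calculate_dfs_bypass_settings() hunk sit inside a nearest-reference selection loop: each graphics level is assigned a ClkBypassCntl divider code whenever its clock lands within 200 units (the table clocks appear to be in 10 kHz steps) of a known reference frequency. A minimal, self-contained restatement of the cases visible in the hunk below — an editorial sketch, not the driver's code, and it omits the reference clocks handled outside this hunk:

/* Sketch only: nearest-reference DFS bypass selection, illustrative values. */
#include <stdint.h>

static uint32_t kv_clock_diff(uint32_t a, uint32_t b)
{
	return (a > b) ? a - b : b - a;	/* |a - b|, as kv_get_clock_difference() computes */
}

static uint8_t kv_pick_bypass_cntl(uint32_t clk)
{
	static const struct { uint32_t ref; uint8_t cntl; } refs[] = {
		{ 26600, 7 },	/* ~266 MHz -> divider code 7 */
		{ 20000, 6 },	/* ~200 MHz -> divider code 6 */
		{ 10000, 8 },	/* ~100 MHz -> divider code 8 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(refs) / sizeof(refs[0]); i++)
		if (kv_clock_diff(clk, refs[i].ref) < 200)
			return refs[i].cntl;

	return 0;	/* no reference close enough: leave DFS bypass disabled */
}
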
pi->graphics_level[i].ClkBypassCntl = 2; else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) pi->graphics_level[i].ClkBypassCntl = 7; - else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) + else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200) pi->graphics_level[i].ClkBypassCntl = 6; - else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) + else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200) pi->graphics_level[i].ClkBypassCntl = 8; else pi->graphics_level[i].ClkBypassCntl = 0; @@ -1825,7 +1812,7 @@ static void kv_set_valid_clock_range(struct amdgpu_device *adev, if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].sclk_frequency) > (table->entries[pi->lowest_valid].sclk_frequency - - new_ps->levels[new_ps->num_levels -1].sclk)) + new_ps->levels[new_ps->num_levels - 1].sclk)) pi->highest_valid = pi->lowest_valid; else pi->lowest_valid = pi->highest_valid; @@ -3333,8 +3320,7 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = { .set_powergating_state = kv_dpm_set_powergating_state, }; -const struct amdgpu_ip_block_version kv_smu_ip_block = -{ +const struct amdgpu_ip_block_version kv_smu_ip_block = { .type = AMD_IP_BLOCK_TYPE_SMC, .major = 1, .minor = 0, diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h index 055321f61ca7..3e7caa715533 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h @@ -117,8 +117,7 @@ enum r600_display_watermark { R600_DISPLAY_WATERMARK_HIGH = 1, }; -enum r600_display_gap -{ +enum r600_display_gap { R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, R600_PM_DISPLAY_GAP_VBLANK = 1, R600_PM_DISPLAY_GAP_WATERMARK = 2, diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h index c7dc117a688c..90ec411c5029 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h @@ -29,8 +29,7 @@ #define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -struct PP_SIslands_Dpm2PerfLevel -{ +struct PP_SIslands_Dpm2PerfLevel { uint8_t MaxPS; uint8_t TgtAct; uint8_t MaxPS_StepInc; @@ -47,8 +46,7 @@ struct PP_SIslands_Dpm2PerfLevel typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel; -struct PP_SIslands_DPM2Status -{ +struct PP_SIslands_DPM2Status { uint32_t dpm2Flags; uint8_t CurrPSkip; uint8_t CurrPSkipPowerShift; @@ -68,8 +66,7 @@ struct PP_SIslands_DPM2Status typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status; -struct PP_SIslands_DPM2Parameters -{ +struct PP_SIslands_DPM2Parameters { uint32_t TDPLimit; uint32_t NearTDPLimit; uint32_t SafePowerLimit; @@ -78,8 +75,7 @@ struct PP_SIslands_DPM2Parameters }; typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters; -struct PP_SIslands_PAPMStatus -{ +struct PP_SIslands_PAPMStatus { uint32_t EstimatedDGPU_T; uint32_t EstimatedDGPU_P; uint32_t EstimatedAPU_T; @@ -89,8 +85,7 @@ struct PP_SIslands_PAPMStatus }; typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus; -struct PP_SIslands_PAPMParameters -{ +struct PP_SIslands_PAPMParameters { uint32_t NearTDPLimitTherm; uint32_t NearTDPLimitPAPM; uint32_t PlatformPowerLimit; @@ -100,8 +95,7 @@ struct PP_SIslands_PAPMParameters }; typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters; -struct SISLANDS_SMC_SCLK_VALUE -{ +struct SISLANDS_SMC_SCLK_VALUE { uint32_t vCG_SPLL_FUNC_CNTL; uint32_t vCG_SPLL_FUNC_CNTL_2; uint32_t 
vCG_SPLL_FUNC_CNTL_3; @@ -113,8 +107,7 @@ struct SISLANDS_SMC_SCLK_VALUE typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE; -struct SISLANDS_SMC_MCLK_VALUE -{ +struct SISLANDS_SMC_MCLK_VALUE { uint32_t vMPLL_FUNC_CNTL; uint32_t vMPLL_FUNC_CNTL_1; uint32_t vMPLL_FUNC_CNTL_2; @@ -129,8 +122,7 @@ struct SISLANDS_SMC_MCLK_VALUE typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE; -struct SISLANDS_SMC_VOLTAGE_VALUE -{ +struct SISLANDS_SMC_VOLTAGE_VALUE { uint16_t value; uint8_t index; uint8_t phase_settings; @@ -138,8 +130,7 @@ struct SISLANDS_SMC_VOLTAGE_VALUE typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE; -struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ +struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL { uint8_t ACIndex; uint8_t displayWatermark; uint8_t gen2PCIE; @@ -180,8 +171,7 @@ struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL; -struct SISLANDS_SMC_SWSTATE -{ +struct SISLANDS_SMC_SWSTATE { uint8_t flags; uint8_t levelCount; uint8_t padding2; @@ -205,8 +195,7 @@ struct SISLANDS_SMC_SWSTATE_SINGLE { #define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 #define SISLANDS_SMC_VOLTAGEMASK_MAX 4 -struct SISLANDS_SMC_VOLTAGEMASKTABLE -{ +struct SISLANDS_SMC_VOLTAGEMASKTABLE { uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX]; }; @@ -214,8 +203,7 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; #define SISLANDS_MAX_NO_VREG_STEPS 32 -struct SISLANDS_SMC_STATETABLE -{ +struct SISLANDS_SMC_STATETABLE { uint8_t thermalProtectType; uint8_t systemFlags; uint8_t maxVDDCIndexInPPTable; @@ -254,8 +242,7 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c #define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 -struct PP_SIslands_FanTable -{ +struct PP_SIslands_FanTable { uint8_t fdo_mode; uint8_t padding; int16_t temp_min; @@ -285,8 +272,7 @@ typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; #define SMC_SISLANDS_SCALE_I 7 #define SMC_SISLANDS_SCALE_R 12 -struct PP_SIslands_CacConfig -{ +struct PP_SIslands_CacConfig { uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; uint32_t lkge_lut_V0; uint32_t lkge_lut_Vstep; @@ -308,23 +294,20 @@ typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig; #define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16 #define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 -struct SMC_SIslands_MCRegisterAddress -{ +struct SMC_SIslands_MCRegisterAddress { uint16_t s0; uint16_t s1; }; typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress; -struct SMC_SIslands_MCRegisterSet -{ +struct SMC_SIslands_MCRegisterSet { uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; }; typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet; -struct SMC_SIslands_MCRegisters -{ +struct SMC_SIslands_MCRegisters { uint8_t last; uint8_t reserved[3]; SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; @@ -333,8 +316,7 @@ struct SMC_SIslands_MCRegisters typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters; -struct SMC_SIslands_MCArbDramTimingRegisterSet -{ +struct SMC_SIslands_MCArbDramTimingRegisterSet { uint32_t mc_arb_dram_timing; uint32_t mc_arb_dram_timing2; uint8_t mc_arb_rfsh_rate; @@ -344,8 +326,7 @@ struct SMC_SIslands_MCArbDramTimingRegisterSet typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet; -struct 
SMC_SIslands_MCArbDramTimingRegisters -{ +struct SMC_SIslands_MCArbDramTimingRegisters { uint8_t arb_current; uint8_t reserved[3]; SMC_SIslands_MCArbDramTimingRegisterSet data[16]; @@ -353,8 +334,7 @@ struct SMC_SIslands_MCArbDramTimingRegisters typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters; -struct SMC_SISLANDS_SPLL_DIV_TABLE -{ +struct SMC_SISLANDS_SPLL_DIV_TABLE { uint32_t freq[256]; uint32_t ss[256]; }; @@ -374,8 +354,7 @@ typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE; #define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16 -struct Smc_SIslands_DTE_Configuration -{ +struct Smc_SIslands_DTE_Configuration { uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; uint32_t K; diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index ff360c699171..9e4f8a4104a3 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -612,7 +612,7 @@ static int pp_dpm_get_pp_num_states(void *handle, memset(data, 0, sizeof(*data)); - if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps) return -EINVAL; data->nums = hwmgr->num_ps; @@ -644,7 +644,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table) return -EINVAL; *table = (char *)hwmgr->soft_pp_table; @@ -1002,7 +1002,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, struct pp_hwmgr *hwmgr = handle; int ret = 0; - if (!hwmgr || !hwmgr->pm_en ||!limit) + if (!hwmgr || !hwmgr->pm_en || !limit) return -EINVAL; if (power_type != PP_PWR_TYPE_SUSTAINED) @@ -1047,7 +1047,7 @@ static int pp_get_display_power_level(void *handle, { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en ||!output) + if (!hwmgr || !hwmgr->pm_en || !output) return -EINVAL; return phm_get_dal_power_level(hwmgr, output); @@ -1120,7 +1120,7 @@ static int pp_get_clock_by_type_with_latency(void *handle, { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en ||!clocks) + if (!hwmgr || !hwmgr->pm_en || !clocks) return -EINVAL; return phm_get_clock_by_type_with_latency(hwmgr, type, clocks); @@ -1132,7 +1132,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle, { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en ||!clocks) + if (!hwmgr || !hwmgr->pm_en || !clocks) return -EINVAL; return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); @@ -1155,7 +1155,7 @@ static int pp_display_clock_voltage_request(void *handle, { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en ||!clock) + if (!hwmgr || !hwmgr->pm_en || !clock) return -EINVAL; return phm_display_clock_voltage_request(hwmgr, clock); @@ -1167,7 +1167,7 @@ static int pp_get_display_mode_validation_clocks(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - if (!hwmgr || !hwmgr->pm_en ||!clocks) + if (!hwmgr || !hwmgr->pm_en || !clocks) return -EINVAL; clocks->level = PP_DAL_POWERLEVEL_7; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c index 45f608838f6e..65b95d6be5c5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c @@ -38,8 +38,7 @@ #include "gca/gfx_7_2_d.h" #include "gca/gfx_7_2_sh_mask.h" -static const 
struct baco_cmd_entry gpio_tbl[] = -{ +static const struct baco_cmd_entry gpio_tbl[] = { { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, @@ -52,15 +51,13 @@ static const struct baco_cmd_entry gpio_tbl[] = { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } }; -static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = -{ +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } }; -static const struct baco_cmd_entry use_bclk_tbl[] = -{ +static const struct baco_cmd_entry use_bclk_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, @@ -82,8 +79,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] = { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } }; -static const struct baco_cmd_entry turn_off_plls_tbl[] = -{ +static const struct baco_cmd_entry turn_off_plls_tbl[] = { { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 }, { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 }, @@ -120,8 +116,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] = { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 } }; -static const struct baco_cmd_entry enter_baco_tbl[] = -{ +static const struct baco_cmd_entry enter_baco_tbl[] = { { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, @@ -136,8 +131,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] = #define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK -static const struct baco_cmd_entry exit_baco_tbl[] = -{ +static const struct baco_cmd_entry exit_baco_tbl[] = { { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, @@ -152,8 +146,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] = { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } }; -static const struct baco_cmd_entry clean_baco_tbl[] = -{ +static const struct baco_cmd_entry clean_baco_tbl[] = { { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }, { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c index 1c73776bd606..fd79337a3536 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c +++ 
b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c @@ -42,7 +42,7 @@ static bool baco_wait_register(struct pp_hwmgr *hwmgr, u32 reg, u32 mask, u32 va } static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 mask, - u32 shift, u32 value, u32 timeout) + u32 shift, u32 value, u32 timeout) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); u32 data; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c index c0368f2dfb21..b3e768fa79f2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c @@ -36,8 +36,7 @@ #include "smu/smu_7_1_3_sh_mask.h" -static const struct baco_cmd_entry gpio_tbl[] = -{ +static const struct baco_cmd_entry gpio_tbl[] = { { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, @@ -50,15 +49,13 @@ static const struct baco_cmd_entry gpio_tbl[] = { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } }; -static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = -{ +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } }; -static const struct baco_cmd_entry use_bclk_tbl[] = -{ +static const struct baco_cmd_entry use_bclk_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, @@ -78,8 +75,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] = { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } }; -static const struct baco_cmd_entry turn_off_plls_tbl[] = -{ +static const struct baco_cmd_entry turn_off_plls_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, @@ -88,8 +84,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] = { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 } }; -static const struct baco_cmd_entry clk_req_b_tbl[] = -{ +static const struct baco_cmd_entry clk_req_b_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, @@ -104,8 +99,7 @@ static const struct baco_cmd_entry clk_req_b_tbl[] = { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } }; -static const struct baco_cmd_entry enter_baco_tbl[] = -{ +static const struct baco_cmd_entry enter_baco_tbl[] = { { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 
0, 5, 0x40000 }, @@ -122,8 +116,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] = #define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK -static const struct baco_cmd_entry exit_baco_tbl[] = -{ +static const struct baco_cmd_entry exit_baco_tbl[] = { { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, @@ -138,8 +131,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] = { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } }; -static const struct baco_cmd_entry clean_baco_tbl[] = -{ +static const struct baco_cmd_entry clean_baco_tbl[] = { { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 }, { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 }, { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 }, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c index f2cef0930aa9..2b5ac21fee39 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c @@ -120,7 +120,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case CHIP_TOPAZ: hwmgr->smumgr_funcs = &iceland_smu_funcs; topaz_set_asic_special_caps(hwmgr); - hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | + hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); hwmgr->pp_table_version = PP_TABLE_V0; hwmgr->od_enabled = false; @@ -133,7 +133,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case CHIP_FIJI: hwmgr->smumgr_funcs = &fiji_smu_funcs; fiji_set_asic_special_caps(hwmgr); - hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK | + hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | PP_ENABLE_GFX_CG_THRU_SMU); break; case CHIP_POLARIS11: @@ -195,7 +195,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) int hwmgr_sw_init(struct pp_hwmgr *hwmgr) { - if (!hwmgr|| !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init) + if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init) return -EINVAL; phm_register_irq_handlers(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c index 8f8e296f2fe9..a6a6d43b09f8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c @@ -35,8 +35,7 @@ #include "smu/smu_7_1_3_d.h" #include "smu/smu_7_1_3_sh_mask.h" -static const struct baco_cmd_entry gpio_tbl[] = -{ +static const struct baco_cmd_entry gpio_tbl[] = { { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, @@ -49,15 +48,13 @@ static const struct baco_cmd_entry gpio_tbl[] = { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } }; -static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = -{ +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } }; -static const struct baco_cmd_entry use_bclk_tbl[] = -{ +static const struct baco_cmd_entry use_bclk_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 
CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, @@ -70,8 +67,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] = { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } }; -static const struct baco_cmd_entry turn_off_plls_tbl[] = -{ +static const struct baco_cmd_entry turn_off_plls_tbl[] = { { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, @@ -92,8 +88,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] = { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } }; -static const struct baco_cmd_entry clk_req_b_tbl[] = -{ +static const struct baco_cmd_entry clk_req_b_tbl[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }, @@ -108,8 +103,7 @@ static const struct baco_cmd_entry clk_req_b_tbl[] = { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 } }; -static const struct baco_cmd_entry enter_baco_tbl[] = -{ +static const struct baco_cmd_entry enter_baco_tbl[] = { { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, @@ -126,8 +120,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] = #define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK -static const struct baco_cmd_entry exit_baco_tbl[] = -{ +static const struct baco_cmd_entry exit_baco_tbl[] = { { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, @@ -142,14 +135,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] = { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } }; -static const struct baco_cmd_entry clean_baco_tbl[] = -{ +static const struct baco_cmd_entry clean_baco_tbl[] = { { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } }; -static const struct baco_cmd_entry use_bclk_tbl_vg[] = -{ +static const struct baco_cmd_entry use_bclk_tbl_vg[] = { { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, @@ -160,8 +151,7 @@ static const struct baco_cmd_entry use_bclk_tbl_vg[] = { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } }; -static const struct baco_cmd_entry 
turn_off_plls_tbl_vg[] = -{ +static const struct baco_cmd_entry turn_off_plls_tbl_vg[] = { { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h index b3103bd4be42..1f987e846628 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h @@ -278,16 +278,14 @@ struct pp_atom_ctrl__avfs_parameters { uint8_t ucReserved; }; -struct _AtomCtrl_HiLoLeakageOffsetTable -{ +struct _AtomCtrl_HiLoLeakageOffsetTable { USHORT usHiLoLeakageThreshold; USHORT usEdcDidtLoDpm7TableOffset; USHORT usEdcDidtHiDpm7TableOffset; }; typedef struct _AtomCtrl_HiLoLeakageOffsetTable AtomCtrl_HiLoLeakageOffsetTable; -struct _AtomCtrl_EDCLeakgeTable -{ +struct _AtomCtrl_EDCLeakgeTable { ULONG DIDT_REG[24]; }; typedef struct _AtomCtrl_EDCLeakgeTable AtomCtrl_EDCLeakgeTable; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h index 2fc1733bcdcf..e86e05c786d9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h @@ -147,8 +147,7 @@ struct pp_atomfwctrl_bios_boot_up_values { uint8_t ucCoolingID; }; -struct pp_atomfwctrl_smc_dpm_parameters -{ +struct pp_atomfwctrl_smc_dpm_parameters { uint8_t liquid1_i2c_address; uint8_t liquid2_i2c_address; uint8_t vr_i2c_address; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h index dac29fe6cfc6..6f54c410c2f9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h @@ -166,7 +166,7 @@ static fInt fNaturalLog(fInt value) error_term = fAdd(fNegativeOne, value); - return (fAdd(solution, error_term)); + return fAdd(solution, error_term); } static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) @@ -230,7 +230,7 @@ static fInt ConvertToFraction(int X) /*Add all range checking here. 
Is it possib static fInt fNegate(fInt X) { fInt CONSTANT_NEGONE = ConvertToFraction(-1); - return (fMultiply(X, CONSTANT_NEGONE)); + return fMultiply(X, CONSTANT_NEGONE); } static fInt Convert_ULONG_ToFraction(uint32_t X) @@ -382,14 +382,14 @@ static int ConvertBackToInteger (fInt A) /*THIS is the function that will be use scaledDecimal.full = uGetScaledDecimal(A); - fullNumber = fAdd(scaledDecimal,scaledReal); + fullNumber = fAdd(scaledDecimal, scaledReal); return fullNumber.full; } static fInt fGetSquare(fInt A) { - return fMultiply(A,A); + return fMultiply(A, A); } /* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ @@ -447,7 +447,7 @@ static fInt fSqrt(fInt num) } while (uAbs(error) > 0); - return (x_new); + return x_new; } static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) @@ -459,7 +459,7 @@ static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) f_CONSTANT100 = ConvertToFraction(100); f_CONSTANT10 = ConvertToFraction(10); - while(GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { + while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) { A = fDivide(A, f_CONSTANT10); B = fDivide(B, f_CONSTANT10); C = fDivide(C, f_CONSTANT10); @@ -515,7 +515,7 @@ static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole dec[i] = tmp / (1 << SHIFT_AMOUNT); tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]); tmp *= 10; - scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 -i); + scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i); } return scaledDecimal; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h index b0ac4d121adc..7a31cfa5e7fb 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h @@ -419,8 +419,7 @@ typedef struct _ATOM_Fiji_PowerTune_Table { USHORT usReserved; } ATOM_Fiji_PowerTune_Table; -typedef struct _ATOM_Polaris_PowerTune_Table -{ +typedef struct _ATOM_Polaris_PowerTune_Table { UCHAR ucRevId; USHORT usTDP; USHORT usConfigurableTDP; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c index 182118e3fd5f..5794b64507bf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c @@ -1237,7 +1237,7 @@ static int get_vce_clock_voltage_limit_table(struct pp_hwmgr *hwmgr, const VCEClockInfoArray *array) { unsigned long i; - struct phm_vce_clock_voltage_dependency_table *vce_table = NULL; + struct phm_vce_clock_voltage_dependency_table *vce_table; vce_table = kzalloc(struct_size(vce_table, entries, table->numEntries), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 86d6e88c7386..02ba68d7c654 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -430,37 +430,37 @@ static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } /* temporary hardcoded clock voltage breakdown tables */ -static const DpmClock_t VddDcfClk[]= { +static const DpmClock_t VddDcfClk[] = { { 300, 2600}, { 600, 3200}, { 600, 3600}, }; -static const DpmClock_t VddSocClk[]= { +static const DpmClock_t VddSocClk[] = { { 478, 2600}, { 722, 3200}, { 722, 3600}, }; -static const DpmClock_t VddFClk[]= { +static 
const DpmClock_t VddFClk[] = {
 	{ 400, 2600},
 	{1200, 3200},
 	{1200, 3600},
 };
 
-static const DpmClock_t VddDispClk[]= {
+static const DpmClock_t VddDispClk[] = {
 	{ 435, 2600},
 	{ 661, 3200},
 	{1086, 3600},
 };
 
-static const DpmClock_t VddDppClk[]= {
+static const DpmClock_t VddDppClk[] = {
 	{ 435, 2600},
 	{ 661, 3200},
 	{ 661, 3600},
 };
 
-static const DpmClock_t VddPhyClk[]= {
+static const DpmClock_t VddPhyClk[] = {
 	{ 540, 2600},
 	{ 810, 3200},
 	{ 810, 3600},
@@ -1358,7 +1358,7 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 	struct amdgpu_device *adev = hwmgr->adev;
 	int i;
 
-	smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
+	smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
 
 	if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
 		for (i = 0; i < NUM_WM_RANGES; i++)
@@ -1461,7 +1461,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 
 	phm_get_sysfs_buf(&buf, &size);
 
-	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
+	size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n", title[0],
 			title[1], title[2], title[3], title[4], title[5]);
 
 	for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 1cb402264497..5a2371484a58 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -83,15 +83,15 @@
 #define PCIE_BUS_CLK     10000
 #define TCLK             (PCIE_BUS_CLK / 10)
 
-static struct profile_mode_setting smu7_profiling[7] =
-	{{0, 0, 0, 0, 0, 0, 0, 0},
+static struct profile_mode_setting smu7_profiling[7] = {
+	 {0, 0, 0, 0, 0, 0, 0, 0},
 	 {1, 0, 100, 30, 1, 0, 100, 10},
 	 {1, 10, 0, 30, 0, 0, 0, 0},
 	 {0, 0, 0, 0, 1, 10, 16, 31},
 	 {1, 0, 11, 50, 1, 0, 100, 10},
 	 {1, 0, 5, 30, 0, 0, 0, 0},
 	 {0, 0, 0, 0, 0, 0, 0, 0},
-	};
+};
 
 #define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
@@ -904,7 +904,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 					dep_sclk_table->entries[i].clk;
 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
-					(i == 0) ? true : false;
+					i == 0;
 			data->dpm_table.sclk_table.count++;
 		}
 	}
@@ -919,7 +919,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
 					dep_mclk_table->entries[i].clk;
 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
-					(i == 0) ? true : false;
+					i == 0;
 			data->dpm_table.mclk_table.count++;
 		}
 	}
@@ -950,7 +950,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 	odn_table->odn_core_clock_dpm_levels.num_of_pl =
 						data->golden_dpm_table.sclk_table.count;
 	entries = odn_table->odn_core_clock_dpm_levels.entries;
-	for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
+	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
 		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
 		entries[i].enabled = true;
 		entries[i].vddc = dep_sclk_table->entries[i].vddc;
@@ -962,7 +962,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
 						data->golden_dpm_table.mclk_table.count;
 	entries = odn_table->odn_memory_clock_dpm_levels.entries;
-	for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
+	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
 		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
 		entries[i].enabled = true;
 		entries[i].vddc = dep_mclk_table->entries[i].vddc;
@@ -1813,13 +1813,13 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
 	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
 	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
-	data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+	data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
 	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
-	data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
-	data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
-	data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
-	data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
-	data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+	data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+	data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+	data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+	data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+	data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
 
 	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
 	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
@@ -2002,7 +2002,7 @@ static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
 	} else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
 		   ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
 		min = 900;
-		max= 2100;
+		max = 2100;
 	} else if (hwmgr->chip_id == CHIP_POLARIS10) {
 		if (adev->pdev->subsystem_vendor == 0x106B) {
 			min = 1000;
@@ -4018,7 +4018,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
 							SMU_SoftRegisters,
 							(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
-							AverageGraphicsActivity:
+							AverageGraphicsActivity :
 							AverageMemoryActivity);
 
 		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
@@ -4039,7 +4039,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
 		*size = 4;
 		return 0;
-	case AMDGPU_PP_SENSOR_GPU_POWER:
+	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
 		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
 	case AMDGPU_PP_SENSOR_VDDGFX:
 		if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
index 21be23ec3c79..65001bed0a9a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
@@ -520,8 +520,7 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
 	{   0xFFFFFFFF  }
 };
 
-static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
-{
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
  *      Offset      Mask      Shift      Value      Type
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -646,7 +645,7 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[]
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
-	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
 
 	{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
@@ -666,8 +665,7 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[]
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
-{
+static const struct gpu_pt_config_reg GCCACConfig_VegaM[] = {
 // ---------------------------------------------------------------------------------------------------------------------------------------
 //      Offset      Mask      Shift      Value      Type
 // ---------------------------------------------------------------------------------------------------------------------------------------
@@ -703,8 +701,7 @@ static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
 	{   0xFFFFFFFF  }  // End of list
 };
 
-static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
-{
+static const struct gpu_pt_config_reg DIDTConfig_VegaM[] = {
 // ---------------------------------------------------------------------------------------------------------------------------------------
 //      Offset      Mask      Shift      Value      Type
 // ---------------------------------------------------------------------------------------------------------------------------------------
@@ -831,7 +828,7 @@ static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
-	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
 
 	{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
@@ -1103,7 +1100,7 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
 		PP_ASSERT_WITH_CODE((0 == smc_result),
 				"Failed to enable CAC in SMC.", result = -1);
 
-		data->cac_enabled = (0 == smc_result) ? true : false;
+		data->cac_enabled = smc_result == 0;
 	}
 	return result;
 }
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index d0b1ab6c4523..79a566f3564a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -696,7 +696,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
 			return -EINVAL);
 
 	dep_table->count = allowed_dep_table->count;
-	for (i=0; i<dep_table->count; i++) {
+	for (i = 0; i < dep_table->count; i++) {
 		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
 		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
 		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
index 2a75da1e9f03..83b3c9315143 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
@@ -194,7 +194,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 #define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval)	\
 	PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg,		\
 			(fieldval) << PHM_FIELD_SHIFT(reg, field),	\
-			PHM_FIELD_MASK(reg, field) )
+			PHM_FIELD_MASK(reg, field))
 
 
 #define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr,	\
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
index ea743bea8e29..432d4fd2a0ba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
@@ -36,8 +36,7 @@
 #include "smu/smu_7_1_2_sh_mask.h"
 
 
-static const struct baco_cmd_entry gpio_tbl[] =
-{
+static const struct baco_cmd_entry gpio_tbl[] = {
 	{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
 	{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
 	{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
@@ -50,15 +49,13 @@ static const struct baco_cmd_entry gpio_tbl[] =
 	{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
 };
 
-static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
-{
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
 	{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
 	{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
 	{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
 };
 
-static const struct baco_cmd_entry use_bclk_tbl[] =
-{
+static const struct baco_cmd_entry use_bclk_tbl[] = {
 	{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
 	{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
 	{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
@@ -80,8 +77,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] =
 	{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
 };
 
-static const struct baco_cmd_entry turn_off_plls_tbl[] =
-{
+static const struct baco_cmd_entry turn_off_plls_tbl[] = {
 	{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
 	{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
 	{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
@@ -112,8 +108,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] =
 	{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
 };
 
-static const struct baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct baco_cmd_entry enter_baco_tbl[] = {
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
 	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
@@ -130,8 +125,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] =
 
 #define BACO_CNTL__PWRGOOD_MASK  BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK
 
-static const struct baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl[] = {
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -146,22 +140,19 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
 	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
 };
 
-static const struct baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl[] = {
 	{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
 	{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
 };
 
-static const struct baco_cmd_entry gpio_tbl_iceland[] =
-{
+static const struct baco_cmd_entry gpio_tbl_iceland[] = {
 	{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
 	{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
 	{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
 	{ CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
 };
 
-static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] = {
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -177,8 +168,7 @@ static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
 	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
 };
 
-static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] = {
 	{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
 };
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
index 46bb16c29cf6..6836e98d37be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
@@ -31,24 +31,22 @@
 
 
-static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry pre_baco_tbl[] = {
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_DOORBELL_CNTL), BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 1},
 	{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_FB_EN), 0, 0, 0, 0},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1}
 };
 
-static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry enter_baco_tbl[] = {
 	{CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1},
-	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT,0, 1},
+	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1},
-	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT,0, 1},
+	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1},
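[Editor's note between hunks: the BACO enter/exit sequences in these files are plain data tables rather than open-coded register writes; a small helper in the same files (baco_program_registers()/soc15_baco_program_registers()) walks an array and applies one entry at a time. Below is a minimal sketch of such a walker, under stated assumptions: the simplified struct baco_cmd layout, the local enum values, and the reg_read()/reg_write() accessors are illustrative stand-ins, not the driver's real definitions.]

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: loosely mirrors the in-tree command entries. */
enum baco_cmd_type { CMD_WRITE, CMD_READMODIFYWRITE, CMD_WAITFOR };

struct baco_cmd {
	enum baco_cmd_type cmd;
	uint32_t reg;		/* register offset */
	uint32_t mask;		/* field mask for RMW/WAITFOR */
	uint32_t shift;		/* field shift for the value */
	uint32_t timeout;	/* poll bound for CMD_WAITFOR */
	uint32_t val;		/* value to write or to wait for */
};

extern uint32_t reg_read(uint32_t reg);		/* hypothetical accessor */
extern void reg_write(uint32_t reg, uint32_t val);	/* hypothetical accessor */

static bool baco_exec_one(const struct baco_cmd *e)
{
	uint32_t data, tries = 0;

	switch (e->cmd) {
	case CMD_WRITE:
		reg_write(e->reg, e->val << e->shift);
		return true;
	case CMD_READMODIFYWRITE:
		/* update only the masked field, preserving the rest */
		data = reg_read(e->reg);
		data = (data & ~e->mask) | (e->val << e->shift);
		reg_write(e->reg, data);
		return true;
	case CMD_WAITFOR:
		do {
			if ((reg_read(e->reg) & e->mask) == e->val)
				return true;
		} while (tries++ < e->timeout);
		return false;	/* register never reached the expected state */
	}
	return false;
}

[Keeping the sequences as const tables is what makes the brace-style cleanups in these hunks purely mechanical: no control flow changes, only data layout.]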
@@ -58,13 +56,12 @@ static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
 	{CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100}
 };
 
-static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry exit_baco_tbl[] = {
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0},
-	{CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10,0},
-	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0,0},
+	{CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0},
+	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0},
-	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT,0, 0},
+	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 0},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0},
@@ -74,13 +71,12 @@ static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
 	{CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0},
 	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0},
-	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK ,BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0},
-	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK , BACO_CNTL__BACO_EN__SHIFT, 0,0},
+	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0},
+	{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0},
 	{CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0}
 };
 
-static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
 	{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0},
 	{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
 };
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c51dd4c74fe9..6d6bc6a380b3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -1375,8 +1375,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 				dep_mm_table->entries[i].eclk) {
 			dpm_table->dpm_levels[dpm_table->count].value =
 					dep_mm_table->entries[i].eclk;
-			dpm_table->dpm_levels[dpm_table->count].enabled =
-					(i == 0) ? true : false;
+			dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
 			dpm_table->count++;
 		}
 	}
@@ -1391,8 +1390,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			    dep_mm_table->entries[i].vclk) {
 				dpm_table->dpm_levels[dpm_table->count].value =
 						dep_mm_table->entries[i].vclk;
-				dpm_table->dpm_levels[dpm_table->count].enabled =
-						(i == 0) ? true : false;
+				dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
 				dpm_table->count++;
 			}
 		}
@@ -1405,8 +1403,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			    dep_mm_table->entries[i].dclk) {
 				dpm_table->dpm_levels[dpm_table->count].value =
 						dep_mm_table->entries[i].dclk;
-				dpm_table->dpm_levels[dpm_table->count].enabled =
-						(i == 0) ? true : false;
+				dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
 				dpm_table->count++;
 			}
 		}
@@ -3969,7 +3966,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
 		*size = 4;
 		break;
-	case AMDGPU_PP_SENSOR_GPU_POWER:
+	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
 		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
 		break;
 	case AMDGPU_PP_SENSOR_VDDGFX:
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 309a9d3bc1b7..3007b054c873 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
@@ -30,8 +30,7 @@
 #include "pp_debug.h"
 #include "soc15_common.h"
 
-static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
  *      Offset      Mask      Shift      Value
  * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -55,8 +54,7 @@ static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
  *      Offset      Mask      Shift      Value
  * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -120,8 +118,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -149,8 +146,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -172,8 +168,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
 };
 
 
-static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -201,8 +196,7 @@ static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -260,8 +254,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
 };
 
 
-static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -293,8 +286,7 @@ static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -362,8 +354,7 @@ static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SELCacConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -401,8 +392,7 @@ static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
 };
 
 
-static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -431,8 +421,7 @@ static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -451,8 +440,7 @@ static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[]
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -478,8 +466,7 @@ static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -492,8 +479,7 @@ static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -514,8 +500,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -536,8 +521,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -571,8 +555,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -586,8 +569,7 @@ static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] =
-{
+static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -601,8 +583,7 @@ static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] =
 };
 
 
-static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -621,8 +602,7 @@ static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[]
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -651,8 +631,7 @@ static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -673,8 +652,7 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -695,8 +673,7 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -710,8 +687,7 @@ static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -726,8 +702,7 @@ static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -742,8 +717,7 @@ static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]=
-{
+static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -756,8 +730,7 @@ static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]=
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
-static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] = {
 /* ---------------------------------------------------------------------------------------------------------------------------------------
 *      Offset      Mask      Shift      Value
 * ---------------------------------------------------------------------------------------------------------------------------------------
@@ -919,7 +892,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
-		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
 		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 
 		result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -970,7 +943,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
-		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
 		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 
 		result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -1031,7 +1004,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (count = 0; count < num_se; count++) {
-		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1081,7 +1054,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { - data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h index 9c479bd9a786..8b0590b834cc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h @@ -317,16 +317,14 @@ typedef struct _ATOM_Vega10_Thermal_Controller { UCHAR ucFlags; /* to be defined */ } ATOM_Vega10_Thermal_Controller; -typedef struct _ATOM_Vega10_VCE_State_Record -{ +typedef struct _ATOM_Vega10_VCE_State_Record { UCHAR ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Vega10_MM_Dependency_Table' type */ UCHAR ucFlag; /* 2 bits indicates memory p-states */ UCHAR ucSCLKIndex; /* index into ATOM_Vega10_SCLK_Dependency_Table */ UCHAR ucMCLKIndex; /* index into ATOM_Vega10_MCLK_Dependency_Table */ } ATOM_Vega10_VCE_State_Record; -typedef struct _ATOM_Vega10_VCE_State_Table -{ +typedef struct _ATOM_Vega10_VCE_State_Table { UCHAR ucRevId; UCHAR ucNumEntries; ATOM_Vega10_VCE_State_Record entries[1]; @@ -361,8 +359,7 @@ typedef struct _ATOM_Vega10_PowerTune_Table { USHORT usTemperatureLimitTedge; } ATOM_Vega10_PowerTune_Table; -typedef struct _ATOM_Vega10_PowerTune_Table_V2 -{ +typedef struct _ATOM_Vega10_PowerTune_Table_V2 { UCHAR ucRevId; USHORT usSocketPowerLimit; USHORT usBatteryPowerLimit; @@ -388,8 +385,7 @@ typedef struct _ATOM_Vega10_PowerTune_Table_V2 USHORT usTemperatureLimitTedge; } ATOM_Vega10_PowerTune_Table_V2; -typedef struct _ATOM_Vega10_PowerTune_Table_V3 -{ +typedef struct _ATOM_Vega10_PowerTune_Table_V3 { UCHAR ucRevId; USHORT usSocketPowerLimit; USHORT usBatteryPowerLimit; @@ -428,15 +424,13 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record { USHORT usVddMemLimit; } ATOM_Vega10_Hard_Limit_Record; -typedef struct _ATOM_Vega10_Hard_Limit_Table -{ +typedef struct _ATOM_Vega10_Hard_Limit_Table { UCHAR ucRevId; UCHAR ucNumEntries; ATOM_Vega10_Hard_Limit_Record entries[1]; } ATOM_Vega10_Hard_Limit_Table; -typedef struct _Vega10_PPTable_Generic_SubTable_Header -{ +typedef struct _Vega10_PPTable_Generic_SubTable_Header { UCHAR ucRevId; } Vega10_PPTable_Generic_SubTable_Header; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c index bb90d8abf79b..3be616af327e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c @@ -372,9 +372,9 @@ static int get_mm_clock_voltage_table( return 0; } -static void 
get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda) +static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) { - switch(line){ + switch (line) { case Vega10_I2CLineID_DDC1: *scl = Vega10_I2C_DDC1CLK; *sda = Vega10_I2C_DDC1DATA; @@ -954,7 +954,7 @@ static int init_powerplay_extended_tables( if (!result && powerplay_table->usPixclkDependencyTableOffset) result = get_pix_clk_voltage_dependency_table(hwmgr, &pp_table_info->vdd_dep_on_pixclk, - (const ATOM_Vega10_PIXCLK_Dependency_Table*) + (const ATOM_Vega10_PIXCLK_Dependency_Table *) pixclk_dep_table); if (!result && powerplay_table->usPhyClkDependencyTableOffset) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c index bc53cce4f32d..32cc8de296e4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c @@ -29,16 +29,14 @@ #include "vega12_ppsmc.h" #include "vega12_baco.h" -static const struct soc15_baco_cmd_entry pre_baco_tbl[] = -{ +static const struct soc15_baco_cmd_entry pre_baco_tbl[] = { { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmBIF_DOORBELL_CNTL_BASE_IDX, mmBIF_DOORBELL_CNTL, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 0 }, { CMD_WRITE, NBIF_HWID, 0, mmBIF_FB_EN_BASE_IDX, mmBIF_FB_EN, 0, 0, 0, 0 }, { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1 }, { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1 } }; -static const struct soc15_baco_cmd_entry enter_baco_tbl[] = -{ +static const struct soc15_baco_cmd_entry enter_baco_tbl[] = { { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000 }, { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1 }, { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1 }, @@ -56,8 +54,7 @@ static const struct soc15_baco_cmd_entry enter_baco_tbl[] = { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100 } }; -static const struct soc15_baco_cmd_entry exit_baco_tbl[] = -{ +static const struct soc15_baco_cmd_entry exit_baco_tbl[] = { { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0 }, { CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0 }, { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0 }, @@ -77,8 +74,7 @@ static const struct soc15_baco_cmd_entry exit_baco_tbl[] = { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0 } }; -static const struct soc15_baco_cmd_entry clean_baco_tbl[] = -{ +static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_6_BASE_IDX, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_7_BASE_IDX, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } }; diff --git 
a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 1937be1cf5b4..460067933de2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -1529,7 +1529,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value); if (!ret) *size = 4; @@ -1623,13 +1623,13 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = min_clocks.dcefClock/10; + clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10; if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) { if (data->smu_features[GNLD_DS_DCEFCLK].supported) PP_ASSERT_WITH_CODE( !smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcefClockInSR /100, + min_clocks.dcefClockInSR / 100, NULL), "Attempt to set divider for DCEFCLK Failed!", return -1); @@ -2354,8 +2354,8 @@ static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) uint32_t i, latency; disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && - !hwmgr->display_config->multi_monitor_in_sync) || - vblank_too_short; + !hwmgr->display_config->multi_monitor_in_sync) || + vblank_too_short; latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; /* gfxclk */ @@ -2522,7 +2522,7 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, + (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, NULL)), "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", return ret); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h index aa63ae41942d..9f2ce4308548 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h @@ -38,8 +38,7 @@ #define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8 #define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4 -enum -{ +enum { GNLD_DPM_PREFETCHER = 0, GNLD_DPM_GFXCLK, GNLD_DPM_UCLK, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h index bf4f5095b80d..9b8435a4d306 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h @@ -72,8 +72,7 @@ enum ATOM_VEGA12_PPCLOCK_ID { typedef enum ATOM_VEGA12_PPCLOCK_ID ATOM_VEGA12_PPCLOCK_ID; -typedef struct _ATOM_VEGA12_POWERPLAYTABLE -{ +typedef struct _ATOM_VEGA12_POWERPLAYTABLE { struct atom_common_table_header sHeader; UCHAR ucTableRevision; USHORT usTableSize; @@ -92,11 +91,11 @@ typedef struct _ATOM_VEGA12_POWERPLAYTABLE USHORT usODPowerSavePowerLimit; USHORT usSoftwareShutdownTemp; - ULONG PowerSavingClockMax [ATOM_VEGA12_PPCLOCK_COUNT]; - ULONG PowerSavingClockMin [ATOM_VEGA12_PPCLOCK_COUNT]; + ULONG PowerSavingClockMax[ATOM_VEGA12_PPCLOCK_COUNT]; + ULONG PowerSavingClockMin[ATOM_VEGA12_PPCLOCK_COUNT]; - ULONG ODSettingsMax 
[ATOM_VEGA12_ODSETTING_COUNT]; - ULONG ODSettingsMin [ATOM_VEGA12_ODSETTING_COUNT]; + ULONG ODSettingsMax[ATOM_VEGA12_ODSETTING_COUNT]; + ULONG ODSettingsMin[ATOM_VEGA12_ODSETTING_COUNT]; USHORT usReserve[5]; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index 8d99c7a5abf8..994c0d374bfa 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -31,8 +31,7 @@ #include "amdgpu_ras.h" -static const struct soc15_baco_cmd_entry clean_baco_tbl[] = -{ +static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0}, {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; @@ -90,11 +89,11 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - if(smum_send_msg_to_smc_with_parameter(hwmgr, + if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL)) return -EINVAL; } else { - if(smum_send_msg_to_smc_with_parameter(hwmgr, + if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 1, NULL)) return -EINVAL; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 4e19ccbdb807..3b33af30eb0f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -1402,7 +1402,7 @@ static int vega20_od8_set_settings( "Failed to export over drive table!", return ret); - switch(index) { + switch (index) { case OD8_SETTING_GFXCLK_FMIN: od_table.GfxclkFmin = (uint16_t)value; break; @@ -2129,7 +2129,7 @@ static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, return ret; } -static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, +static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, int idx, uint32_t *query) { int ret = 0; @@ -2140,10 +2140,17 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, return ret; /* For the 40.46 release, they changed the value name */ - if (hwmgr->smu_version == 0x282e00) - *query = metrics_table.AverageSocketPower << 8; - else + switch (idx) { + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + if (hwmgr->smu_version == 0x282e00) + *query = metrics_table.AverageSocketPower << 8; + else + ret = -EOPNOTSUPP; + break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: *query = metrics_table.CurrSocketPower << 8; + break; + } return ret; } @@ -2253,9 +2260,10 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = data->vce_power_gated ? 
0 : 1; *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: *size = 16; - ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value); + ret = vega20_get_gpu_power(hwmgr, idx, (uint32_t *)value); break; case AMDGPU_PP_SENSOR_VDDGFX: val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & @@ -2360,7 +2368,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, + (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, NULL)), "[SetHardMinFreq] Set hard min uclk failed!", return ret); @@ -3579,7 +3587,7 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, + (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, NULL)), "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", return ret); @@ -3605,7 +3613,7 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinByFreq, - (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level, + (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level, NULL)), "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", return ret); @@ -3727,8 +3735,8 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) uint32_t i, latency; disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && - !hwmgr->display_config->multi_monitor_in_sync) || - vblank_too_short; + !hwmgr->display_config->multi_monitor_in_sync) || + vblank_too_short; latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; /* gfxclk */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h index 075c0094da9c..1ba9b5fe2a5d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h @@ -385,8 +385,7 @@ struct vega20_odn_data { struct vega20_odn_temp_table odn_temp_table; }; -enum OD8_FEATURE_ID -{ +enum OD8_FEATURE_ID { OD8_GFXCLK_LIMITS = 1 << 0, OD8_GFXCLK_CURVE = 1 << 1, OD8_UCLK_MAX = 1 << 2, @@ -399,8 +398,7 @@ enum OD8_FEATURE_ID OD8_FAN_ZERO_RPM_CONTROL = 1 << 9 }; -enum OD8_SETTING_ID -{ +enum OD8_SETTING_ID { OD8_SETTING_GFXCLK_FMIN = 0, OD8_SETTING_GFXCLK_FMAX, OD8_SETTING_GFXCLK_FREQ1, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h index 2222e29405c6..b468dddbefff 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h @@ -73,14 +73,13 @@ enum ATOM_VEGA20_ODSETTING_ID { }; typedef enum ATOM_VEGA20_ODSETTING_ID ATOM_VEGA20_ODSETTING_ID; -typedef struct _ATOM_VEGA20_OVERDRIVE8_RECORD -{ +typedef struct _ATOM_VEGA20_OVERDRIVE8_RECORD { UCHAR ucODTableRevision; ULONG ODFeatureCount; - UCHAR ODFeatureCapabilities [ATOM_VEGA20_ODFEATURE_MAX_COUNT]; //OD feature support flags + UCHAR 
ODFeatureCapabilities[ATOM_VEGA20_ODFEATURE_MAX_COUNT]; //OD feature support flags ULONG ODSettingCount; - ULONG ODSettingsMax [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Upper Limit for each OD Setting - ULONG ODSettingsMin [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Lower Limit for each OD Setting + ULONG ODSettingsMax[ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Upper Limit for each OD Setting + ULONG ODSettingsMin[ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Lower Limit for each OD Setting } ATOM_VEGA20_OVERDRIVE8_RECORD; enum ATOM_VEGA20_PPCLOCK_ID { @@ -99,16 +98,14 @@ enum ATOM_VEGA20_PPCLOCK_ID { }; typedef enum ATOM_VEGA20_PPCLOCK_ID ATOM_VEGA20_PPCLOCK_ID; -typedef struct _ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD -{ +typedef struct _ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD { UCHAR ucTableRevision; ULONG PowerSavingClockCount; // Count of PowerSavingClock Mode - ULONG PowerSavingClockMax [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Maximum array In MHz - ULONG PowerSavingClockMin [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Minimum array In MHz + ULONG PowerSavingClockMax[ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Maximum array In MHz + ULONG PowerSavingClockMin[ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Minimum array In MHz } ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD; -typedef struct _ATOM_VEGA20_POWERPLAYTABLE -{ +typedef struct _ATOM_VEGA20_POWERPLAYTABLE { struct atom_common_table_header sHeader; UCHAR ucTableRevision; USHORT usTableSize; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h index 01a7d66864f2..f4f9a104d170 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h @@ -44,8 +44,7 @@ struct phm_fan_speed_info { }; /* Automatic Power State Throttling */ -enum PHM_AutoThrottleSource -{ +enum PHM_AutoThrottleSource { PHM_AutoThrottleSource_Thermal, PHM_AutoThrottleSource_External }; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 612d66aeaab9..81650727a5de 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -190,8 +190,7 @@ struct phm_vce_clock_voltage_dependency_table { }; -enum SMU_ASIC_RESET_MODE -{ +enum SMU_ASIC_RESET_MODE { SMU_ASIC_RESET_MODE_0, SMU_ASIC_RESET_MODE_1, SMU_ASIC_RESET_MODE_2, @@ -516,7 +515,7 @@ struct phm_vq_budgeting_record { struct phm_vq_budgeting_table { uint8_t numEntries; - struct phm_vq_budgeting_record entries[1]; + struct phm_vq_budgeting_record entries[0]; }; struct phm_clock_and_voltage_limits { @@ -607,8 +606,7 @@ struct phm_ppt_v2_information { uint8_t uc_dcef_dpm_voltage_mode; }; -struct phm_ppt_v3_information -{ +struct phm_ppt_v3_information { uint8_t uc_thermal_controller_type; uint16_t us_small_power_limit1; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h index f7c41185097e..2003acc70ca0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h @@ -25,14 +25,12 @@ #include "power_state.h" -static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] = -{ +static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 
120000}, }; -static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] = -{ +static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, }; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h index e14072d45918..bfce9087a47f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h @@ -101,8 +101,7 @@ #define VR_SMIO_PATTERN_2 4 #define VR_STATIC_VOLTAGE 5 -struct SMU7_PIDController -{ +struct SMU7_PIDController { uint32_t Ki; int32_t LFWindupUL; int32_t LFWindupLL; @@ -136,8 +135,7 @@ typedef struct SMU7_PIDController SMU7_PIDController; #define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000 #define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 -struct SMU7_Firmware_Header -{ +struct SMU7_Firmware_Header { uint32_t Digest[5]; uint32_t Version; uint32_t HeaderSize; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h index 71c9b2d28640..b5f177412769 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h @@ -118,8 +118,7 @@ typedef struct { #endif -struct SMU71_PIDController -{ +struct SMU71_PIDController { uint32_t Ki; int32_t LFWindupUpperLim; int32_t LFWindupLowerLim; @@ -133,8 +132,7 @@ struct SMU71_PIDController typedef struct SMU71_PIDController SMU71_PIDController; -struct SMU7_LocalDpmScoreboard -{ +struct SMU7_LocalDpmScoreboard { uint32_t PercentageBusy; int32_t PIDError; @@ -179,8 +177,8 @@ struct SMU7_LocalDpmScoreboard uint8_t DteClampMode; uint8_t FpsClampMode; - uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS]; - uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS]; + uint16_t LevelResidencyCounters[SMU71_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters[SMU71_MAX_LEVELS_GRAPHICS]; void (*TargetStateCalculator)(uint8_t); void (*SavedTargetStateCalculator)(uint8_t); @@ -200,8 +198,7 @@ typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard; #define SMU7_MAX_VOLTAGE_CLIENTS 12 -struct SMU7_VoltageScoreboard -{ +struct SMU7_VoltageScoreboard { uint16_t CurrentVoltage; uint16_t HighestVoltage; uint16_t MaxVid; @@ -325,8 +322,7 @@ typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard; // -------------------------------------------------------------------------------------------------- -struct SMU7_ThermalScoreboard -{ +struct SMU7_ThermalScoreboard { int16_t GpuLimit; int16_t GpuHyst; uint16_t CurrGnbTemp; @@ -360,8 +356,7 @@ typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard; #define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 // All 'soft registers' should be uint32_t. 
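The SMU7x header hunks above and below are mechanical kernel coding-style fixes: checkpatch flags an opening brace placed on its own line after a struct or enum tag. A minimal before/after sketch, with a made-up type name (none of the identifiers below come from these headers):

#include <stdint.h>

/* before (checkpatch: "open brace '{' following struct go on the same line"):
 *	struct example_scoreboard
 *	{
 *		uint32_t busy;
 *	};
 */
struct example_scoreboard {
	uint32_t busy;
};

Because only whitespace and brace placement move, the generated layout of every converted structure is unchanged.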
-struct SMU71_SoftRegisters -{ +struct SMU71_SoftRegisters { uint32_t RefClockFrequency; uint32_t PmTimerPeriod; uint32_t FeatureEnables; @@ -413,8 +408,7 @@ struct SMU71_SoftRegisters typedef struct SMU71_SoftRegisters SMU71_SoftRegisters; -struct SMU71_Firmware_Header -{ +struct SMU71_Firmware_Header { uint32_t Digest[5]; uint32_t Version; uint32_t HeaderSize; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h index c6b12a4c00db..cf4b2c3c65bc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h @@ -37,8 +37,7 @@ enum Poly3rdOrderCoeff { POLY_3RD_ORDER_COUNT }; -struct SMU7_Poly3rdOrder_Data -{ +struct SMU7_Poly3rdOrder_Data { int32_t a; int32_t b; int32_t c; @@ -51,8 +50,7 @@ struct SMU7_Poly3rdOrder_Data typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data; -struct Power_Calculator_Data -{ +struct Power_Calculator_Data { uint16_t NoLoadVoltage; uint16_t LoadVoltage; uint16_t Resistance; @@ -71,8 +69,7 @@ struct Power_Calculator_Data typedef struct Power_Calculator_Data PowerCalculatorData_t; -struct Gc_Cac_Weight_Data -{ +struct Gc_Cac_Weight_Data { uint8_t index; uint32_t value; }; @@ -187,8 +184,7 @@ typedef struct { #define SMU73_THERMAL_CLAMP_MODE_COUNT 8 -struct SMU7_HystController_Data -{ +struct SMU7_HystController_Data { uint16_t waterfall_up; uint16_t waterfall_down; uint16_t waterfall_limit; @@ -199,8 +195,7 @@ struct SMU7_HystController_Data typedef struct SMU7_HystController_Data SMU7_HystController_Data; -struct SMU73_PIDController -{ +struct SMU73_PIDController { uint32_t Ki; int32_t LFWindupUpperLim; int32_t LFWindupLowerLim; @@ -215,8 +210,7 @@ struct SMU73_PIDController typedef struct SMU73_PIDController SMU73_PIDController; -struct SMU7_LocalDpmScoreboard -{ +struct SMU7_LocalDpmScoreboard { uint32_t PercentageBusy; int32_t PIDError; @@ -261,8 +255,8 @@ struct SMU7_LocalDpmScoreboard uint8_t DteClampMode; uint8_t FpsClampMode; - uint16_t LevelResidencyCounters [SMU73_MAX_LEVELS_GRAPHICS]; - uint16_t LevelSwitchCounters [SMU73_MAX_LEVELS_GRAPHICS]; + uint16_t LevelResidencyCounters[SMU73_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters[SMU73_MAX_LEVELS_GRAPHICS]; void (*TargetStateCalculator)(uint8_t); void (*SavedTargetStateCalculator)(uint8_t); @@ -315,8 +309,7 @@ typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t); typedef uint32_t SMU_VoltageLevel; -struct SMU7_VoltageScoreboard -{ +struct SMU7_VoltageScoreboard { SMU_VoltageLevel TargetVoltage; uint16_t MaxVid; uint8_t HighestVidOffset; @@ -354,7 +347,7 @@ struct SMU7_VoltageScoreboard VoltageChangeHandler_t functionLinks[6]; - uint16_t * VddcFollower1; + uint16_t *VddcFollower1; int16_t Driver_OD_RequestedVidOffset1; int16_t Driver_OD_RequestedVidOffset2; @@ -366,8 +359,7 @@ typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard; // ------------------------------------------------------------------------------------------------------------------------- #define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */ -struct SMU7_PCIeLinkSpeedScoreboard -{ +struct SMU7_PCIeLinkSpeedScoreboard { uint8_t DpmEnable; uint8_t DpmRunning; uint8_t DpmForce; @@ -396,8 +388,7 @@ typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard; #define SMU7_SCALE_I 7 #define SMU7_SCALE_R 12 -struct SMU7_PowerScoreboard -{ +struct SMU7_PowerScoreboard { uint32_t GpuPower; uint32_t VddcPower; @@ -436,8 +427,7 @@ typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard; #define 
SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 // All 'soft registers' should be uint32_t. -struct SMU73_SoftRegisters -{ +struct SMU73_SoftRegisters { uint32_t RefClockFrequency; uint32_t PmTimerPeriod; uint32_t FeatureEnables; @@ -493,8 +483,7 @@ struct SMU73_SoftRegisters typedef struct SMU73_SoftRegisters SMU73_SoftRegisters; -struct SMU73_Firmware_Header -{ +struct SMU73_Firmware_Header { uint32_t Digest[5]; uint32_t Version; uint32_t HeaderSize; @@ -708,9 +697,9 @@ typedef struct VFT_CELL_t VFT_CELL_t; struct VFT_TABLE_t { VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS]; - uint16_t AvfsGbv [NUM_VFT_COLUMNS]; - uint16_t BtcGbv [NUM_VFT_COLUMNS]; - uint16_t Temperature [TEMP_RANGE_MAXSTEPS]; + uint16_t AvfsGbv[NUM_VFT_COLUMNS]; + uint16_t BtcGbv[NUM_VFT_COLUMNS]; + uint16_t Temperature[TEMP_RANGE_MAXSTEPS]; uint8_t NumTemperatureSteps; uint8_t padding[3]; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h index 5916be08a7fe..fd0964ac465e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h @@ -27,8 +27,7 @@ #pragma pack(push, 1) -struct SMIO_Pattern -{ +struct SMIO_Pattern { uint16_t Voltage; uint8_t Smio; uint8_t padding; @@ -36,8 +35,7 @@ struct SMIO_Pattern typedef struct SMIO_Pattern SMIO_Pattern; -struct SMIO_Table -{ +struct SMIO_Table { SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS]; }; @@ -100,8 +98,7 @@ struct SMU73_Discrete_Ulv { typedef struct SMU73_Discrete_Ulv SMU73_Discrete_Ulv; -struct SMU73_Discrete_MemoryLevel -{ +struct SMU73_Discrete_MemoryLevel { uint32_t MinVoltage; uint32_t MinMvdd; @@ -124,10 +121,9 @@ struct SMU73_Discrete_MemoryLevel typedef struct SMU73_Discrete_MemoryLevel SMU73_Discrete_MemoryLevel; -struct SMU73_Discrete_LinkLevel -{ +struct SMU73_Discrete_LinkLevel { uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 - uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 uint8_t EnabledForActivity; uint8_t SPC; uint32_t DownThreshold; @@ -139,8 +135,7 @@ typedef struct SMU73_Discrete_LinkLevel SMU73_Discrete_LinkLevel; // MC ARB DRAM Timing registers. -struct SMU73_Discrete_MCArbDramTimingTableEntry -{ +struct SMU73_Discrete_MCArbDramTimingTableEntry { uint32_t McArbDramTiming; uint32_t McArbDramTiming2; uint8_t McArbBurstTime; @@ -151,16 +146,14 @@ struct SMU73_Discrete_MCArbDramTimingTableEntry typedef struct SMU73_Discrete_MCArbDramTimingTableEntry SMU73_Discrete_MCArbDramTimingTableEntry; -struct SMU73_Discrete_MCArbDramTimingTable -{ +struct SMU73_Discrete_MCArbDramTimingTable { SMU73_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; }; typedef struct SMU73_Discrete_MCArbDramTimingTable SMU73_Discrete_MCArbDramTimingTable; // UVD VCLK/DCLK state (level) definition. -struct SMU73_Discrete_UvdLevel -{ +struct SMU73_Discrete_UvdLevel { uint32_t VclkFrequency; uint32_t DclkFrequency; uint32_t MinVoltage; @@ -172,8 +165,7 @@ struct SMU73_Discrete_UvdLevel typedef struct SMU73_Discrete_UvdLevel SMU73_Discrete_UvdLevel; // Clocks for other external blocks (VCE, ACP, SAMU). 
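Alongside the brace fixes, these headers drop the stray blank before array subscripts (LevelResidencyCounters [N] becomes LevelResidencyCounters[N]) and rebind the pointer star to the declarator (uint16_t * VddcFollower1 becomes uint16_t *VddcFollower1). A small sketch of both rules, using placeholder names:

#include <stdint.h>

/* before (both forms are flagged by checkpatch):
 *	uint16_t level_counters [8];
 *	uint16_t * vddc_follower;
 */
uint16_t level_counters[8];	/* no space before the subscript */
uint16_t *vddc_follower;	/* '*' attaches to the name, not the type */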
-struct SMU73_Discrete_ExtClkLevel -{ +struct SMU73_Discrete_ExtClkLevel { uint32_t Frequency; uint32_t MinVoltage; uint8_t Divider; @@ -182,8 +174,7 @@ struct SMU73_Discrete_ExtClkLevel typedef struct SMU73_Discrete_ExtClkLevel SMU73_Discrete_ExtClkLevel; -struct SMU73_Discrete_StateInfo -{ +struct SMU73_Discrete_StateInfo { uint32_t SclkFrequency; uint32_t MclkFrequency; uint32_t VclkFrequency; @@ -206,8 +197,7 @@ struct SMU73_Discrete_StateInfo typedef struct SMU73_Discrete_StateInfo SMU73_Discrete_StateInfo; -struct SMU73_Discrete_DpmTable -{ +struct SMU73_Discrete_DpmTable { // Multi-DPM controller settings SMU73_PIDController GraphicsPIDController; SMU73_PIDController MemoryPIDController; @@ -225,9 +215,9 @@ struct SMU73_Discrete_DpmTable uint32_t MvddLevelCount; - uint8_t BapmVddcVidHiSidd [SMU73_MAX_LEVELS_VDDC]; - uint8_t BapmVddcVidLoSidd [SMU73_MAX_LEVELS_VDDC]; - uint8_t BapmVddcVidHiSidd2 [SMU73_MAX_LEVELS_VDDC]; + uint8_t BapmVddcVidHiSidd[SMU73_MAX_LEVELS_VDDC]; + uint8_t BapmVddcVidLoSidd[SMU73_MAX_LEVELS_VDDC]; + uint8_t BapmVddcVidHiSidd2[SMU73_MAX_LEVELS_VDDC]; uint8_t GraphicsDpmLevelCount; uint8_t MemoryDpmLevelCount; @@ -246,19 +236,19 @@ struct SMU73_Discrete_DpmTable uint32_t Reserved[4]; // State table entries for each DPM state - SMU73_Discrete_GraphicsLevel GraphicsLevel [SMU73_MAX_LEVELS_GRAPHICS]; + SMU73_Discrete_GraphicsLevel GraphicsLevel[SMU73_MAX_LEVELS_GRAPHICS]; SMU73_Discrete_MemoryLevel MemoryACPILevel; - SMU73_Discrete_MemoryLevel MemoryLevel [SMU73_MAX_LEVELS_MEMORY]; - SMU73_Discrete_LinkLevel LinkLevel [SMU73_MAX_LEVELS_LINK]; + SMU73_Discrete_MemoryLevel MemoryLevel[SMU73_MAX_LEVELS_MEMORY]; + SMU73_Discrete_LinkLevel LinkLevel[SMU73_MAX_LEVELS_LINK]; SMU73_Discrete_ACPILevel ACPILevel; - SMU73_Discrete_UvdLevel UvdLevel [SMU73_MAX_LEVELS_UVD]; - SMU73_Discrete_ExtClkLevel VceLevel [SMU73_MAX_LEVELS_VCE]; - SMU73_Discrete_ExtClkLevel AcpLevel [SMU73_MAX_LEVELS_ACP]; - SMU73_Discrete_ExtClkLevel SamuLevel [SMU73_MAX_LEVELS_SAMU]; + SMU73_Discrete_UvdLevel UvdLevel[SMU73_MAX_LEVELS_UVD]; + SMU73_Discrete_ExtClkLevel VceLevel[SMU73_MAX_LEVELS_VCE]; + SMU73_Discrete_ExtClkLevel AcpLevel[SMU73_MAX_LEVELS_ACP]; + SMU73_Discrete_ExtClkLevel SamuLevel[SMU73_MAX_LEVELS_SAMU]; SMU73_Discrete_Ulv Ulv; uint32_t SclkStepSize; - uint32_t Smio [SMU73_MAX_ENTRIES_SMIO]; + uint32_t Smio[SMU73_MAX_ENTRIES_SMIO]; uint8_t UvdBootLevel; uint8_t VceBootLevel; @@ -368,8 +358,7 @@ typedef struct SMU73_Discrete_DpmTable SMU73_Discrete_DpmTable; // --------------------------------------------------- Fan Table ----------------------------------------------------------- -struct SMU73_Discrete_FanTable -{ +struct SMU73_Discrete_FanTable { uint16_t FdoMode; int16_t TempMin; int16_t TempMed; @@ -397,8 +386,7 @@ typedef struct SMU73_Discrete_FanTable SMU73_Discrete_FanTable; -struct SMU7_MclkDpmScoreboard -{ +struct SMU7_MclkDpmScoreboard { uint32_t PercentageBusy; @@ -448,8 +436,8 @@ struct SMU7_MclkDpmScoreboard uint8_t VbiWaitCounter; uint8_t EnabledLevelsChange; - uint16_t LevelResidencyCounters [SMU73_MAX_LEVELS_MEMORY]; - uint16_t LevelSwitchCounters [SMU73_MAX_LEVELS_MEMORY]; + uint16_t LevelResidencyCounters[SMU73_MAX_LEVELS_MEMORY]; + uint16_t LevelSwitchCounters[SMU73_MAX_LEVELS_MEMORY]; void (*TargetStateCalculator)(uint8_t); void (*SavedTargetStateCalculator)(uint8_t); @@ -469,8 +457,7 @@ struct SMU7_MclkDpmScoreboard typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard; -struct SMU7_UlvScoreboard -{ +struct SMU7_UlvScoreboard { uint8_t EnterUlv; uint8_t 
ExitUlv; uint8_t UlvActive; @@ -485,8 +472,7 @@ struct SMU7_UlvScoreboard typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard; -struct VddgfxSavedRegisters -{ +struct VddgfxSavedRegisters { uint32_t GPU_DBG[3]; uint32_t MEC_BaseAddress_Hi; uint32_t MEC_BaseAddress_Lo; @@ -497,8 +483,7 @@ struct VddgfxSavedRegisters typedef struct VddgfxSavedRegisters VddgfxSavedRegisters; -struct SMU7_VddGfxScoreboard -{ +struct SMU7_VddGfxScoreboard { uint8_t VddGfxEnable; uint8_t VddGfxActive; uint8_t VPUResetOccured; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h index 771523001533..7d5ed7751976 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h @@ -224,8 +224,8 @@ struct SMU7_LocalDpmScoreboard { uint8_t DteClampMode; uint8_t FpsClampMode; - uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS]; - uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS]; + uint16_t LevelResidencyCounters[SMU75_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters[SMU75_MAX_LEVELS_GRAPHICS]; void (*TargetStateCalculator)(uint8_t); void (*SavedTargetStateCalculator)(uint8_t); @@ -316,7 +316,7 @@ struct SMU7_VoltageScoreboard { VoltageChangeHandler_t functionLinks[6]; - uint16_t * VddcFollower1; + uint16_t *VddcFollower1; int16_t Driver_OD_RequestedVidOffset1; int16_t Driver_OD_RequestedVidOffset2; }; @@ -677,9 +677,9 @@ typedef struct SCS_CELL_t SCS_CELL_t; struct VFT_TABLE_t { VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS]; - uint16_t AvfsGbv [NUM_VFT_COLUMNS]; - uint16_t BtcGbv [NUM_VFT_COLUMNS]; - int16_t Temperature [TEMP_RANGE_MAXSTEPS]; + uint16_t AvfsGbv[NUM_VFT_COLUMNS]; + uint16_t BtcGbv[NUM_VFT_COLUMNS]; + int16_t Temperature[TEMP_RANGE_MAXSTEPS]; #ifdef SMU__FIRMWARE_SCKS_PRESENT__1 SCS_CELL_t ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS]; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h index 78ada9ffd508..e130f52fe8d6 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h @@ -36,8 +36,7 @@ #define SMU7_NUM_NON_TES 2 // All 'soft registers' should be uint32_t. -struct SMU7_SoftRegisters -{ +struct SMU7_SoftRegisters { uint32_t RefClockFrequency; uint32_t PmTimerP; uint32_t FeatureEnables; @@ -80,8 +79,7 @@ struct SMU7_SoftRegisters typedef struct SMU7_SoftRegisters SMU7_SoftRegisters; -struct SMU7_Fusion_GraphicsLevel -{ +struct SMU7_Fusion_GraphicsLevel { uint32_t MinVddNb; uint32_t SclkFrequency; @@ -111,8 +109,7 @@ struct SMU7_Fusion_GraphicsLevel typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel; -struct SMU7_Fusion_GIOLevel -{ +struct SMU7_Fusion_GIOLevel { uint8_t EnabledForActivity; uint8_t LclkDid; uint8_t Vid; @@ -137,8 +134,7 @@ struct SMU7_Fusion_GIOLevel typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel; // UVD VCLK/DCLK state (level) definition. -struct SMU7_Fusion_UvdLevel -{ +struct SMU7_Fusion_UvdLevel { uint32_t VclkFrequency; uint32_t DclkFrequency; uint16_t MinVddNb; @@ -155,8 +151,7 @@ struct SMU7_Fusion_UvdLevel typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel; // Clocks for other external blocks (VCE, ACP, SAMU). 
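All of these SMU7x structures are consumed by SMU firmware, so whitespace-only edits like the ones in this series must leave member offsets and sizes untouched. A compile-time guard can make that invariant explicit; the check below is a hypothetical addition, not part of this patch, and the 12-byte figure is an assumed placeholder to be replaced with the size the firmware actually expects:

#include <linux/build_bug.h>

/* Fail the build if a style cleanup ever perturbs the firmware ABI. */
static_assert(sizeof(struct SMU7_Fusion_UvdLevel) == 12,
	      "SMU7_Fusion_UvdLevel layout changed");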
-struct SMU7_Fusion_ExtClkLevel -{ +struct SMU7_Fusion_ExtClkLevel { uint32_t Frequency; uint16_t MinVoltage; uint8_t Divider; @@ -166,8 +161,7 @@ struct SMU7_Fusion_ExtClkLevel }; typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel; -struct SMU7_Fusion_ACPILevel -{ +struct SMU7_Fusion_ACPILevel { uint32_t Flags; uint32_t MinVddNb; uint32_t SclkFrequency; @@ -181,8 +175,7 @@ struct SMU7_Fusion_ACPILevel typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel; -struct SMU7_Fusion_NbDpm -{ +struct SMU7_Fusion_NbDpm { uint8_t DpmXNbPsHi; uint8_t DpmXNbPsLo; uint8_t Dpm0PgNbPsHi; @@ -197,8 +190,7 @@ struct SMU7_Fusion_NbDpm typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm; -struct SMU7_Fusion_StateInfo -{ +struct SMU7_Fusion_StateInfo { uint32_t SclkFrequency; uint32_t LclkFrequency; uint32_t VclkFrequency; @@ -214,8 +206,7 @@ struct SMU7_Fusion_StateInfo typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo; -struct SMU7_Fusion_DpmTable -{ +struct SMU7_Fusion_DpmTable { uint32_t SystemFlags; SMU7_PIDController GraphicsPIDController; @@ -230,12 +221,12 @@ struct SMU7_Fusion_DpmTable uint8_t SamuLevelCount; uint16_t FpsHighT; - SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE]; + SMU7_Fusion_GraphicsLevel GraphicsLevel[SMU__NUM_SCLK_DPM_STATE]; SMU7_Fusion_ACPILevel ACPILevel; - SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD]; - SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE]; - SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP]; - SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU]; + SMU7_Fusion_UvdLevel UvdLevel[SMU7_MAX_LEVELS_UVD]; + SMU7_Fusion_ExtClkLevel VceLevel[SMU7_MAX_LEVELS_VCE]; + SMU7_Fusion_ExtClkLevel AcpLevel[SMU7_MAX_LEVELS_ACP]; + SMU7_Fusion_ExtClkLevel SamuLevel[SMU7_MAX_LEVELS_SAMU]; uint8_t UvdBootLevel; uint8_t VceBootLevel; @@ -266,10 +257,9 @@ struct SMU7_Fusion_DpmTable }; -struct SMU7_Fusion_GIODpmTable -{ +struct SMU7_Fusion_GIODpmTable { - SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO]; + SMU7_Fusion_GIOLevel GIOLevel[SMU7_MAX_LEVELS_GIO]; SMU7_PIDController GioPIDController; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h index faae4b918d90..2c69a5694f94 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h @@ -178,20 +178,20 @@ typedef struct { uint8_t padding8_2[2]; /* SOC Frequencies */ - PllSetting_t GfxclkLevel [NUM_GFXCLK_DPM_LEVELS]; + PllSetting_t GfxclkLevel[NUM_GFXCLK_DPM_LEVELS]; - uint8_t SocclkDid [NUM_SOCCLK_DPM_LEVELS]; /* DID */ - uint8_t SocDpmVoltageIndex [NUM_SOCCLK_DPM_LEVELS]; + uint8_t SocclkDid[NUM_SOCCLK_DPM_LEVELS]; /* DID */ + uint8_t SocDpmVoltageIndex[NUM_SOCCLK_DPM_LEVELS]; - uint8_t VclkDid [NUM_UVD_DPM_LEVELS]; /* DID */ - uint8_t DclkDid [NUM_UVD_DPM_LEVELS]; /* DID */ - uint8_t UvdDpmVoltageIndex [NUM_UVD_DPM_LEVELS]; + uint8_t VclkDid[NUM_UVD_DPM_LEVELS]; /* DID */ + uint8_t DclkDid[NUM_UVD_DPM_LEVELS]; /* DID */ + uint8_t UvdDpmVoltageIndex[NUM_UVD_DPM_LEVELS]; - uint8_t EclkDid [NUM_VCE_DPM_LEVELS]; /* DID */ - uint8_t VceDpmVoltageIndex [NUM_VCE_DPM_LEVELS]; + uint8_t EclkDid[NUM_VCE_DPM_LEVELS]; /* DID */ + uint8_t VceDpmVoltageIndex[NUM_VCE_DPM_LEVELS]; - uint8_t Mp0clkDid [NUM_MP0CLK_DPM_LEVELS]; /* DID */ - uint8_t Mp0DpmVoltageIndex [NUM_MP0CLK_DPM_LEVELS]; + uint8_t Mp0clkDid[NUM_MP0CLK_DPM_LEVELS]; /* DID */ + uint8_t Mp0DpmVoltageIndex[NUM_MP0CLK_DPM_LEVELS]; DisplayClockTable_t 
DisplayClockTable[DSPCLK_COUNT][NUM_DSPCLK_LEVELS]; QuadraticInt_t DisplayClock2Gfxclk[DSPCLK_COUNT]; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index 4bc8db1be738..9e4228232f02 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -2732,7 +2732,7 @@ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) static int ci_smu_init(struct pp_hwmgr *hwmgr) { - struct ci_smumgr *ci_priv = NULL; + struct ci_smumgr *ci_priv; ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index 02c094a06605..5e43ad2b2956 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -332,7 +332,7 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) static int fiji_smu_init(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *fiji_priv = NULL; + struct fiji_smumgr *fiji_priv; fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 060fc140c574..97d9802fe673 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -259,7 +259,7 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr) static int iceland_smu_init(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *iceland_priv = NULL; + struct iceland_smumgr *iceland_priv; iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c index e7ed2a7adf8f..ff6b563ecbf5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c @@ -1888,7 +1888,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); - data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? 
true : false; + data->apply_avfs_cks_off_voltage = avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1; } return result; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c index acbe41174d7e..6fe6e6abb5d8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c @@ -226,7 +226,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr) static int tonga_smu_init(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *tonga_priv = NULL; + struct tonga_smumgr *tonga_priv; tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL); if (tonga_priv == NULL) diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c index 7d024d3facef..34c9f59b889a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c @@ -295,9 +295,8 @@ static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr) static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr) { - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; + return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON); } static uint32_t vegam_get_mac_definition(uint32_t value) @@ -1660,7 +1659,7 @@ static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr) (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); data->apply_avfs_cks_off_voltage = - (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; + avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1; } return result; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 222af2fae745..f005a90c35af 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -618,7 +618,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; arcturus_set_ppt_funcs(smu); /* OD is not supported on Arcturus */ - smu->od_enabled =false; + smu->od_enabled = false; break; case IP_VERSION(13, 0, 2): aldebaran_set_ppt_funcs(smu); @@ -1648,7 +1648,7 @@ static int smu_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = adev->powerplay.pp_handle; - if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; smu_dpm_set_vcn_enable(smu, false); @@ -1700,7 +1700,7 @@ static int smu_suspend(void *handle) int ret; uint64_t count; - if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; if (!smu->pm_enabled) @@ -2217,8 +2217,7 @@ const struct amd_ip_funcs smu_ip_funcs = { .set_powergating_state = smu_set_powergating_state, }; -const struct amdgpu_ip_block_version smu_v11_0_ip_block = -{ +const struct amdgpu_ip_block_version smu_v11_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_SMC, .major = 11, .minor = 0, @@ -2226,8 +2225,7 @@ const struct amdgpu_ip_block_version smu_v11_0_ip_block = .funcs = &smu_ip_funcs, }; -const struct amdgpu_ip_block_version smu_v12_0_ip_block = -{ +const struct amdgpu_ip_block_version smu_v12_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_SMC, .major = 12, .minor = 
0, @@ -2235,8 +2233,7 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = .funcs = &smu_ip_funcs, }; -const struct amdgpu_ip_block_version smu_v13_0_ip_block = -{ +const struct amdgpu_ip_block_version smu_v13_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_SMC, .major = 13, .minor = 0, @@ -2337,7 +2334,7 @@ int smu_get_power_limit(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - switch(pp_power_type) { + switch (pp_power_type) { case PP_PWR_TYPE_SUSTAINED: limit_type = SMU_DEFAULT_PPT_LIMIT; break; @@ -2349,7 +2346,7 @@ int smu_get_power_limit(void *handle, break; } - switch(pp_limit_level){ + switch (pp_limit_level) { case PP_PWR_LIMIT_CURRENT: limit_level = SMU_PPT_LIMIT_CURRENT; break; @@ -2595,7 +2592,7 @@ static int smu_read_sensor(void *handle, *size = 4; break; case AMDGPU_PP_SENSOR_VCN_POWER_STATE: - *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1; + *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; *size = 4; break; case AMDGPU_PP_SENSOR_MIN_FAN_RPM: @@ -2868,7 +2865,7 @@ static int smu_set_xgmi_pstate(void *handle, if (smu->ppt_funcs->set_xgmi_pstate) ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); - if(ret) + if (ret) dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 6e2069dcb6b9..95eb8a5eb54f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -200,29 +200,25 @@ struct smu_power_state { struct smu_hw_power_state hardware; }; -enum smu_power_src_type -{ +enum smu_power_src_type { SMU_POWER_SOURCE_AC, SMU_POWER_SOURCE_DC, SMU_POWER_SOURCE_COUNT, }; -enum smu_ppt_limit_type -{ +enum smu_ppt_limit_type { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT, }; -enum smu_ppt_limit_level -{ +enum smu_ppt_limit_level { SMU_PPT_LIMIT_MIN = -1, SMU_PPT_LIMIT_CURRENT, SMU_PPT_LIMIT_DEFAULT, SMU_PPT_LIMIT_MAX, }; -enum smu_memory_pool_size -{ +enum smu_memory_pool_size { SMU_MEMORY_POOL_SIZE_ZERO = 0, SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000, SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000, @@ -282,8 +278,7 @@ struct smu_clock_info { uint32_t max_bus_bandwidth; }; -struct smu_bios_boot_up_values -{ +struct smu_bios_boot_up_values { uint32_t revision; uint32_t gfxclk; uint32_t uclk; @@ -305,8 +300,7 @@ struct smu_bios_boot_up_values uint32_t firmware_caps; }; -enum smu_table_id -{ +enum smu_table_id { SMU_TABLE_PPTABLE = 0, SMU_TABLE_WATERMARKS, SMU_TABLE_CUSTOM_DPM, @@ -326,8 +320,7 @@ enum smu_table_id SMU_TABLE_COUNT, }; -struct smu_table_context -{ +struct smu_table_context { void *power_play_table; uint32_t power_play_table_size; void *hardcode_pptable; @@ -390,8 +383,7 @@ struct smu_power_context { }; #define SMU_FEATURE_MAX (64) -struct smu_feature -{ +struct smu_feature { uint32_t feature_num; DECLARE_BITMAP(supported, SMU_FEATURE_MAX); DECLARE_BITMAP(allowed, SMU_FEATURE_MAX); @@ -416,21 +408,18 @@ struct mclock_latency_table { struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; }; -enum smu_reset_mode -{ +enum smu_reset_mode { SMU_RESET_MODE_0, SMU_RESET_MODE_1, SMU_RESET_MODE_2, }; -enum smu_baco_state -{ +enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, }; -struct smu_baco_context -{ +struct smu_baco_context { uint32_t state; bool platform_support; bool maco_support; @@ -478,8 +467,7 @@ struct stb_context { #define WORKLOAD_POLICY_MAX 7 -struct smu_context -{ +struct 
smu_context { struct amdgpu_device *adev; struct amdgpu_irq_src irq_source; @@ -1398,6 +1386,7 @@ typedef enum { METRICS_PCIE_RATE, METRICS_PCIE_WIDTH, METRICS_CURR_FANPWM, + METRICS_CURR_SOCKETPOWER, } MetricsMember_t; enum smu_cmn2asic_mapping_type { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h index 7589faa0232d..779c2524806c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h @@ -23,7 +23,7 @@ #ifndef __SMU13_DRIVER_IF_V13_0_5_H__ #define __SMU13_DRIVER_IF_V13_0_5_H__ -#define SMU13_0_5_DRIVER_IF_VERSION 4 +#define SMU13_0_5_DRIVER_IF_VERSION 5 // Throttler Status Bitmask #define THROTTLER_STATUS_BIT_SPL 0 @@ -103,7 +103,6 @@ typedef struct { uint16_t ThrottlerStatus; uint16_t CurrentSocketPower; //[mW] - uint16_t spare1; } SmuMetrics_t; //Freq in MHz diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h index beab6d7b28b7..630132c4a76b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h @@ -52,8 +52,7 @@ static unsigned int DbiPrbs7[] = //4096 bytes, 256 byte aligned -static unsigned int NoDbiPrbs7[] = -{ +static unsigned int NoDbiPrbs7[] = { 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, @@ -121,8 +120,7 @@ static unsigned int NoDbiPrbs7[] = }; // 4096 bytes, 256 byte aligned -static unsigned int DbiPrbs7[] = -{ +static unsigned int DbiPrbs7[] = { 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index d466db6f0ad4..a0e5ad0381d6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -64,11 +64,9 @@ #define LINK_SPEED_MAX 3 static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16}; -static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160}; static const -struct smu_temperature_range __maybe_unused smu11_thermal_policy[] = -{ +struct smu_temperature_range __maybe_unused smu11_thermal_policy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, }; @@ -96,8 +94,8 @@ struct smu_11_0_dpm_table { }; struct smu_11_0_pcie_table { - 
uint8_t pcie_gen[MAX_PCIE_CONF]; - uint8_t pcie_lane[MAX_PCIE_CONF]; + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; }; struct smu_11_0_dpm_tables { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h index 0116e3d04fad..df7430876e0c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h @@ -101,8 +101,7 @@ enum SMU_11_0_ODSETTING_ID { }; #define SMU_11_0_MAX_ODSETTING 32 //Maximum Number of ODSettings -struct smu_11_0_overdrive_table -{ +struct smu_11_0_overdrive_table { uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION uint8_t reserve[3]; //Zero filled field reserved for future use uint32_t feature_count; //Total number of supported features @@ -127,8 +126,7 @@ enum SMU_11_0_PPCLOCK_ID { }; #define SMU_11_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks -struct smu_11_0_power_saving_clock_table -{ +struct smu_11_0_power_saving_clock_table { uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION uint8_t reserve[3]; //Zero filled field reserved for future use uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT @@ -136,8 +134,7 @@ struct smu_11_0_power_saving_clock_table uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz }; -struct smu_11_0_powerplay_table -{ +struct smu_11_0_powerplay_table { struct atom_common_table_header header; uint8_t table_revision; uint16_t table_size; //Driver portion table size. The offset to smc_pptable including header size @@ -145,14 +142,14 @@ struct smu_11_0_powerplay_table uint32_t golden_revision; uint16_t format_id; uint32_t platform_caps; //POWERPLAYABLE::ulPlatformCaps - + uint8_t thermal_controller_type; //one of SMU_11_0_PP_THERMALCONTROLLER uint16_t small_power_limit1; uint16_t small_power_limit2; uint16_t boost_power_limit; - uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning. - uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning. + uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning. + uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning. 
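The smu_v11_0.h hunk above deletes the link_speed[] table outright because nothing references it, while link_width[] and smu11_thermal_policy stay tagged __maybe_unused: a static const object defined in a header is instantiated by every translation unit that includes it, and any includer that never reads it would otherwise trip -Wunused-const-variable. A sketch of the pattern, with an illustrative table name (the values mirror the surviving link_width table):

#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Header-defined lookup table: not every includer uses it. */
static const __maybe_unused uint16_t example_link_width[] = {
	0, 1, 2, 4, 8, 12, 16
};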
uint16_t software_shutdown_temp; uint16_t reserve[6]; //Zero filled field reserved for future use diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index eadbe0149cae..eb694f9f556d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -41,8 +41,7 @@ #define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2 #define SMU_13_0_7_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 -enum SMU_13_0_7_ODFEATURE_CAP -{ +enum SMU_13_0_7_ODFEATURE_CAP { SMU_13_0_7_ODCAP_GFXCLK_LIMITS = 0, SMU_13_0_7_ODCAP_UCLK_LIMITS, SMU_13_0_7_ODCAP_POWER_LIMIT, @@ -62,8 +61,7 @@ enum SMU_13_0_7_ODFEATURE_CAP SMU_13_0_7_ODCAP_COUNT, }; -enum SMU_13_0_7_ODFEATURE_ID -{ +enum SMU_13_0_7_ODFEATURE_ID { SMU_13_0_7_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature SMU_13_0_7_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_UCLK_LIMITS, //UCLK Limit feature SMU_13_0_7_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_7_ODCAP_POWER_LIMIT, //Power Limit feature @@ -85,8 +83,7 @@ enum SMU_13_0_7_ODFEATURE_ID #define SMU_13_0_7_MAX_ODFEATURE 32 //Maximum Number of OD Features -enum SMU_13_0_7_ODSETTING_ID -{ +enum SMU_13_0_7_ODSETTING_ID { SMU_13_0_7_ODSETTING_GFXCLKFMAX = 0, SMU_13_0_7_ODSETTING_GFXCLKFMIN, SMU_13_0_7_ODSETTING_UCLKFMIN, @@ -123,8 +120,7 @@ enum SMU_13_0_7_ODSETTING_ID }; #define SMU_13_0_7_MAX_ODSETTING 64 //Maximum Number of ODSettings -enum SMU_13_0_7_PWRMODE_SETTING -{ +enum SMU_13_0_7_PWRMODE_SETTING { SMU_13_0_7_PMSETTING_POWER_LIMIT_QUIET = 0, SMU_13_0_7_PMSETTING_POWER_LIMIT_BALANCE, SMU_13_0_7_PMSETTING_POWER_LIMIT_TURBO, @@ -144,8 +140,7 @@ enum SMU_13_0_7_PWRMODE_SETTING }; #define SMU_13_0_7_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings -struct smu_13_0_7_overdrive_table -{ +struct smu_13_0_7_overdrive_table { uint8_t revision; //Revision = SMU_13_0_7_PP_OVERDRIVE_VERSION uint8_t reserve[3]; //Zero filled field reserved for future use uint32_t feature_count; //Total number of supported features @@ -156,8 +151,7 @@ struct smu_13_0_7_overdrive_table int16_t pm_setting[SMU_13_0_7_MAX_PMSETTING]; //Optimized power mode feature settings }; -enum SMU_13_0_7_PPCLOCK_ID -{ +enum SMU_13_0_7_PPCLOCK_ID { SMU_13_0_7_PPCLOCK_GFXCLK = 0, SMU_13_0_7_PPCLOCK_SOCCLK, SMU_13_0_7_PPCLOCK_UCLK, @@ -175,8 +169,7 @@ enum SMU_13_0_7_PPCLOCK_ID }; #define SMU_13_0_7_MAX_PPCLOCK 16 //Maximum Number of PP Clocks -struct smu_13_0_7_powerplay_table -{ +struct smu_13_0_7_powerplay_table { struct atom_common_table_header header; //For PLUM_BONITO, header.format_revision = 15, header.content_revision = 0 uint8_t table_revision; //For PLUM_BONITO, table_revision = 2 uint8_t padding; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 3bb18396d2f9..704a2b577a0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -598,7 +598,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; @@ -1130,7 +1130,7 @@ static int arcturus_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case 
AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = arcturus_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); @@ -1169,6 +1169,7 @@ static int arcturus_read_sensor(struct smu_context *smu, ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: default: ret = -EOPNOTSUPP; break; @@ -1482,7 +1483,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return ret; if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && - (smu_version >=0x360d00)) { + (smu_version >= 0x360d00)) { ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index ca4d97b7f576..9548bd3c624b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -154,10 +154,14 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, case METRICS_CURR_UCLK: *value = metrics->Current.MemclkFrequency; break; - case METRICS_AVERAGE_SOCKETPOWER: + case METRICS_CURR_SOCKETPOWER: *value = (metrics->Current.CurrentSocketPower << 8) / 1000; break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Average.CurrentSocketPower << 8) / + 1000; + break; case METRICS_TEMPERATURE_EDGE: *value = metrics->Current.GfxTemperature / 100 * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; @@ -208,12 +212,18 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu, *(uint32_t *)data *= 100; *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_TEMPERATURE_HOTSPOT, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 95f6d821bacb..18487ae10bcf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -136,7 +136,7 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), - MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange, 0), + MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange, 0), MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0), MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0), MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0), @@ -556,7 +556,7 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; int ret = 0; @@ -642,7 +642,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; 
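The sensor hunks running through arcturus, cyan_skillfish and the navi/sienna_cichlid/vangogh/renoir files below all follow one scheme: the old AMDGPU_PP_SENSOR_GPU_POWER case is split into AMDGPU_PP_SENSOR_GPU_AVG_POWER (backed by METRICS_AVERAGE_SOCKETPOWER) and AMDGPU_PP_SENSOR_GPU_INPUT_POWER (backed by the new METRICS_CURR_SOCKETPOWER), and whichever variant an ASIC cannot report falls through to -EOPNOTSUPP. A condensed sketch of the resulting read_sensor shape; example_read_sensor and asic_get_metrics_data are placeholder names standing in for each ASIC's real callbacks:

static int example_read_sensor(struct smu_context *smu,
			       enum amd_pp_sensors sensor,
			       void *data, uint32_t *size)
{
	int ret;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		/* windowed average reported by the PMFW */
		ret = asic_get_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
					    (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		/* instantaneous socket power reported by the PMFW */
		ret = asic_get_metrics_data(smu, METRICS_CURR_SOCKETPOWER,
					    (uint32_t *)data);
		*size = 4;
		break;
	default:
		/* unsupported variants (and unknown sensors) bail out */
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

This matches, for instance, the cyan_skillfish change, where the average comes from metrics->Average.CurrentSocketPower and the input power from metrics->Current.CurrentSocketPower.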
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; @@ -731,7 +731,7 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_NV12_legacy_t *metrics = (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table; int ret = 0; @@ -817,7 +817,7 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_NV12_t *metrics = (SmuMetrics_NV12_t *)smu_table->metrics_table; int ret = 0; @@ -1686,7 +1686,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, return 0; break; case SMU_DCEFCLK: - dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); + dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n"); break; default: @@ -2182,7 +2182,7 @@ static int navi10_read_sensor(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; PPTable_t *pptable = table_context->driver_pptable; - if(!data || !size) + if (!data || !size) return -EINVAL; switch (sensor) { @@ -2202,7 +2202,7 @@ static int navi10_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); @@ -2240,6 +2240,7 @@ static int navi10_read_sensor(struct smu_context *smu, ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: default: ret = -EOPNOTSUPP; break; @@ -2317,15 +2318,15 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu, uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal; uint32_t max_memory_clock = max_sustainable_clocks->uclock; - if(smu->disable_uclk_switch == disable_memory_clock_switch) + if (smu->disable_uclk_switch == disable_memory_clock_switch) return 0; - if(disable_memory_clock_switch) + if (disable_memory_clock_switch) ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0); else ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0); - if(!ret) + if (!ret) smu->disable_uclk_switch = disable_memory_clock_switch; return ret; @@ -2559,7 +2560,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu) return 0; } -static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { +static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) +{ int i; int ret = 0; struct smu_table_context *table_context = &smu->smu_table; @@ -3368,7 +3370,7 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu, ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) ret = navi10_get_gpu_metrics(smu, table); else - ret =navi10_get_legacy_gpu_metrics(smu, table); + ret = navi10_get_legacy_gpu_metrics(smu, table); break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index f0800c0c5168..4bb289f9b4b8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1902,7 +1902,7 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = sienna_cichlid_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); @@ -1962,6 +1962,7 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; } break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: default: ret = -EOPNOTSUPP; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 067b4e0b026c..201cec599842 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -390,6 +390,10 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, *value = metrics->Current.UvdActivity; break; case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Average.CurrentSocketPower << 8) / + 1000; + break; + case METRICS_CURR_SOCKETPOWER: *value = (metrics->Current.CurrentSocketPower << 8) / 1000; break; @@ -1536,12 +1540,18 @@ static int vangogh_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_CURR_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_EDGE_TEMP: ret = vangogh_common_get_smu_metrics_data(smu, METRICS_TEMPERATURE_EDGE, @@ -1854,6 +1864,86 @@ static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu, return sizeof(struct gpu_metrics_v2_3); } +static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu, + void **table) +{ + SmuMetrics_t metrics; + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_4 *gpu_metrics = + (struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + memcpy(&gpu_metrics->temperature_core[0], + &metrics.Current.CoreTemperature[0], + sizeof(uint16_t) * 4); + gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; + + gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; + gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; + memcpy(&gpu_metrics->average_temperature_core[0], + &metrics.Average.CoreTemperature[0], + sizeof(uint16_t) * 4); + gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; + + gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; + gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; + + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_cpu_power = metrics.Current.Power[0]; + gpu_metrics->average_soc_power = metrics.Current.Power[1]; + gpu_metrics->average_gfx_power = metrics.Current.Power[2]; + + gpu_metrics->average_cpu_voltage = metrics.Current.Voltage[0]; + gpu_metrics->average_soc_voltage = metrics.Current.Voltage[1]; + gpu_metrics->average_gfx_voltage = metrics.Current.Voltage[2]; + + gpu_metrics->average_cpu_current = 
metrics.Current.Current[0]; + gpu_metrics->average_soc_current = metrics.Current.Current[1]; + gpu_metrics->average_gfx_current = metrics.Current.Current[2]; + + memcpy(&gpu_metrics->average_core_power[0], + &metrics.Average.CorePower[0], + sizeof(uint16_t) * 4); + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + memcpy(&gpu_metrics->current_coreclk[0], + &metrics.Current.CoreFrequency[0], + sizeof(uint16_t) * 4); + gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + gpu_metrics->indep_throttle_status = + smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, + vangogh_throttler_map); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_4); +} + static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, void **table) { @@ -1923,23 +2013,34 @@ static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, { uint32_t if_version; uint32_t smu_version; + uint32_t smu_program; + uint32_t fw_version; int ret = 0; ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); - if (ret) { + if (ret) return ret; - } - if (smu_version >= 0x043F3E00) { - if (if_version < 0x3) - ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); + smu_program = (smu_version >> 24) & 0xff; + fw_version = smu_version & 0xffffff; + if (smu_program == 6) { + if (fw_version >= 0x3F0800) + ret = vangogh_get_gpu_metrics_v2_4(smu, table); else ret = vangogh_get_gpu_metrics_v2_3(smu, table); + } else { - if (if_version < 0x3) - ret = vangogh_get_legacy_gpu_metrics(smu, table); - else - ret = vangogh_get_gpu_metrics(smu, table); + if (smu_version >= 0x043F3E00) { + if (if_version < 0x3) + ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); + else + ret = vangogh_get_gpu_metrics_v2_3(smu, table); + } else { + if (if_version < 0x3) + ret = vangogh_get_legacy_gpu_metrics(smu, table); + else + ret = vangogh_get_gpu_metrics(smu, table); + } } return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 8a8ba25c9ad7..c8119491c516 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -262,15 +262,15 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu, /* mclk levels are in reverse order */ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - if(sclk_mask) + if (sclk_mask) /* The sclk as gfxclk and has three level about max/min/current */ *sclk_mask = 3 - 1; - if(mclk_mask) + if (mclk_mask) /* mclk levels are in reverse order */ *mclk_mask = 0; - if(soc_mask) + if (soc_mask) *soc_mask = 
NUM_SOCCLK_DPM_LEVELS - 1; } @@ -1197,7 +1197,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_VCNACTIVITY: *value = metrics->AverageUvdActivity / 100; break; - case METRICS_AVERAGE_SOCKETPOWER: + case METRICS_CURR_SOCKETPOWER: if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) || ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200))) *value = metrics->CurrentSocketPower << 8; @@ -1297,9 +1297,9 @@ static int renoir_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = renoir_get_smu_metrics_data(smu, - METRICS_AVERAGE_SOCKETPOWER, + METRICS_CURR_SOCKETPOWER, (uint32_t *)data); *size = 4; break; @@ -1315,6 +1315,7 @@ static int renoir_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index c788aa7a99a9..5e408a195860 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -205,7 +205,8 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu) return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } -int smu_v12_0_mode2_reset(struct smu_context *smu){ +int smu_v12_0_mode2_reset(struct smu_context *smu) +{ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index ce50ef46e73f..cc3169400c9b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -94,8 +94,7 @@ */ #define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION 0x00443300 -static const struct smu_temperature_range smu13_thermal_policy[] = -{ +static const struct smu_temperature_range smu13_thermal_policy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, }; @@ -196,7 +195,7 @@ static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUN ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF_BIT), ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL_BIT), ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, FEATURE_OUT_OF_BAND_MONITOR_BIT), - ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN), + ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DWN), ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), }; @@ -580,7 +579,7 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; @@ -626,9 +625,10 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu, break; case METRICS_AVERAGE_SOCKETPOWER: /* Valid power data is available only from primary die */ - *value = aldebaran_is_primary(smu) ? 
- metrics->AverageSocketPower << 8 : - 0; + if (aldebaran_is_primary(smu)) + *value = metrics->AverageSocketPower << 8; + else + ret = -EOPNOTSUPP; break; case METRICS_TEMPERATURE_EDGE: *value = metrics->TemperatureEdge * @@ -1095,16 +1095,6 @@ static int aldebaran_get_current_activity_percent(struct smu_context *smu, return ret; } -static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value) -{ - if (!value) - return -EINVAL; - - return aldebaran_get_smu_metrics_data(smu, - METRICS_AVERAGE_SOCKETPOWER, - value); -} - static int aldebaran_thermal_get_temperature(struct smu_context *smu, enum amd_pp_sensors sensor, uint32_t *value) @@ -1158,8 +1148,10 @@ static int aldebaran_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: - ret = aldebaran_get_gpu_power(smu, (uint32_t *)data); + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: @@ -1184,6 +1176,7 @@ static int aldebaran_read_sensor(struct smu_context *smu, ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: default: ret = -EOPNOTSUPP; break; @@ -1906,8 +1899,7 @@ static int aldebaran_mode1_reset(struct smu_context *smu) smu_cmn_get_smc_version(smu, NULL, &smu_version); if (smu_version < 0x00440700) { ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); - } - else { + } else { /* fatal error triggered by ras, PMFW supports the flag from 68.44.0 */ if ((smu_version >= 0x00442c00) && ras && @@ -2116,7 +2108,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .register_irq_handler = smu_v13_0_register_irq_handler, .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= aldebaran_is_baco_supported, + .baco_is_support = aldebaran_is_baco_supported, .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 9b62b45ebb7f..f1282fc4b90a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -83,7 +83,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; -static const int link_speed[] = {25, 50, 80, 160}; const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5}; const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; @@ -1121,7 +1120,7 @@ smu_v13_0_display_clock_voltage_request(struct smu_context *smu, ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); - if(clk_select == SMU_UCLK) + if (clk_select == SMU_UCLK) smu->hard_min_uclk_req_from_dal = clk_freq; } @@ -1437,8 +1436,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, return 0; } -static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = -{ +static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = { .set = smu_v13_0_set_irq_state, .process = smu_v13_0_irq_process, }; @@ -1933,7 +1931,7 @@ static int smu_v13_0_get_dpm_level_count(struct smu_context *smu, ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value); /* SMU v13.0.2 FW returns 0 based max level, 
increment by one for it */ - if((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value)) + if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value)) ++(*value); return ret; @@ -2264,7 +2262,7 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - smu_baco->maco_support ? + (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 0fb6be11a0cc..8b7403ba89d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -96,6 +96,14 @@ */ #define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200 +#define PP_OD_FEATURE_GFXCLK_FMIN 0 +#define PP_OD_FEATURE_GFXCLK_FMAX 1 +#define PP_OD_FEATURE_UCLK_FMIN 2 +#define PP_OD_FEATURE_UCLK_FMAX 3 +#define PP_OD_FEATURE_GFX_VF_CURVE 4 + +#define LINK_SPEED_MAX 3 + static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -947,7 +955,7 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); @@ -991,6 +999,7 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: default: ret = -EOPNOTSUPP; break; @@ -1058,7 +1067,6 @@ static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu, static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, int od_feature_bit, - bool lower_boundary, int32_t *min, int32_t *max) { @@ -1070,29 +1078,28 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu, int32_t od_min_setting, od_max_setting; switch (od_feature_bit) { - case PP_OD_FEATURE_GFXCLK_BIT: - if (lower_boundary) { - od_min_setting = overdrive_lowerlimits->GfxclkFmin; - od_max_setting = overdrive_upperlimits->GfxclkFmin; - } else { - od_min_setting = overdrive_lowerlimits->GfxclkFmax; - od_max_setting = overdrive_upperlimits->GfxclkFmax; - } + case PP_OD_FEATURE_GFXCLK_FMIN: + od_min_setting = overdrive_lowerlimits->GfxclkFmin; + od_max_setting = overdrive_upperlimits->GfxclkFmin; break; - case PP_OD_FEATURE_UCLK_BIT: - if (lower_boundary) { - od_min_setting = overdrive_lowerlimits->UclkFmin; - od_max_setting = overdrive_upperlimits->UclkFmin; - } else { - od_min_setting = overdrive_lowerlimits->UclkFmax; - od_max_setting = overdrive_upperlimits->UclkFmax; - } + case PP_OD_FEATURE_GFXCLK_FMAX: + od_min_setting = overdrive_lowerlimits->GfxclkFmax; + od_max_setting = overdrive_upperlimits->GfxclkFmax; break; - case PP_OD_FEATURE_GFX_VF_CURVE_BIT: + case PP_OD_FEATURE_UCLK_FMIN: + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + break; + case PP_OD_FEATURE_UCLK_FMAX: + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + break; + case PP_OD_FEATURE_GFX_VF_CURVE: od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary; od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary; break; default: + 
od_min_setting = od_max_setting = INT_MAX; break; } @@ -1318,13 +1325,11 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - true, + PP_OD_FEATURE_GFXCLK_FMIN, &min_value, NULL); smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - false, + PP_OD_FEATURE_GFXCLK_FMAX, NULL, &max_value); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", @@ -1333,13 +1338,11 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - true, + PP_OD_FEATURE_UCLK_FMIN, &min_value, NULL); smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - false, + PP_OD_FEATURE_UCLK_FMAX, NULL, &max_value); size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", @@ -1348,8 +1351,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_GFX_VF_CURVE_BIT, - true, + PP_OD_FEATURE_GFX_VF_CURVE, &min_value, &max_value); size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", @@ -1373,7 +1375,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, OverDriveTableExternal_t *od_table = (OverDriveTableExternal_t *)table_context->overdrive_table; struct amdgpu_device *adev = smu->adev; - uint32_t offset_of_featurectrlmask; + uint32_t offset_of_voltageoffset; int32_t minimum, maximum; uint32_t feature_ctrlmask; int i, ret = 0; @@ -1394,8 +1396,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, switch (input[i]) { case 0: smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - true, + PP_OD_FEATURE_GFXCLK_FMIN, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1411,8 +1412,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, case 1: smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - false, + PP_OD_FEATURE_GFXCLK_FMAX, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1457,8 +1457,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, switch (input[i]) { case 0: smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - true, + PP_OD_FEATURE_UCLK_FMIN, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1474,8 +1473,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, case 1: smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - false, + PP_OD_FEATURE_UCLK_FMAX, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1516,8 +1514,7 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; smu_v13_0_0_get_od_setting_limits(smu, - PP_OD_FEATURE_GFX_VF_CURVE_BIT, - true, + PP_OD_FEATURE_GFX_VF_CURVE, &minimum, &maximum); if (input[1] < minimum || @@ -1547,10 +1544,10 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, * It does not contain actual information about user's custom * settings. Thus we do not cache it.
*/ - offset_of_featurectrlmask = offsetof(OverDriveTable_t, FeatureCtrlMask); - if (memcmp((u8 *)od_table + offset_of_featurectrlmask, - table_context->user_overdrive_table + offset_of_featurectrlmask, - sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask)) { + offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); + if (memcmp((u8 *)od_table + offset_of_voltageoffset, + table_context->user_overdrive_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { smu_v13_0_0_dump_od_table(smu, od_table); ret = smu_v13_0_0_upload_overdrive_table(smu, od_table); @@ -1560,9 +1557,9 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, } od_table->OverDriveTable.FeatureCtrlMask = 0; - memcpy(table_context->user_overdrive_table + offset_of_featurectrlmask, - (u8 *)od_table + offset_of_featurectrlmask, - sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask); + memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, + (u8 *)od_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); if (!memcmp(table_context->user_overdrive_table, table_context->boot_overdrive_table, @@ -1765,7 +1762,10 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_fan_speed = metrics->AvgFanRpm; gpu_metrics->pcie_link_width = metrics->PcieWidth; - gpu_metrics->pcie_link_speed = metrics->PcieRate; + if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); + else + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); @@ -2211,7 +2211,8 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu) if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v13_0_baco_set_armd3_sequence(smu, - smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO); else return smu_v13_0_baco_enter(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index ef37dda9908f..626591f54bc4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -257,7 +257,7 @@ static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_activity = metrics.GfxActivity; gpu_metrics->average_mm_activity = metrics.UvdActivity; - gpu_metrics->average_socket_power = metrics.CurrentSocketPower; + gpu_metrics->average_socket_power = metrics.AverageSocketPower; gpu_metrics->average_gfx_power = metrics.Power[0]; gpu_metrics->average_soc_power = metrics.Power[1]; memcpy(&gpu_metrics->average_core_power[0], @@ -321,6 +321,9 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu, *value = metrics->UvdActivity; break; case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->AverageSocketPower << 8) / 1000; + break; + case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; break; case METRICS_TEMPERATURE_EDGE: @@ -569,12 +572,18 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_4_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: + ret = smu_v13_0_4_get_smu_metrics_data(smu, + METRICS_CURR_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_EDGE_TEMP: ret = smu_v13_0_4_get_smu_metrics_data(smu, METRICS_TEMPERATURE_EDGE, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 42f110602eb1..c6e7c2115a26 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -75,7 +75,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_5_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), - MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu , 1), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1), MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1), MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1), @@ -288,7 +288,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_VCNACTIVITY: *value = metrics->UvdActivity; break; - case METRICS_AVERAGE_SOCKETPOWER: + case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; break; case METRICS_TEMPERATURE_EDGE: @@ -332,9 +332,9 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = smu_v13_0_5_get_smu_metrics_data(smu, - METRICS_AVERAGE_SOCKETPOWER, + METRICS_CURR_SOCKETPOWER, (uint32_t *)data); *size = 4; break; @@ -388,6 +388,7 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; break; diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index dc6104a04dce..6ed9cd0a1e4e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -80,18 +80,23 @@ /* possible frequency drift (1Mhz) */ #define EPSILON 1 -#define smnPCIE_ESM_CTRL 0x193D0 +#define smnPCIE_ESM_CTRL 0x93D0 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 #define MAX_LINK_WIDTH 6 +#define smnPCIE_LC_SPEED_CNTL 0x1a340290 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 +#define LINK_SPEED_MAX 4 + static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), - MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1), - MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), @@ -102,8 +107,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), - MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0), - MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), @@ -122,8 +127,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0), MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), - MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0), - MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0), + MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), + MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0), MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), @@ -326,14 +331,24 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - int ret; - int i; + int ret, i, retry = 100; /* Store one-time values in driver PPTable */ if (!pptable->Init) { - ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); - if (ret) - return ret; + while (retry--) { + ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); + if (ret) + return ret; + + /* Ensure that metrics have been updated */ + if (metrics->AccumulationCounter) + break; + + 
usleep_range(1000, 1100); + } + + if (!retry) + return -ETIME; pptable->MaxSocketPowerLimit = SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit); @@ -705,7 +720,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_MEMACTIVITY: *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization); break; - case METRICS_AVERAGE_SOCKETPOWER: + case METRICS_CURR_SOCKETPOWER: *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: @@ -776,8 +791,6 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_13_0_dpm_context *dpm_context = NULL; - uint32_t display_levels; - uint32_t freq_values[3] = { 0 }; uint32_t min_clk, max_clk; smu_cmn_get_sysfs_buf(&buf, &size); @@ -802,50 +815,24 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, return ret; } - single_dpm_table = &(dpm_context->dpm_tables.gfx_table); - ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); - if (ret) { - dev_err(smu->adev->dev, - "Attempt to get gfx clk levels Failed!"); - return ret; - } - - display_levels = clocks.num_levels; - min_clk = pstate_table->gfxclk_pstate.curr.min; max_clk = pstate_table->gfxclk_pstate.curr.max; - freq_values[0] = min_clk; - freq_values[1] = max_clk; - - /* fine-grained dpm has only 2 levels */ - if (now > min_clk && now < max_clk) { - display_levels = clocks.num_levels + 1; - freq_values[2] = max_clk; - freq_values[1] = now; - } - - /* - * For DPM disabled case, there will be only one clock level. - * And it's safe to assume that is always the current clock. - */ - if (display_levels == clocks.num_levels) { - for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at( - buf, size, "%d: %uMhz %s\n", i, - freq_values[i], - (clocks.num_levels == 1) ? - "*" : - (smu_v13_0_6_freqs_in_same_level( - freq_values[i], now) ? - "*" : - "")); + if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) && + !smu_v13_0_6_freqs_in_same_level(now, max_clk)) { + size += sysfs_emit_at(buf, size, "0: %uMhz\n", + min_clk); + size += sysfs_emit_at(buf, size, "1: %uMhz *\n", + now); + size += sysfs_emit_at(buf, size, "2: %uMhz\n", + max_clk); } else { - for (i = 0; i < display_levels; i++) - size += sysfs_emit_at(buf, size, - "%d: %uMhz %s\n", i, - freq_values[i], - i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + min_clk, + smu_v13_0_6_freqs_in_same_level(now, min_clk) ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + max_clk, + smu_v13_0_6_freqs_in_same_level(now, max_clk) ? 
"*" : ""); } break; @@ -1158,15 +1145,6 @@ static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu, return ret; } -static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value) -{ - if (!value) - return -EINVAL; - - return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, - value); -} - static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu, enum amd_pp_sensors sensor, uint32_t *value) @@ -1212,8 +1190,10 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: - ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data); + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: + ret = smu_v13_0_6_get_smu_metrics_data(smu, + METRICS_CURR_SOCKETPOWER, + (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: @@ -1239,6 +1219,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; break; @@ -1252,9 +1233,9 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu, uint32_t *default_power_limit, uint32_t *max_power_limit) { - struct smu_table_context *smu_table = &smu->smu_table; - struct PPTable_t *pptable = - (struct PPTable_t *)smu_table->driver_pptable; + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; uint32_t power_limit = 0; int ret; @@ -1370,8 +1351,7 @@ static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev, return 0; } -static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = -{ +static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = { .set = smu_v13_0_6_set_irq_state, .process = smu_v13_0_6_irq_process, }; @@ -1418,6 +1398,9 @@ static int smu_v13_0_6_system_features_control(struct smu_context *smu, struct amdgpu_device *adev = smu->adev; int ret = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if (enable) { if (!(adev->flags & AMD_IS_APU)) ret = smu_v13_0_system_features_control(smu, enable); @@ -1952,6 +1935,7 @@ smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu) static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + uint32_t speed_level; uint32_t esm_ctrl; /* TODO: confirm this on real target */ @@ -1959,7 +1943,13 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) if ((esm_ctrl >> 15) & 0x1FFFF) return (((esm_ctrl >> 8) & 0x3F) + 128); - return smu_v13_0_get_current_pcie_link_speed(smu); + speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; + if (speed_level > LINK_SPEED_MAX) + speed_level = 0; + + return pcie_gen_to_speed(speed_level + 1); } static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 62f2886ab4df..94ef5b4d116d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -72,6 +72,14 @@ #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 +#define PP_OD_FEATURE_GFXCLK_FMIN 0 +#define PP_OD_FEATURE_GFXCLK_FMAX 1 +#define PP_OD_FEATURE_UCLK_FMIN 2 +#define PP_OD_FEATURE_UCLK_FMAX 3 +#define PP_OD_FEATURE_GFX_VF_CURVE 4 + +#define 
LINK_SPEED_MAX 3 + static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -392,8 +400,7 @@ static int smu_v13_0_7_check_fw_status(struct smu_context *smu) } #ifndef atom_smc_dpm_info_table_13_0_7 -struct atom_smc_dpm_info_table_13_0_7 -{ +struct atom_smc_dpm_info_table_13_0_7 { struct atom_common_table_header table_header; BoardTable_t BoardTable; }; @@ -500,7 +507,7 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM); + AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -734,7 +741,7 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; SmuMetrics_t *metrics = &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); int ret = 0; @@ -929,7 +936,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_7_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); @@ -973,6 +980,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: default: ret = -EOPNOTSUPP; break; @@ -1040,7 +1048,6 @@ static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu, static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, int od_feature_bit, - bool lower_boundary, int32_t *min, int32_t *max) { @@ -1052,29 +1059,28 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu, int32_t od_min_setting, od_max_setting; switch (od_feature_bit) { - case PP_OD_FEATURE_GFXCLK_BIT: - if (lower_boundary) { - od_min_setting = overdrive_lowerlimits->GfxclkFmin; - od_max_setting = overdrive_upperlimits->GfxclkFmin; - } else { - od_min_setting = overdrive_lowerlimits->GfxclkFmax; - od_max_setting = overdrive_upperlimits->GfxclkFmax; - } + case PP_OD_FEATURE_GFXCLK_FMIN: + od_min_setting = overdrive_lowerlimits->GfxclkFmin; + od_max_setting = overdrive_upperlimits->GfxclkFmin; break; - case PP_OD_FEATURE_UCLK_BIT: - if (lower_boundary) { - od_min_setting = overdrive_lowerlimits->UclkFmin; - od_max_setting = overdrive_upperlimits->UclkFmin; - } else { - od_min_setting = overdrive_lowerlimits->UclkFmax; - od_max_setting = overdrive_upperlimits->UclkFmax; - } + case PP_OD_FEATURE_GFXCLK_FMAX: + od_min_setting = overdrive_lowerlimits->GfxclkFmax; + od_max_setting = overdrive_upperlimits->GfxclkFmax; break; - case PP_OD_FEATURE_GFX_VF_CURVE_BIT: + case PP_OD_FEATURE_UCLK_FMIN: + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + break; + case PP_OD_FEATURE_UCLK_FMAX: + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + break; + case PP_OD_FEATURE_GFX_VF_CURVE: od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary; od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary; break; default: + od_min_setting = od_max_setting = INT_MAX; break; } 
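Both the 13.0.0 and 13.0.7 variants of get_od_setting_limits() above drop the boolean lower_boundary parameter in favour of one explicit ID per overdrive limit (PP_OD_FEATURE_GFXCLK_FMIN/FMAX, PP_OD_FEATURE_UCLK_FMIN/FMAX, PP_OD_FEATURE_GFX_VF_CURVE), so each switch case reads exactly one pair of limit fields and every call site names the bound it wants. A minimal standalone sketch of the resulting lookup pattern follows; the struct and field names are simplified stand-ins rather than the firmware's real OverDriveLimits_t layout, and the VF-curve case is omitted for brevity:

/*
 * Sketch only: illustrates the (feature bit, bool) -> per-setting-ID
 * refactor shown in the hunks above. Types are simplified stand-ins.
 */
#include <limits.h>
#include <stdio.h>

enum od_setting {
	OD_GFXCLK_FMIN,
	OD_GFXCLK_FMAX,
	OD_UCLK_FMIN,
	OD_UCLK_FMAX,
};

struct od_limits {
	int gfxclk_fmin, gfxclk_fmax;
	int uclk_fmin, uclk_fmax;
};

static void get_od_setting_limits(const struct od_limits *lo,
				  const struct od_limits *hi,
				  enum od_setting id, int *min, int *max)
{
	int lo_val = INT_MAX, hi_val = INT_MAX;

	switch (id) {
	case OD_GFXCLK_FMIN: lo_val = lo->gfxclk_fmin; hi_val = hi->gfxclk_fmin; break;
	case OD_GFXCLK_FMAX: lo_val = lo->gfxclk_fmax; hi_val = hi->gfxclk_fmax; break;
	case OD_UCLK_FMIN:   lo_val = lo->uclk_fmin;   hi_val = hi->uclk_fmin;   break;
	case OD_UCLK_FMAX:   lo_val = lo->uclk_fmax;   hi_val = hi->uclk_fmax;   break;
	}

	/* Callers may pass NULL for the bound they do not need. */
	if (min)
		*min = lo_val;
	if (max)
		*max = hi_val;
}

int main(void)
{
	struct od_limits lo = { 500, 800, 400, 600 };
	struct od_limits hi = { 1100, 2600, 800, 1200 };
	int min, max;

	get_od_setting_limits(&lo, &hi, OD_GFXCLK_FMAX, &min, &max);
	printf("GFXCLK Fmax may range from %d to %d MHz\n", min, max);
	return 0;
}

Allowing NULL for an unwanted bound mirrors how print_clk_levels() queries only a minimum or only a maximum in each call, as seen in the hunks that follow.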
@@ -1300,13 +1306,11 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - true, + PP_OD_FEATURE_GFXCLK_FMIN, &min_value, NULL); smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - false, + PP_OD_FEATURE_GFXCLK_FMAX, NULL, &max_value); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", @@ -1315,13 +1319,11 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - true, + PP_OD_FEATURE_UCLK_FMIN, &min_value, NULL); smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - false, + PP_OD_FEATURE_UCLK_FMAX, NULL, &max_value); size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", @@ -1330,8 +1332,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_GFX_VF_CURVE_BIT, - true, + PP_OD_FEATURE_GFX_VF_CURVE, &min_value, &max_value); size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", @@ -1355,7 +1356,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, OverDriveTableExternal_t *od_table = (OverDriveTableExternal_t *)table_context->overdrive_table; struct amdgpu_device *adev = smu->adev; - uint32_t offset_of_featurectrlmask; + uint32_t offset_of_voltageoffset; int32_t minimum, maximum; uint32_t feature_ctrlmask; int i, ret = 0; @@ -1376,8 +1377,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, switch (input[i]) { case 0: smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - true, + PP_OD_FEATURE_GFXCLK_FMIN, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1393,8 +1393,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, case 1: smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_BIT, - false, + PP_OD_FEATURE_GFXCLK_FMAX, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1439,8 +1438,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, switch (input[i]) { case 0: smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - true, + PP_OD_FEATURE_UCLK_FMIN, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1456,8 +1454,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, case 1: smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_UCLK_BIT, - false, + PP_OD_FEATURE_UCLK_FMAX, &minimum, &maximum); if (input[i + 1] < minimum || @@ -1498,8 +1495,7 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; smu_v13_0_7_get_od_setting_limits(smu, - PP_OD_FEATURE_GFX_VF_CURVE_BIT, - true, + PP_OD_FEATURE_GFX_VF_CURVE, &minimum, &maximum); if (input[1] < minimum || @@ -1529,10 +1525,10 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, * It does not contain actual information about user's custom * settings. Thus we do not cache it.
*/ - offset_of_featurectrlmask = offsetof(OverDriveTable_t, FeatureCtrlMask); - if (memcmp((u8 *)od_table + offset_of_featurectrlmask, - table_context->user_overdrive_table + offset_of_featurectrlmask, - sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask)) { + offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); + if (memcmp((u8 *)od_table + offset_of_voltageoffset, + table_context->user_overdrive_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { smu_v13_0_7_dump_od_table(smu, od_table); ret = smu_v13_0_7_upload_overdrive_table(smu, od_table); @@ -1542,9 +1538,9 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, } od_table->OverDriveTable.FeatureCtrlMask = 0; - memcpy(table_context->user_overdrive_table + offset_of_featurectrlmask, - (u8 *)od_table + offset_of_featurectrlmask, - sizeof(OverDriveTableExternal_t) - offset_of_featurectrlmask); + memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, + (u8 *)od_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); if (!memcmp(table_context->user_overdrive_table, table_context->boot_overdrive_table, @@ -1641,8 +1637,7 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, return ret; } -static const struct smu_temperature_range smu13_thermal_policy[] = -{ +static const struct smu_temperature_range smu13_thermal_policy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, }; @@ -1743,7 +1738,10 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_fan_speed = metrics->AvgFanRpm; gpu_metrics->pcie_link_width = metrics->PcieWidth; - gpu_metrics->pcie_link_speed = metrics->PcieRate; + if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); + else + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); @@ -2141,7 +2139,8 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu) if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v13_0_baco_set_armd3_sequence(smu, - smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO); else return smu_v13_0_baco_enter(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a1be2029ba4a..2e74d749efdd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -365,7 +365,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_VCNACTIVITY: *value = metrics->UvdActivity; break; - case METRICS_AVERAGE_SOCKETPOWER: + case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; break; case METRICS_TEMPERATURE_EDGE: @@ -423,9 +423,9 @@ static int yellow_carp_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_GPU_POWER: + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = yellow_carp_get_smu_metrics_data(smu, - METRICS_AVERAGE_SOCKETPOWER, + METRICS_CURR_SOCKETPOWER, (uint32_t *)data); *size = 4; break; @@ -479,6 +479,7 @@ static int yellow_carp_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 3ecb900e6ecd..12618a583e97 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -39,6 +39,8 @@ #define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL +const int link_speed[] = {25, 50, 80, 160, 320, 640}; + #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) #type static const char * const __smu_message_names[] = { @@ -691,7 +693,7 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu, #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(fea) #fea -static const char* __smu_feature_names[] = { +static const char *__smu_feature_names[] = { SMU_FEATURE_MASKS }; @@ -927,7 +929,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu, void *metrics_table, bool bypass_cache) { - struct smu_table_context *smu_table= &smu->smu_table; + struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; int ret = 0; @@ -969,7 +971,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) struct metrics_table_header *header = (struct metrics_table_header *)table; uint16_t structure_size; -#define METRICS_VERSION(a, b) ((a << 16) | b ) +#define METRICS_VERSION(a, b) ((a << 16) | b) switch (METRICS_VERSION(frev, crev)) { case METRICS_VERSION(1, 0): @@ -996,6 +998,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) case METRICS_VERSION(2, 3): structure_size = sizeof(struct gpu_metrics_v2_3); break; + case METRICS_VERSION(2, 4): + structure_size = sizeof(struct gpu_metrics_v2_4); + break; default: return; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index d7cd358a53bd..cc590e27d88a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -30,6 +30,14 @@ #define FDO_PWM_MODE_STATIC 1 #define FDO_PWM_MODE_STATIC_RPM 5 +extern const int link_speed[]; + +/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */ +static inline int pcie_gen_to_speed(uint32_t gen) +{ + return ((gen == 0) ? 
link_speed[0] : link_speed[gen - 1]); +} + int smu_cmn_send_msg_without_waiting(struct smu_context *smu, uint16_t msg_index, uint32_t param); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index ceb13c838067..bcc42abfc768 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -61,14 +61,14 @@ #define smu_feature_get_enabled_mask(smu, mask) smu_ppt_funcs(get_enabled_mask, -EOPNOTSUPP, smu, mask) #define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask) #define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask) -#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu) +#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0, smu) #define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu) #define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu) #define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu) #define smu_disable_thermal_alert(smu) smu_ppt_funcs(disable_thermal_alert, 0, smu) #define smu_smc_read_sensor(smu, sensor, data, size) smu_ppt_funcs(read_sensor, -EINVAL, smu, sensor, data, size) #define smu_pre_display_config_changed(smu) smu_ppt_funcs(pre_display_config_changed, 0, smu) -#define smu_display_config_changed(smu) smu_ppt_funcs(display_config_changed, 0 , smu) +#define smu_display_config_changed(smu) smu_ppt_funcs(display_config_changed, 0, smu) #define smu_apply_clocks_adjust_rules(smu) smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu) #define smu_notify_smc_display_config(smu) smu_ppt_funcs(notify_smc_display_config, 0, smu) #define smu_run_btc(smu) smu_ppt_funcs(run_btc, 0, smu) diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index c1b89274d2a4..ddf20708370f 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 menu "ARM devices" + depends on DRM config DRM_HDLCD tristate "ARM HDLCD" diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index cea3fd5772b5..2c661f28410e 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include "komeda_dev.h" #include "komeda_kms.h" @@ -612,9 +614,11 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms, struct komeda_crtc *kcrtc) { struct drm_crtc *crtc = &kcrtc->base; + struct drm_device *base = &kms->base; + struct drm_bridge *bridge; int err; - err = drm_crtc_init_with_planes(&kms->base, crtc, + err = drm_crtc_init_with_planes(base, crtc, get_crtc_primary(kms, kcrtc), NULL, &komeda_crtc_funcs, NULL); if (err) @@ -624,6 +628,22 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms, crtc->port = kcrtc->master->of_output_port; + /* Construct an encoder for each pipeline and attach it to the remote + * bridge + */ + kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc); + err = drm_simple_encoder_init(base, &kcrtc->encoder, + DRM_MODE_ENCODER_TMDS); + if (err) + return err; + + bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node, + KOMEDA_OF_PORT_OUTPUT, 0); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + + err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0); + drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE); return 
err; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index cc7664c95a54..14ee79becacb 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -6,7 +6,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 28f76e07dd95..cb2a2be24c5f 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -28,13 +27,11 @@ struct komeda_dev *dev_to_mdev(struct device *dev) return mdrv ? mdrv->mdev : NULL; } -static void komeda_unbind(struct device *dev) +static void komeda_platform_remove(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct komeda_drv *mdrv = dev_get_drvdata(dev); - if (!mdrv) - return; - komeda_kms_detach(mdrv->kms); if (pm_runtime_enabled(dev)) @@ -48,8 +45,9 @@ static void komeda_unbind(struct device *dev) devm_kfree(dev, mdrv); } -static int komeda_bind(struct device *dev) +static int komeda_platform_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct komeda_drv *mdrv; int err; @@ -91,52 +89,6 @@ free_mdrv: return err; } -static const struct component_master_ops komeda_master_ops = { - .bind = komeda_bind, - .unbind = komeda_unbind, -}; - -static void komeda_add_slave(struct device *master, - struct component_match **match, - struct device_node *np, - u32 port, u32 endpoint) -{ - struct device_node *remote; - - remote = of_graph_get_remote_node(np, port, endpoint); - if (remote) { - drm_of_component_match_add(master, match, component_compare_of, remote); - of_node_put(remote); - } -} - -static int komeda_platform_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct component_match *match = NULL; - struct device_node *child; - - if (!dev->of_node) - return -ENODEV; - - for_each_available_child_of_node(dev->of_node, child) { - if (of_node_cmp(child->name, "pipeline") != 0) - continue; - - /* add connector */ - komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 0); - komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 1); - } - - return component_master_add_with_match(dev, &komeda_master_ops, match); -} - -static int komeda_platform_remove(struct platform_device *pdev) -{ - component_master_del(&pdev->dev, &komeda_master_ops); - return 0; -} - static const struct of_device_id komeda_of_match[] = { { .compatible = "arm,mali-d71", .data = d71_identify, }, { .compatible = "arm,mali-d32", .data = d71_identify, }, @@ -189,7 +141,7 @@ static const struct dev_pm_ops komeda_pm_ops = { static struct platform_driver komeda_platform_driver = { .probe = komeda_platform_probe, - .remove = komeda_platform_remove, + .remove_new = komeda_platform_remove, .driver = { .name = "komeda", .of_match_table = komeda_of_match, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 62dc64550793..9299026701f3 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -4,7 +4,6 @@ * Author: James.Qian.Wang * */ -#include #include #include @@ -305,17 +304,13 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) if (err) goto cleanup_mode_config; - err = component_bind_all(mdev->dev, kms); - if 
(err) - goto cleanup_mode_config; - drm_mode_config_reset(drm); err = devm_request_irq(drm->dev, mdev->irq, komeda_kms_irq_handler, IRQF_SHARED, drm->driver->name, drm); if (err) - goto free_component_binding; + goto cleanup_mode_config; drm_kms_helper_poll_init(drm); @@ -327,8 +322,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) free_interrupts: drm_kms_helper_poll_fini(drm); -free_component_binding: - component_unbind_all(mdev->dev, drm); cleanup_mode_config: drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); @@ -339,12 +332,10 @@ cleanup_mode_config: void komeda_kms_detach(struct komeda_kms_dev *kms) { struct drm_device *drm = &kms->base; - struct komeda_dev *mdev = drm->dev_private; drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); - component_unbind_all(mdev->dev, drm); drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); drm->dev_private = NULL; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h index 3a872c292091..6ef655326357 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h @@ -84,6 +84,9 @@ struct komeda_crtc { /** @disable_done: this flip_done is for tracing the disable */ struct completion *disable_done; + + /** @encoder: encoder at the end of the pipeline */ + struct drm_encoder encoder; }; /** diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 12f5a2c7f03d..aa06f9838015 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -367,10 +367,9 @@ static int hdlcd_probe(struct platform_device *pdev) match); } -static int hdlcd_remove(struct platform_device *pdev) +static void hdlcd_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &hdlcd_master_ops); - return 0; } static const struct of_device_id hdlcd_of_match[] = { @@ -399,7 +398,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume); static struct platform_driver hdlcd_platform_driver = { .probe = hdlcd_probe, - .remove = hdlcd_remove, + .remove_new = hdlcd_remove, .driver = { .name = "hdlcd", .pm = &hdlcd_pm_ops, diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index c03cfd57b752..62329d5dd992 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -935,10 +936,9 @@ static int malidp_platform_probe(struct platform_device *pdev) match); } -static int malidp_platform_remove(struct platform_device *pdev) +static void malidp_platform_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &malidp_master_ops); - return 0; } static int __maybe_unused malidp_pm_suspend(struct device *dev) @@ -981,7 +981,7 @@ static const struct dev_pm_ops malidp_pm_ops = { static struct platform_driver malidp_platform_driver = { .probe = malidp_platform_probe, - .remove = malidp_platform_remove, + .remove_new = malidp_platform_remove, .driver = { .name = "mali-dp", .pm = &malidp_pm_ops, diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig index 5afade25e217..e5597d7c9ae1 100644 --- a/drivers/gpu/drm/armada/Kconfig +++ b/drivers/gpu/drm/armada/Kconfig @@ -3,7 +3,7 @@ config DRM_ARMADA tristate "DRM support for Marvell Armada SoCs" depends on DRM && HAVE_CLK && ARM && MMU select DRM_KMS_HELPER - select FB_IO_HELPERS if DRM_FBDEV_EMULATION + select 
FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION help Support the "LCD" controllers found on the Marvell Armada 510 devices. There are two controllers on the device, each controller diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index e120144d4b47..e8d2fe955909 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -37,8 +37,6 @@ static const struct drm_ioctl_desc armada_ioctls[] = { DEFINE_DRM_GEM_FOPS(armada_drm_fops); static const struct drm_driver armada_drm_driver = { - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = armada_gem_prime_import, .dumb_create = armada_gem_dumb_create, .major = 1, diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index e40a95e51785..d223176912b6 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -34,7 +34,7 @@ static void armada_fbdev_fb_destroy(struct fb_info *info) static const struct fb_ops armada_fb_ops = { .owner = THIS_MODULE, - FB_DEFAULT_IO_OPS, + FB_DEFAULT_IOMEM_OPS, DRM_FB_HELPER_DEFAULT_OPS, .fb_destroy = armada_fbdev_fb_destroy, }; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index f21eb8fb76d8..3b9bd8ecda13 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -4,6 +4,8 @@ * Rewritten from the dovefb driver, and Armada510 manuals. */ +#include + #include #include #include @@ -445,8 +447,8 @@ static int armada_overlay_get_property(struct drm_plane *plane, drm_to_overlay_state(state)->colorkey_ug, drm_to_overlay_state(state)->colorkey_vb, 0); } else if (property == priv->colorkey_mode_prop) { - *val = (drm_to_overlay_state(state)->colorkey_mode & - CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK); + *val = FIELD_GET(CFG_CKMODE_MASK, + drm_to_overlay_state(state)->colorkey_mode); } else if (property == priv->brightness_prop) { *val = drm_to_overlay_state(state)->brightness + 256; } else if (property == priv->contrast_prop) { diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index c8c7f8215155..d207b03f8357 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -351,20 +351,18 @@ err_unload: return ret; } -static int aspeed_gfx_remove(struct platform_device *pdev) +static void aspeed_gfx_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); drm_dev_unregister(drm); aspeed_gfx_unload(drm); - - return 0; } static struct platform_driver aspeed_gfx_platform_driver = { .probe = aspeed_gfx_probe, - .remove = aspeed_gfx_remove, + .remove_new = aspeed_gfx_remove, .driver = { .name = "aspeed_gfx", .of_match_table = aspeed_gfx_match, diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 6dc1a09504e1..fdd9a493aa9c 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -7,6 +7,17 @@ #include #include "ast_drv.h" +bool ast_astdp_is_connected(struct ast_device *ast) +{ + if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING)) + return false; + if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)) + return false; + if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS)) + return false; + return true; +} + int ast_astdp_read_edid(struct drm_device 
*dev, u8 *ediddata) { struct ast_device *ast = to_ast_device(dev); diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 1bc35a992369..f10d53b0c94f 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -272,11 +272,9 @@ static bool ast_launch_m68k(struct drm_device *dev) return true; } -bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) +bool ast_dp501_is_connected(struct ast_device *ast) { - struct ast_device *ast = to_ast_device(dev); - u32 i, boot_address, offset, data; - u32 *pEDIDidx; + u32 boot_address, offset, data; if (ast->config_mode == ast_use_p2a) { boot_address = get_fw_base(ast); @@ -292,14 +290,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) data = ast_mindwm(ast, boot_address + offset); if (!(data & AST_DP501_PNP_CONNECTED)) return false; - - /* Read EDID */ - offset = AST_DP501_EDID_DATA; - for (i = 0; i < 128; i += 4) { - data = ast_mindwm(ast, boot_address + offset + i); - pEDIDidx = (u32 *)(ediddata + i); - *pEDIDidx = data; - } } else { if (!ast->dp501_fw_buf) return false; @@ -319,7 +309,30 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) data = readl(ast->dp501_fw_buf + offset); if (!(data & AST_DP501_PNP_CONNECTED)) return false; + } + return true; +} +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) +{ + struct ast_device *ast = to_ast_device(dev); + u32 i, boot_address, offset, data; + u32 *pEDIDidx; + + if (!ast_dp501_is_connected(ast)) + return false; + + if (ast->config_mode == ast_use_p2a) { + boot_address = get_fw_base(ast); + + /* Read EDID */ + offset = AST_DP501_EDID_DATA; + for (i = 0; i < 128; i += 4) { + data = ast_mindwm(ast, boot_address + offset + i); + pEDIDidx = (u32 *)(ediddata + i); + *pEDIDidx = data; + } + } else { /* Read EDID */ offset = AST_DP501_EDID_DATA; for (i = 0; i < 128; i += 4) { @@ -350,7 +363,7 @@ static bool ast_init_dvo(struct drm_device *dev) data |= 0x00000500; ast_write32(ast, 0x12008, data); - if (ast->chip == AST2300) { + if (IS_AST_GEN4(ast)) { data = ast_read32(ast, 0x12084); /* multi-pins for DVO single-edge */ data |= 0xfffe0000; @@ -366,7 +379,7 @@ static bool ast_init_dvo(struct drm_device *dev) data &= 0xffffffcf; data |= 0x00000020; ast_write32(ast, 0x12090, data); - } else { /* AST2400 */ + } else { /* AST GEN5+ */ data = ast_read32(ast, 0x12088); /* multi-pins for DVO single-edge */ data |= 0x30000000; @@ -437,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev) struct ast_device *ast = to_ast_device(dev); u8 jreg; - if (ast->chip == AST2300 || ast->chip == AST2400) { + if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) { jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); switch (jreg & 0x0e) { case 0x04: diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 5498a6676f2e..848a9f1403e8 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -52,19 +52,38 @@ #define PCI_CHIP_AST2000 0x2000 #define PCI_CHIP_AST2100 0x2010 +#define __AST_CHIP(__gen, __index) ((__gen) << 16 | (__index)) enum ast_chip { - AST2000, - AST2100, - AST1100, - AST2200, - AST2150, - AST2300, - AST2400, - AST2500, - AST2600, + /* 1st gen */ + AST1000 = __AST_CHIP(1, 0), // unused + AST2000 = __AST_CHIP(1, 1), + /* 2nd gen */ + AST1100 = __AST_CHIP(2, 0), + AST2100 = __AST_CHIP(2, 1), + AST2050 = __AST_CHIP(2, 2), // unused + /* 3rd gen */ + AST2200 = __AST_CHIP(3, 0), + AST2150 = __AST_CHIP(3, 1), + /* 4th gen */ + AST2300 = __AST_CHIP(4, 0), + AST1300 = 
__AST_CHIP(4, 1), + AST1050 = __AST_CHIP(4, 2), // unused + /* 5th gen */ + AST2400 = __AST_CHIP(5, 0), + AST1400 = __AST_CHIP(5, 1), + AST1250 = __AST_CHIP(5, 2), // unused + /* 6th gen */ + AST2500 = __AST_CHIP(6, 0), + AST2510 = __AST_CHIP(6, 1), + AST2520 = __AST_CHIP(6, 2), // unused + /* 7th gen */ + AST2600 = __AST_CHIP(7, 0), + AST2620 = __AST_CHIP(7, 1), // unused }; +#define __AST_CHIP_GEN(__chip) (((unsigned long)(__chip)) >> 16) + enum ast_tx_chip { AST_TX_NONE, AST_TX_SIL164, @@ -166,7 +185,6 @@ struct ast_device { void __iomem *dp501_fw_buf; enum ast_chip chip; - bool vga2_clone; uint32_t dram_bus_width; uint32_t dram_type; uint32_t mclk; @@ -196,6 +214,10 @@ struct ast_device { struct drm_encoder encoder; struct drm_connector connector; } astdp; + struct { + struct drm_encoder encoder; + struct drm_connector connector; + } bmc; } output; bool support_wide_screen; @@ -219,6 +241,24 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, struct pci_dev *pdev, unsigned long flags); +static inline unsigned long __ast_gen(struct ast_device *ast) +{ + return __AST_CHIP_GEN(ast->chip); +} +#define AST_GEN(__ast) __ast_gen(__ast) + +static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen) +{ + return __ast_gen(ast) == gen; +} +#define IS_AST_GEN1(__ast) __ast_gen_is_eq(__ast, 1) +#define IS_AST_GEN2(__ast) __ast_gen_is_eq(__ast, 2) +#define IS_AST_GEN3(__ast) __ast_gen_is_eq(__ast, 3) +#define IS_AST_GEN4(__ast) __ast_gen_is_eq(__ast, 4) +#define IS_AST_GEN5(__ast) __ast_gen_is_eq(__ast, 5) +#define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6) +#define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7) + #define AST_IO_AR_PORT_WRITE (0x40) #define AST_IO_MISC_PORT_WRITE (0x42) #define AST_IO_VGA_ENABLE_PORT (0x43) @@ -258,26 +298,35 @@ static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val) iowrite8(val, ast->ioregs + reg); } -static inline void ast_set_index_reg(struct ast_device *ast, - uint32_t base, uint8_t index, - uint8_t val) +static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index) +{ + ast_io_write8(ast, base, index); + ++base; + return ast_io_read8(ast, base); +} + +static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index, + u8 preserve_mask) +{ + u8 val = ast_get_index_reg(ast, base, index); + + return val & preserve_mask; +} + +static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val) { ast_io_write8(ast, base, index); ++base; ast_io_write8(ast, base, val); } -void ast_set_index_reg_mask(struct ast_device *ast, - uint32_t base, uint8_t index, - uint8_t mask, uint8_t val); -uint8_t ast_get_index_reg(struct ast_device *ast, - uint32_t base, uint8_t index); -uint8_t ast_get_index_reg_mask(struct ast_device *ast, - uint32_t base, uint8_t index, uint8_t mask); - -static inline void ast_open_key(struct ast_device *ast) +static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index, + u8 preserve_mask, u8 val) { - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); + u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask); + + tmp |= val; + ast_set_index_reg(ast, base, index, tmp); } #define AST_VIDMEM_SIZE_8M 0x00800000 @@ -458,9 +507,6 @@ int ast_mode_config_init(struct ast_device *ast); int ast_mm_init(struct ast_device *ast); /* ast post */ -void ast_enable_vga(struct drm_device *dev); -void ast_enable_mmio(struct drm_device *dev); -bool ast_is_vga_enabled(struct drm_device *dev); void ast_post_gpu(struct drm_device 
*dev); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); @@ -468,6 +514,7 @@ void ast_patch_ahb_2500(struct ast_device *ast); /* ast dp501 */ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); +bool ast_dp501_is_connected(struct ast_device *ast); bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); @@ -476,6 +523,7 @@ void ast_init_3rdtx(struct drm_device *dev); struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); /* aspeed DP */ +bool ast_astdp_is_connected(struct ast_device *ast); int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); void ast_dp_launch(struct drm_device *dev); void ast_dp_power_on_off(struct drm_device *dev, bool no); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 1f35438f614a..dae365ed3969 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -35,131 +35,153 @@ #include "ast_drv.h" -void ast_set_index_reg_mask(struct ast_device *ast, - uint32_t base, uint8_t index, - uint8_t mask, uint8_t val) +static bool ast_is_vga_enabled(struct drm_device *dev) { - u8 tmp; - ast_io_write8(ast, base, index); - tmp = (ast_io_read8(ast, base + 1) & mask) | val; - ast_set_index_reg(ast, base, index, tmp); -} - -uint8_t ast_get_index_reg(struct ast_device *ast, - uint32_t base, uint8_t index) -{ - uint8_t ret; - ast_io_write8(ast, base, index); - ret = ast_io_read8(ast, base + 1); - return ret; -} - -uint8_t ast_get_index_reg_mask(struct ast_device *ast, - uint32_t base, uint8_t index, uint8_t mask) -{ - uint8_t ret; - ast_io_write8(ast, base, index); - ret = ast_io_read8(ast, base + 1) & mask; - return ret; -} - -static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) -{ - struct device_node *np = dev->dev->of_node; struct ast_device *ast = to_ast_device(dev); + u8 ch; + + ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); + + return !!(ch & 0x01); +} + +static void ast_enable_vga(struct drm_device *dev) +{ + struct ast_device *ast = to_ast_device(dev); + + ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01); + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01); +} + +/* + * Run this function as part of the HW device cleanup; not + * when the DRM device gets released. 
+ */ +static void ast_enable_mmio_release(void *data) +{ + struct ast_device *ast = data; + + /* enable standard VGA decode */ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); +} + +static int ast_enable_mmio(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); + + return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast); +} + +static void ast_open_key(struct ast_device *ast) +{ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); +} + +static int ast_device_config_init(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; struct pci_dev *pdev = to_pci_dev(dev->dev); - uint32_t data, jregd0, jregd1; - - /* Defaults */ - ast->config_mode = ast_use_defaults; - *scu_rev = 0xffffffff; - - /* Check if we have device-tree properties */ - if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", - scu_rev)) { - /* We do, disable P2A access */ - ast->config_mode = ast_use_dt; - drm_info(dev, "Using device-tree for configuration\n"); - return; - } - - /* Not all families have a P2A bridge */ - if (pdev->device != PCI_CHIP_AST2000) - return; + struct device_node *np = dev->dev->of_node; + uint32_t scu_rev = 0xffffffff; + u32 data; + u8 jregd0, jregd1; /* - * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge - * is disabled. We force using P2A if VGA only mode bit - * is set D[7] + * Find configuration mode and read SCU revision */ - jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); - jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); - if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { - /* Patch AST2500 */ - if (((pdev->revision & 0xF0) == 0x40) - && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0)) - ast_patch_ahb_2500(ast); - /* Double check it's actually working */ - data = ast_read32(ast, 0xf004); - if ((data != 0xFFFFFFFF) && (data != 0x00)) { - /* P2A works, grab silicon revision */ - ast->config_mode = ast_use_p2a; + ast->config_mode = ast_use_defaults; - drm_info(dev, "Using P2A bridge for configuration\n"); + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + scu_rev = data; + } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - *scu_rev = ast_read32(ast, 0x1207c); - return; + /* + * We have a P2A bridge and it is enabled. 
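ast_enable_mmio() above registers its undo step with devm_add_action_or_reset(), tying the cleanup to the underlying struct device rather than to DRM device release (which is what the removed ast_device_release() hook did). A hedged sketch of that pattern, with illustrative my_* names:

#include <linux/device.h>

struct my_hw {
        void __iomem *regs;
};

static void my_hw_disable(void *data)
{
        struct my_hw *hw = data;

        /* Undo whatever my_hw_enable() programmed. */
        (void)hw;
}

static int my_hw_enable(struct device *dev, struct my_hw *hw)
{
        /* ... program the hardware here ... */

        /*
         * Runs my_hw_disable() automatically on driver unbind; if the
         * registration itself fails, the action is invoked immediately
         * and the error is returned, so probe can simply propagate it.
         */
        return devm_add_action_or_reset(dev, my_hw_disable, hw);
}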
+ */ + + /* Patch AST2500/AST2510 */ + if ((pdev->revision & 0xf0) == 0x40) { + if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK)) + ast_patch_ahb_2500(ast); + } + + /* Double check that it's actually working */ + data = ast_read32(ast, 0xf004); + if ((data != 0xffffffff) && (data != 0x00)) { + ast->config_mode = ast_use_p2a; + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + scu_rev = ast_read32(ast, 0x1207c); + } } } - /* We have a P2A bridge but it's disabled */ - drm_info(dev, "P2A bridge disabled, using default configuration\n"); -} - -static int ast_detect_chip(struct drm_device *dev, bool *need_post) -{ - struct ast_device *ast = to_ast_device(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); - uint32_t jreg, scu_rev; + switch (ast->config_mode) { + case ast_use_defaults: + drm_info(dev, "Using default configuration\n"); + break; + case ast_use_dt: + drm_info(dev, "Using device-tree for configuration\n"); + break; + case ast_use_p2a: + drm_info(dev, "Using P2A bridge for configuration\n"); + break; + } /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) + * Identify chipset */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - - /* Enable extended register access */ - ast_open_key(ast); - ast_enable_mmio(dev); - - /* Find out whether P2A works or whether to use device-tree */ - ast_detect_config_mode(dev, &scu_rev); - - /* Identify chipset */ if (pdev->revision >= 0x50) { ast->chip = AST2600; drm_info(dev, "AST 2600 detected\n"); } else if (pdev->revision >= 0x40) { - ast->chip = AST2500; - drm_info(dev, "AST 2500 detected\n"); + switch (scu_rev & 0x300) { + case 0x0100: + ast->chip = AST2510; + drm_info(dev, "AST 2510 detected\n"); + break; + default: + ast->chip = AST2500; + drm_info(dev, "AST 2500 detected\n"); + } } else if (pdev->revision >= 0x30) { - ast->chip = AST2400; - drm_info(dev, "AST 2400 detected\n"); + switch (scu_rev & 0x300) { + case 0x0100: + ast->chip = AST1400; + drm_info(dev, "AST 1400 detected\n"); + break; + default: + ast->chip = AST2400; + drm_info(dev, "AST 2400 detected\n"); + } } else if (pdev->revision >= 0x20) { - ast->chip = AST2300; - drm_info(dev, "AST 2300 detected\n"); + switch (scu_rev & 0x300) { + case 0x0000: + ast->chip = AST1300; + drm_info(dev, "AST 1300 detected\n"); + break; + default: + ast->chip = AST2300; + drm_info(dev, "AST 2300 detected\n"); + break; + } } else if (pdev->revision >= 0x10) { switch (scu_rev & 0x0300) { case 0x0200: @@ -179,15 +201,21 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) drm_info(dev, "AST 2100 detected\n"); break; } - ast->vga2_clone = false; } else { ast->chip = AST2000; drm_info(dev, "AST 2000 detected\n"); } + return 0; +} + +static void ast_detect_widescreen(struct ast_device *ast) +{ + u8 jreg; + /* Check if we support wide screen */ - switch (ast->chip) { - case AST2000: + switch (AST_GEN(ast)) { + case 1: ast->support_wide_screen = false; break; default: @@ -198,20 +226,23 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - if (ast->chip == AST2300 && - (scu_rev & 0x300) == 0x0) /* ast1300 */ + if (ast->chip == AST1300) 
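The rewritten ast_device_config_init() keeps the old precedence, just without early returns: a device-tree SCU revision always wins, an enabled and verified P2A bridge comes second, and plain defaults are the fallback. Condensed into a sketch — the enumerator names match the driver's ast_config_mode, but their order and the helper itself are illustrative:

#include <stdbool.h>

enum ast_config_mode { ast_use_defaults, ast_use_dt, ast_use_p2a };

static enum ast_config_mode pick_config_mode(bool have_dt_scu_rev,
                                             bool p2a_present,
                                             bool p2a_works)
{
        if (have_dt_scu_rev)
                return ast_use_dt;	/* DT always wins */
        if (p2a_present && p2a_works)
                return ast_use_p2a;	/* probed, patched and verified */
        return ast_use_defaults;
}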
ast->support_wide_screen = true; - if (ast->chip == AST2400 && - (scu_rev & 0x300) == 0x100) /* ast1400 */ + if (ast->chip == AST1400) ast->support_wide_screen = true; - if (ast->chip == AST2500 && - scu_rev == 0x100) /* ast2510 */ + if (ast->chip == AST2510) ast->support_wide_screen = true; - if (ast->chip == AST2600) /* ast2600 */ + if (IS_AST_GEN7(ast)) ast->support_wide_screen = true; } break; } +} + +static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) +{ + struct drm_device *dev = &ast->base; + u8 jreg; /* Check 3rd Tx option (digital output afaik) */ ast->tx_chip_types |= AST_TX_NONE_BIT; @@ -224,15 +255,15 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) * is at power-on reset, otherwise we'll incorrectly "detect" a * SIL164 when there is none. */ - if (!*need_post) { + if (!need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); if (jreg & 0x80) ast->tx_chip_types = AST_TX_SIL164_BIT; } - if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) { + if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) { /* - * On AST2300 and 2400, look the configuration set by the SoC in + * On AST GEN4+, look the configuration set by the SoC in * the SOC scratch register #1 bits 11:8 (interestingly marked * as "reserved" in the spec) */ @@ -254,7 +285,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) case 0x0c: ast->tx_chip_types = AST_TX_DP501_BIT; } - } else if (ast->chip == AST2600) { + } else if (IS_AST_GEN7(ast)) { if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) == ASTDP_DPMCU_TX) { ast->tx_chip_types = AST_TX_ASTDP_BIT; @@ -271,8 +302,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) drm_info(dev, "Using DP501 DisplayPort transmitter\n"); if (ast->tx_chip_types & AST_TX_ASTDP_BIT) drm_info(dev, "Using ASPEED DisplayPort transmitter\n"); - - return 0; } static int ast_get_dram_info(struct drm_device *dev) @@ -286,7 +315,7 @@ static int ast_get_dram_info(struct drm_device *dev) case ast_use_dt: /* * If some properties are missing, use reasonable - * defaults for AST2400 + * defaults for GEN5 */ if (of_property_read_u32(np, "aspeed,mcr-configuration", &mcr_cfg)) @@ -309,7 +338,7 @@ static int ast_get_dram_info(struct drm_device *dev) default: ast->dram_bus_width = 16; ast->dram_type = AST_DRAM_1Gx16; - if (ast->chip == AST2500) + if (IS_AST_GEN6(ast)) ast->mclk = 800; else ast->mclk = 396; @@ -321,7 +350,7 @@ static int ast_get_dram_info(struct drm_device *dev) else ast->dram_bus_width = 32; - if (ast->chip == AST2500) { + if (IS_AST_GEN6(ast)) { switch (mcr_cfg & 0x03) { case 0: ast->dram_type = AST_DRAM_1Gx16; @@ -337,7 +366,7 @@ static int ast_get_dram_info(struct drm_device *dev) ast->dram_type = AST_DRAM_8Gx16; break; } - } else if (ast->chip == AST2300 || ast->chip == AST2400) { + } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) { switch (mcr_cfg & 0x03) { case 0: ast->dram_type = AST_DRAM_512Mx16; @@ -395,25 +424,13 @@ static int ast_get_dram_info(struct drm_device *dev) return 0; } -/* - * Run this function as part of the HW device cleanup; not - * when the DRM device gets released. 
- */ -static void ast_device_release(void *data) -{ - struct ast_device *ast = data; - - /* enable standard VGA decode */ - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); -} - struct ast_device *ast_device_create(const struct drm_driver *drv, struct pci_dev *pdev, unsigned long flags) { struct drm_device *dev; struct ast_device *ast; - bool need_post; + bool need_post = false; int ret = 0; ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); @@ -449,7 +466,30 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, return ERR_PTR(-EIO); } - ast_detect_chip(dev, &need_post); + if (!ast_is_vga_enabled(dev)) { + drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); + need_post = true; + } + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. + */ + if (need_post) + ast_enable_vga(dev); + + /* Enable extended register access */ + ast_open_key(ast); + ret = ast_enable_mmio(ast); + if (ret) + return ERR_PTR(ret); + + ret = ast_device_config_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_detect_widescreen(ast); + ast_detect_tx_chip(ast, need_post); ret = ast_get_dram_info(dev); if (ret) @@ -477,9 +517,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (ret) return ERR_PTR(ret); - ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast); - if (ret) - return ERR_PTR(ret); - return ast; } diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index e16af60deef9..bc174bd933b9 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -38,8 +38,6 @@ static u32 ast_get_vram_size(struct ast_device *ast) u8 jreg; u32 vram_size; - ast_open_key(ast); - vram_size = AST_VIDMEM_DEFAULT_SIZE; jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff); switch (jreg & 3) { diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index b3c670af6ef2..32f04ec6c386 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -342,7 +342,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; u16 temp, precache = 0; - if ((ast->chip == AST2500 || ast->chip == AST2600) && + if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) && (vbios_mode->enh_table->flags & AST2500PreCatchCRT)) precache = 40; @@ -384,7 +384,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD); // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); - if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080)) + if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080)) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02); else ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00); @@ -466,7 +466,7 @@ static void ast_set_dclk_reg(struct ast_device *ast, { const struct ast_vbios_dclk_info *clk_info; - if ((ast->chip == AST2500) || (ast->chip == AST2600)) + if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index]; else clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; @@ -510,17 +510,13 @@ static void ast_set_color_reg(struct ast_device *ast, static void ast_set_crtthd_reg(struct ast_device *ast) { /* Set Threshold */ - if (ast->chip == AST2600) { + if (IS_AST_GEN7(ast)) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0); - } 
else if (ast->chip == AST2300 || ast->chip == AST2400 || - ast->chip == AST2500) { + } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); - } else if (ast->chip == AST2100 || - ast->chip == AST1100 || - ast->chip == AST2200 || - ast->chip == AST2150) { + } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f); } else { @@ -1082,9 +1078,10 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode if ((mode->hdisplay == 1152) && (mode->vdisplay == 864)) return MODE_OK; - if ((ast->chip == AST2100) || (ast->chip == AST2200) || - (ast->chip == AST2300) || (ast->chip == AST2400) || - (ast->chip == AST2500) || (ast->chip == AST2600)) { + if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?) + (ast->chip == AST2200) || // GEN3, but not AST2150 (?) + IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || + IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) { if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) return MODE_OK; @@ -1585,8 +1582,20 @@ err_drm_connector_update_edid_property: return 0; } +static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct ast_device *ast = to_ast_device(connector->dev); + + if (ast_dp501_is_connected(ast)) + return connector_status_connected; + return connector_status_disconnected; +} + static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = { .get_modes = ast_dp501_connector_helper_get_modes, + .detect_ctx = ast_dp501_connector_helper_detect_ctx, }; static const struct drm_connector_funcs ast_dp501_connector_funcs = { @@ -1611,7 +1620,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; } @@ -1683,8 +1692,20 @@ err_drm_connector_update_edid_property: return 0; } +static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct ast_device *ast = to_ast_device(connector->dev); + + if (ast_astdp_is_connected(ast)) + return connector_status_connected; + return connector_status_disconnected; +} + static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = { .get_modes = ast_astdp_connector_helper_get_modes, + .detect_ctx = ast_astdp_connector_helper_detect_ctx, }; static const struct drm_connector_funcs ast_astdp_connector_funcs = { @@ -1709,7 +1730,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; } @@ -1738,6 +1759,60 @@ static int ast_astdp_output_init(struct ast_device *ast) return 0; } +/* + * BMC virtual Connector + */ + +static const struct drm_encoder_funcs ast_bmc_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector) +{ + return drm_add_modes_noedid(connector, 4096, 4096); +} + +static const struct drm_connector_helper_funcs 
ast_bmc_connector_helper_funcs = { + .get_modes = ast_bmc_connector_helper_get_modes, +}; + +static const struct drm_connector_funcs ast_bmc_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_bmc_output_init(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.bmc.encoder; + struct drm_connector *connector = &ast->output.bmc.connector; + int ret; + + ret = drm_encoder_init(dev, encoder, + &ast_bmc_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, "ast_bmc"); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs); + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + /* * Mode config */ @@ -1800,12 +1875,12 @@ int ast_mode_config_init(struct ast_device *ast) dev->mode_config.min_height = 0; dev->mode_config.preferred_depth = 24; - if (ast->chip == AST2100 || - ast->chip == AST2200 || - ast->chip == AST2300 || - ast->chip == AST2400 || - ast->chip == AST2500 || - ast->chip == AST2600) { + if (ast->chip == AST2100 || // GEN2, but not AST1100 (?) + ast->chip == AST2200 || // GEN3, but not AST2150 (?) + IS_AST_GEN7(ast) || + IS_AST_GEN6(ast) || + IS_AST_GEN5(ast) || + IS_AST_GEN4(ast)) { dev->mode_config.max_width = 1920; dev->mode_config.max_height = 2048; } else { @@ -1845,8 +1920,13 @@ int ast_mode_config_init(struct ast_device *ast) if (ret) return ret; } + ret = ast_bmc_output_init(ast); + if (ret) + return ret; drm_mode_config_reset(dev); + drm_kms_helper_poll_init(dev); + return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index a005aec18a02..13e15173f2c5 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -37,41 +37,13 @@ static void ast_post_chip_2300(struct drm_device *dev); static void ast_post_chip_2500(struct drm_device *dev); -void ast_enable_vga(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - - ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01); - ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01); -} - -void ast_enable_mmio(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); -} - - -bool ast_is_vga_enabled(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - u8 ch; - - ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); - - return !!(ch & 0x01); -} - static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; -static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff }; static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; static void ast_set_def_ext_reg(struct drm_device *dev) { struct ast_device *ast = to_ast_device(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); u8 i, index, reg; const u8 *ext_reg_info; @@ -79,13 +51,9 @@ ast_set_def_ext_reg(struct drm_device *dev) for (i = 0x81; i <= 0x9f; i++) ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00); - if (ast->chip == AST2300 || ast->chip == AST2400 || - ast->chip == AST2500) { - if (pdev->revision 
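The detect_ctx callbacks added above only pay off because ast_mode_config_init() now also calls drm_kms_helper_poll_init(): the probe helper then periodically re-runs detection on every connector whose .polled flags are set and raises a hotplug event when the status changes. A sketch of that contract, with hypothetical my_* names (my_sink_is_connected() stands in for transmitter-side code):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>

static int my_connector_detect_ctx(struct drm_connector *connector,
                                   struct drm_modeset_acquire_ctx *ctx,
                                   bool force)
{
        if (my_sink_is_connected(connector))	/* hypothetical helper */
                return connector_status_connected;
        return connector_status_disconnected;
}

static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
        .detect_ctx = my_connector_detect_ctx,
};

static const struct drm_connector_funcs my_connector_funcs = {
        .reset = drm_atomic_helper_connector_reset,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = drm_connector_cleanup,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int my_connector_init(struct drm_device *dev,
                             struct drm_connector *connector)
{
        int ret;

        ret = drm_connector_init(dev, connector, &my_connector_funcs,
                                 DRM_MODE_CONNECTOR_DisplayPort);
        if (ret)
                return ret;

        drm_connector_helper_add(connector, &my_connector_helper_funcs);

        /*
         * Poll for both plug and unplug; POLL_CONNECT alone never
         * reports removal. drm_kms_helper_poll_init() must run once
         * after mode-config setup for either flag to matter.
         */
        connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                            DRM_CONNECTOR_POLL_DISCONNECT;
        return 0;
}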
>= 0x20) - ext_reg_info = extreginfo_ast2300; - else - ext_reg_info = extreginfo_ast2300a0; - } else + if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) + ext_reg_info = extreginfo_ast2300; + else ext_reg_info = extreginfo; index = 0xa0; @@ -104,8 +72,7 @@ ast_set_def_ext_reg(struct drm_device *dev) /* Enable RAMDAC for A1 */ reg = 0x04; - if (ast->chip == AST2300 || ast->chip == AST2400 || - ast->chip == AST2500) + if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) reg |= 0x20; ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); } @@ -281,7 +248,7 @@ static void ast_init_dram_reg(struct drm_device *dev) j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); if ((j & 0x80) == 0) { /* VGA only */ - if (ast->chip == AST2000) { + if (IS_AST_GEN1(ast)) { dram_reg_info = ast2000_dram_table_data; ast_write32(ast, 0xf004, 0x1e6e0000); ast_write32(ast, 0xf000, 0x1); @@ -290,8 +257,8 @@ static void ast_init_dram_reg(struct drm_device *dev) do { ; } while (ast_read32(ast, 0x10100) != 0xa8); - } else {/* AST2100/1100 */ - if (ast->chip == AST2100 || ast->chip == 2200) + } else { /* GEN2/GEN3 */ + if (ast->chip == AST2100 || ast->chip == AST2200) dram_reg_info = ast2100_dram_table_data; else dram_reg_info = ast1100_dram_table_data; @@ -313,7 +280,7 @@ static void ast_init_dram_reg(struct drm_device *dev) if (dram_reg_info->index == 0xff00) {/* delay fn */ for (i = 0; i < 15; i++) udelay(dram_reg_info->data); - } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) { + } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) { data = dram_reg_info->data; if (ast->dram_type == AST_DRAM_1Gx16) data = 0x00000d89; @@ -339,15 +306,13 @@ static void ast_init_dram_reg(struct drm_device *dev) cbrdlli_ast2150(ast, 32); /* 32 bits */ } - switch (ast->chip) { - case AST2000: + switch (AST_GEN(ast)) { + case 1: temp = ast_read32(ast, 0x10140); ast_write32(ast, 0x10140, temp | 0x40); break; - case AST1100: - case AST2100: - case AST2200: - case AST2150: + case 2: + case 3: temp = ast_read32(ast, 0x1200c); ast_write32(ast, 0x1200c, temp & 0xfffffffd); temp = ast_read32(ast, 0x12040); @@ -367,25 +332,16 @@ static void ast_init_dram_reg(struct drm_device *dev) void ast_post_gpu(struct drm_device *dev) { struct ast_device *ast = to_ast_device(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); - u32 reg; - pci_read_config_dword(pdev, 0x04, ®); - reg |= 0x3; - pci_write_config_dword(pdev, 0x04, reg); - - ast_enable_vga(dev); - ast_open_key(ast); - ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->chip == AST2600) { + if (IS_AST_GEN7(ast)) { if (ast->tx_chip_types & AST_TX_ASTDP_BIT) ast_dp_launch(dev); } else if (ast->config_mode == ast_use_p2a) { - if (ast->chip == AST2500) + if (IS_AST_GEN6(ast)) ast_post_chip_2500(dev); - else if (ast->chip == AST2300 || ast->chip == AST2400) + else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) ast_post_chip_2300(dev); else ast_init_dram_reg(dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 58184cd6ab0b..cc5cf4c2faf7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); struct regmap *regmap = crtc->dc->hlcdc->regmap; struct drm_display_mode *adj = &c->state->adjusted_mode; + struct drm_encoder *encoder = NULL, *en_iter; + struct drm_connector 
*connector = NULL; struct atmel_hlcdc_crtc_state *state; + struct drm_device *ddev = c->dev; + struct drm_connector_list_iter iter; unsigned long mode_rate; struct videomode vm; unsigned long prate; @@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) unsigned int cfg = 0; int div, ret; + /* get encoder from crtc */ + drm_for_each_encoder(en_iter, ddev) { + if (en_iter->crtc == c) { + encoder = en_iter; + break; + } + } + + if (encoder) { + /* Get the connector from encoder */ + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) + if (connector->encoder == encoder) + break; + drm_connector_list_iter_end(&iter); + } + ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk); if (ret) return; @@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) cfg |= ATMEL_HLCDC_CLKDIV(div); + if (connector && + connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + cfg |= ATMEL_HLCDC_CLKPOL; + regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg); state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 29603561d501..fa0f9a93d50d 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -773,15 +773,13 @@ err_put: return ret; } -static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) +static void atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) { struct drm_device *ddev = platform_get_drvdata(pdev); drm_dev_unregister(ddev); atmel_hlcdc_dc_unload(ddev); drm_dev_put(ddev); - - return 0; } static int atmel_hlcdc_dc_drm_suspend(struct device *dev) @@ -826,7 +824,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = { static struct platform_driver atmel_hlcdc_dc_platform_driver = { .probe = atmel_hlcdc_dc_drm_probe, - .remove = atmel_hlcdc_dc_drm_remove, + .remove_new = atmel_hlcdc_dc_drm_remove, .driver = { .name = "atmel-hlcdc-display-controller", .pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops), diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 82c68b042444..44a660a4bdbf 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -74,19 +74,19 @@ config DRM_FSL_LDB Support for i.MX8MP DPI-to-LVDS on-SoC encoder. config DRM_ITE_IT6505 - tristate "ITE IT6505 DisplayPort bridge" - depends on OF + tristate "ITE IT6505 DisplayPort bridge" + depends on OF select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER - select DRM_DP_AUX_BUS - select DRM_KMS_HELPER - select DRM_DP_HELPER - select EXTCON - select CRYPTO - select CRYPTO_HASH - help - ITE IT6505 DisplayPort bridge chip driver. + select DRM_DP_AUX_BUS + select DRM_KMS_HELPER + select DRM_DP_HELPER + select EXTCON + select CRYPTO + select CRYPTO_HASH + help + ITE IT6505 DisplayPort bridge chip driver. 
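A CRTC carries no direct connector pointer, so the atmel-hlcdc change above walks the encoder list to find the encoder bound to this CRTC, then walks the connector list to find the connector feeding that encoder, before honouring DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE. The same lookup, condensed into a standalone helper (illustrative, not part of the patch):

#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>

static struct drm_connector *crtc_get_connector(struct drm_crtc *crtc)
{
        struct drm_connector_list_iter iter;
        struct drm_connector *connector, *found = NULL;
        struct drm_encoder *encoder;

        drm_for_each_encoder(encoder, crtc->dev) {
                if (encoder->crtc != crtc)
                        continue;

                drm_connector_list_iter_begin(crtc->dev, &iter);
                drm_for_each_connector_iter(connector, &iter) {
                        if (connector->encoder == encoder) {
                                found = connector;
                                break;
                        }
                }
                drm_connector_list_iter_end(&iter);
                break;
        }

        return found;	/* may be NULL; callers must handle that */
}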
config DRM_LONTIUM_LT8912B tristate "Lontium LT8912B DSI/HDMI bridge" diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index 99964f5a5457..2a6b91f752cb 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -7,7 +7,6 @@ #include #include -#include #include #include diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 2254457ab5d0..2611afd2c1c1 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include @@ -786,8 +786,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511, else low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; - regmap_update_bits(adv7511->regmap, 0xfb, - 0x6, low_refresh_rate << 1); + if (adv7511->type == ADV7511) + regmap_update_bits(adv7511->regmap, 0xfb, + 0x6, low_refresh_rate << 1); + else + regmap_update_bits(adv7511->regmap, 0x4a, + 0xc, low_refresh_rate << 2); + regmap_update_bits(adv7511->regmap, 0x17, 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 72ab2ab77081..c9e35731e6a1 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -813,7 +813,7 @@ MODULE_DEVICE_TABLE(of, anx6345_match_table); static struct i2c_driver anx6345_driver = { .driver = { .name = "anx6345", - .of_match_table = of_match_ptr(anx6345_match_table), + .of_match_table = anx6345_match_table, }, .probe = anx6345_i2c_probe, .remove = anx6345_i2c_remove, diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c index 06a3e3243e19..800555aef97f 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c @@ -1373,7 +1373,6 @@ static const struct i2c_device_id anx78xx_id[] = { }; MODULE_DEVICE_TABLE(i2c, anx78xx_id); -#if IS_ENABLED(CONFIG_OF) static const struct of_device_id anx78xx_match_table[] = { { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses }, { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses }, @@ -1382,12 +1381,11 @@ static const struct of_device_id anx78xx_match_table[] = { { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, anx78xx_match_table); -#endif static struct i2c_driver anx78xx_driver = { .driver = { .name = "anx7814", - .of_match_table = of_match_ptr(anx78xx_match_table), + .of_match_table = anx78xx_match_table, }, .probe = anx78xx_i2c_probe, .remove = anx78xx_i2c_remove, diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 8b985efdc086..51abe42c639e 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -206,7 +206,7 @@ static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx) static int wait_aux_op_finish(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int val; int ret; @@ -233,7 +233,7 @@ static int wait_aux_op_finish(struct anx7625_data *ctx) static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address, u8 len, u8 *buf) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; u8 addrh, addrm, addrl; u8 cmd; @@ -426,7 +426,7 @@ static 
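Several drivers in this series drop of_match_ptr() and the IS_ENABLED(CONFIG_OF) guard around their match tables. The rationale, sketched: with CONFIG_OF=n the macro expands to NULL, leaving the table defined but unreferenced (a warning with W=1 builds), while these bridges only ever probe from devicetree anyway. Illustrative skeleton with hypothetical names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int my_probe(struct i2c_client *client)
{
        return 0;
}

static const struct of_device_id my_dt_ids[] = {
        { .compatible = "vendor,chip" },	/* hypothetical */
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_dt_ids);

static struct i2c_driver my_driver = {
        .driver = {
                .name = "chip",
                /* no of_match_ptr(): the table is always referenced */
                .of_match_table = my_dt_ids,
        },
        .probe = my_probe,
};
module_i2c_driver(my_driver);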
int anx7625_odfc_config(struct anx7625_data *ctx, u8 post_divider) { int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Config input reference clock frequency 27MHz/19.2MHz */ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16, @@ -476,7 +476,7 @@ static int anx7625_set_k_value(struct anx7625_data *ctx) static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; unsigned long m, n; u16 htotal; int ret; @@ -574,7 +574,7 @@ static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx) static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx) { int val; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Swap MIPI-DSI data lane 3 P and N */ val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP); @@ -591,7 +591,7 @@ static int anx7625_api_dsi_config(struct anx7625_data *ctx) { int val, ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Swap MIPI-DSI data lane 3 P and N */ ret = anx7625_swap_dsi_lane3(ctx); @@ -656,7 +656,7 @@ static int anx7625_api_dsi_config(struct anx7625_data *ctx) static int anx7625_dsi_config(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n"); @@ -688,7 +688,7 @@ static int anx7625_dsi_config(struct anx7625_data *ctx) static int anx7625_api_dpi_config(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; u16 freq = ctx->dt.pixelclock.min / 1000; int ret; @@ -719,7 +719,7 @@ static int anx7625_api_dpi_config(struct anx7625_data *ctx) static int anx7625_dpi_config(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n"); @@ -764,7 +764,7 @@ static int anx7625_read_flash_status(struct anx7625_data *ctx) static int anx7625_hdcp_key_probe(struct anx7625_data *ctx) { int ret, val; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; u8 ident[FLASH_BUF_LEN]; ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, @@ -814,7 +814,7 @@ static int anx7625_hdcp_key_probe(struct anx7625_data *ctx) static int anx7625_hdcp_key_load(struct anx7625_data *ctx) { int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Select HDCP 1.4 KEY */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, @@ -842,7 +842,7 @@ static int anx7625_hdcp_key_load(struct anx7625_data *ctx) static int anx7625_hdcp_disable(struct anx7625_data *ctx) { int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; dev_dbg(dev, "disable HDCP 1.4\n"); @@ -863,7 +863,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx) { u8 bcap; int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; ret = anx7625_hdcp_key_probe(ctx); if (ret) { @@ -872,11 +872,11 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx) } /* Read downstream capability */ - ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap); + ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap); if (ret < 0) return ret; - if (!(bcap & 0x01)) { + if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) { pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap); return 0; } @@ -921,7 +921,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx) static void 
anx7625_dp_start(struct anx7625_data *ctx) { int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; u8 data; if (!ctx->display_timing_valid) { @@ -931,8 +931,8 @@ static void anx7625_dp_start(struct anx7625_data *ctx) dev_dbg(dev, "set downstream sink into normal\n"); /* Downstream sink enter into normal mode */ - data = 1; - ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data); + data = DP_SET_POWER_D0; + ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data); if (ret < 0) dev_err(dev, "IO error : set sink into normal mode fail\n"); @@ -954,7 +954,7 @@ static void anx7625_dp_start(struct anx7625_data *ctx) static void anx7625_dp_stop(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; u8 data; @@ -971,8 +971,8 @@ static void anx7625_dp_stop(struct anx7625_data *ctx) dev_dbg(dev, "notify downstream enter into standby\n"); /* Downstream monitor enter into standby mode */ - data = 2; - ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data); + data = DP_SET_POWER_D3; + ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data); if (ret < 0) DRM_DEV_ERROR(dev, "IO error : mute video fail\n"); @@ -1019,7 +1019,7 @@ static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd) static int sp_tx_get_edid_block(struct anx7625_data *ctx) { int c = 0; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; sp_tx_aux_wr(ctx, 0x7e); sp_tx_aux_rd(ctx, 0x01); @@ -1041,7 +1041,7 @@ static int edid_read(struct anx7625_data *ctx, u8 offset, u8 *pblock_buf) { int ret, cnt; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) { sp_tx_aux_wr(ctx, offset); @@ -1072,7 +1072,7 @@ static int segments_edid_read(struct anx7625_data *ctx, { u8 cnt; int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Write address only */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, @@ -1127,7 +1127,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, u8 i, j; int g_edid_break = 0; int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Address initial */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, @@ -1234,7 +1234,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, static void anx7625_power_on(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret, i; if (!ctx->pdata.low_power_mode) { @@ -1270,7 +1270,7 @@ reg_err: static void anx7625_power_standby(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; if (!ctx->pdata.low_power_mode) { @@ -1300,7 +1300,7 @@ static void anx7625_config(struct anx7625_data *ctx) static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; /* Reset main ocm */ @@ -1320,7 +1320,7 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) static int anx7625_ocm_loading_check(struct anx7625_data *ctx) { int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Check interface workable */ ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, @@ -1366,7 +1366,7 @@ static void anx7625_power_on_init(struct anx7625_data *ctx) static void anx7625_init_gpio(struct anx7625_data *platform) { - struct device *dev = &platform->client->dev; + struct device 
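The anx7625 hunks above replace raw DPCD addresses and values (0x68028, 0x000600, 1, 2) with the mnemonics from the DRM DP headers: DP_AUX_HDCP_BCAPS, DP_SET_POWER and DP_SET_POWER_D0/D3. For reference, the same power-state write through the generic DPCD helper — a sketch, since anx7625 itself goes through its private anx7625_aux_trans():

#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

static int sink_set_power(struct drm_dp_aux *aux, bool wake)
{
        u8 val = wake ? DP_SET_POWER_D0 : DP_SET_POWER_D3;
        ssize_t ret;

        /* Returns the number of bytes written (1) or a negative error. */
        ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, val);

        return ret < 0 ? ret : 0;
}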
*dev = platform->dev; DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n"); @@ -1406,7 +1406,7 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx) static void anx7625_start_dp_work(struct anx7625_data *ctx) { int ret; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; if (ctx->hpd_high_cnt >= 2) { DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n"); @@ -1458,7 +1458,7 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx, unsigned long wait_us) { int ret, val; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* Interrupt mode, no need poll HPD status, just return */ if (ctx->pdata.intp_irq) @@ -1492,7 +1492,7 @@ static int anx7625_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us) { struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; pm_runtime_get_sync(dev); @@ -1525,7 +1525,7 @@ static void anx7625_dp_adjust_swing(struct anx7625_data *ctx) static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; /* HPD changed */ DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n", @@ -1545,7 +1545,7 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) static int anx7625_hpd_change_detect(struct anx7625_data *ctx) { int intr_vector, status; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client, INTR_ALERT_1, 0xFF); @@ -1593,18 +1593,20 @@ static void anx7625_work_func(struct work_struct *work) mutex_lock(&ctx->lock); - if (pm_runtime_suspended(&ctx->client->dev)) - goto unlock; + if (pm_runtime_suspended(ctx->dev)) { + mutex_unlock(&ctx->lock); + return; + } event = anx7625_hpd_change_detect(ctx); + + mutex_unlock(&ctx->lock); + if (event < 0) - goto unlock; + return; if (ctx->bridge_attached) drm_helper_hpd_irq_event(ctx->bridge.dev); - -unlock: - mutex_unlock(&ctx->lock); } static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data) @@ -1735,7 +1737,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; u8 request = msg->request & ~DP_AUX_I2C_MOT; int ret = 0; @@ -1761,7 +1763,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, static struct edid *anx7625_get_edid(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; struct s_edid_data *p_edid = &ctx->slimport_edid_p; int edid_num; u8 *edid; @@ -1797,7 +1799,7 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx) static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n"); @@ -2006,7 +2008,7 @@ static const struct hdmi_codec_ops anx7625_codec_ops = { static void anx7625_unregister_audio(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; if (ctx->audio_pdev) { platform_device_unregister(ctx->audio_pdev); @@ -2042,7 +2044,7 @@ static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx) static int anx7625_setup_dsi_device(struct anx7625_data *ctx) { struct mipi_dsi_device *dsi; - struct device *dev = &ctx->client->dev; + struct device 
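The anx7625_work_func() rework above narrows the mutex scope so drm_helper_hpd_irq_event() runs without the driver lock held, keeping DRM's own probe/hotplug locking from nesting inside it. The resulting shape, as a skeleton (the work_struct member name is illustrative):

#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <drm/drm_probe_helper.h>

static void hpd_work(struct work_struct *work)
{
        struct anx7625_data *ctx =
                container_of(work, struct anx7625_data, work);	/* illustrative member */
        int event;

        mutex_lock(&ctx->lock);
        if (pm_runtime_suspended(ctx->dev)) {
                mutex_unlock(&ctx->lock);
                return;
        }
        event = anx7625_hpd_change_detect(ctx);
        mutex_unlock(&ctx->lock);	/* drop before calling into DRM */

        if (event >= 0 && ctx->bridge_attached)
                drm_helper_hpd_irq_event(ctx->bridge.dev);
}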
*dev = ctx->dev; struct mipi_dsi_host *host; const struct mipi_dsi_device_info info = { .type = "anx7625", @@ -2076,7 +2078,7 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx) static int anx7625_attach_dsi(struct anx7625_data *ctx) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int ret; DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n"); @@ -2102,7 +2104,7 @@ static void hdcp_check_work_func(struct work_struct *work) dwork = to_delayed_work(work); ctx = container_of(dwork, struct anx7625_data, hdcp_work); - dev = &ctx->client->dev; + dev = ctx->dev; if (!ctx->connector) { dev_err(dev, "HDCP connector is null!"); @@ -2129,7 +2131,7 @@ static void hdcp_check_work_func(struct work_struct *work) static int anx7625_connector_atomic_check(struct anx7625_data *ctx, struct drm_connector_state *state) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; int cp; dev_dbg(dev, "hdcp state check\n"); @@ -2174,7 +2176,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, { struct anx7625_data *ctx = bridge_to_anx7625(bridge); int err; - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n"); if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) @@ -2218,7 +2220,7 @@ anx7625_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n"); @@ -2239,7 +2241,7 @@ static void anx7625_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n"); @@ -2285,7 +2287,7 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge, struct drm_display_mode *adj) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; u32 hsync, hfp, hbp, hblanking; u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj; u32 vref, adj_clock; @@ -2403,7 +2405,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge, struct drm_connector_state *conn_state) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; dev_dbg(dev, "drm bridge atomic check\n"); @@ -2417,7 +2419,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *state) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; struct drm_connector *connector; dev_dbg(dev, "drm atomic enable\n"); @@ -2444,7 +2446,7 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; dev_dbg(dev, "drm atomic disable\n"); @@ -2458,7 +2460,7 @@ static enum drm_connector_status anx7625_bridge_detect(struct drm_bridge *bridge) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n"); @@ -2469,7 +2471,7 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { 
struct anx7625_data *ctx = bridge_to_anx7625(bridge); - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n"); @@ -2494,7 +2496,7 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = { static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx, struct i2c_client *client) { - struct device *dev = &ctx->client->dev; + struct device *dev = ctx->dev; ctx->i2c.tx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter, TX_P0_ADDR >> 1); @@ -2629,7 +2631,7 @@ static int anx7625_i2c_probe(struct i2c_client *client) pdata = &platform->pdata; - platform->client = client; + platform->dev = &client->dev; i2c_set_clientdata(client, platform); pdata->supplies[0].supply = "vdd10"; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index 14f33d6be289..5af819611ebc 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -458,7 +458,7 @@ struct anx7625_data { int hdcp_cp; /* Lock for work queue */ struct mutex lock; - struct i2c_client *client; + struct device *dev; struct anx7625_i2c_client i2c; struct i2c_client *last_client; struct timer_list hdcp_timer; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index f50d65f54314..7457d38622b0 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -14,8 +14,7 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index f6822dfa3805..6af565ac307a 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -54,6 +53,26 @@ #include "cdns-mhdp8546-hdcp.h" #include "cdns-mhdp8546-j721e.h" +static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + /* Enable SW event interrupts */ + if (mhdp->bridge_attached) + writel(readl(mhdp->regs + CDNS_APB_INT_MASK) & + ~CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); +} + +static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + writel(readl(mhdp->regs + CDNS_APB_INT_MASK) | + CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); +} + static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp) { int ret, empty; @@ -749,9 +768,7 @@ static int cdns_mhdp_fw_activate(const struct firmware *fw, * MHDP_HW_STOPPED happens only due to driver removal when * bridge should already be detached. 
*/ - if (mhdp->bridge_attached) - writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, - mhdp->regs + CDNS_APB_INT_MASK); + cdns_mhdp_bridge_hpd_enable(&mhdp->bridge); spin_unlock(&mhdp->start_lock); @@ -1740,8 +1757,7 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge, /* Enable SW event interrupts */ if (hw_ready) - writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, - mhdp->regs + CDNS_APB_INT_MASK); + cdns_mhdp_bridge_hpd_enable(bridge); return 0; aux_unregister: @@ -2146,6 +2162,27 @@ cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge) return &cdns_mhdp_state->base; } +static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts; + + *num_input_fmts = 0; + + input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + + *num_input_fmts = 1; + input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36; + + return input_fmts; +} + static int cdns_mhdp_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, @@ -2165,6 +2202,13 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge, return -EINVAL; } + /* + * There might be flags negotiation supported in future. + * Set the bus flags in atomic_check statically for now. + */ + if (mhdp->info) + bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags; + mutex_unlock(&mhdp->link_mutex); return 0; } @@ -2184,23 +2228,6 @@ static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge, return cdns_mhdp_get_edid(mhdp, connector); } -static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge) -{ - struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); - - /* Enable SW event interrupts */ - if (mhdp->bridge_attached) - writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, - mhdp->regs + CDNS_APB_INT_MASK); -} - -static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge) -{ - struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); - - writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK); -} - static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { .atomic_enable = cdns_mhdp_atomic_enable, .atomic_disable = cdns_mhdp_atomic_disable, @@ -2210,6 +2237,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state, .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state, .atomic_reset = cdns_mhdp_bridge_atomic_reset, + .atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts, .detect = cdns_mhdp_bridge_detect, .get_edid = cdns_mhdp_bridge_get_edid, .hpd_enable = cdns_mhdp_bridge_hpd_enable, @@ -2529,8 +2557,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev) mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; - if (mhdp->info) - mhdp->bridge.timings = mhdp->info->timings; ret = phy_init(mhdp->phy); if (ret) { @@ -2617,7 +2643,7 @@ static const struct of_device_id mhdp_ids[] = { #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E { .compatible = "ti,j721e-mhdp8546", .data = &(const struct cdns_mhdp_platform_info) { - .timings = &mhdp_ti_j721e_bridge_timings, + .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags, .ops = &mhdp_ti_j721e_ops, }, }, @@ -2629,7 +2655,7 @@ MODULE_DEVICE_TABLE(of, mhdp_ids); static struct platform_driver mhdp_driver = { .driver = { .name = 
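cdns_mhdp_get_input_bus_fmts() above follows the standard .atomic_get_input_bus_fmts contract: return a kmalloc'ed array that the atomic bridge core later kfree()s, report its length through *num_input_fmts, and return NULL with a zero count on allocation failure. A generic sketch of the same shape:

#include <linux/media-bus-format.h>
#include <linux/slab.h>
#include <drm/drm_bridge.h>

static u32 *my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
                                         struct drm_bridge_state *bridge_state,
                                         struct drm_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state,
                                         u32 output_fmt,
                                         unsigned int *num_input_fmts)
{
        u32 *input_fmts;

        input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
        if (!input_fmts) {
                *num_input_fmts = 0;	/* caller treats this as "none" */
                return NULL;
        }

        input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;
        *num_input_fmts = 1;

        return input_fmts;	/* freed with kfree() by the caller */
}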
"cdns-mhdp8546", - .of_match_table = of_match_ptr(mhdp_ids), + .of_match_table = mhdp_ids, }, .probe = cdns_mhdp_probe, .remove = cdns_mhdp_remove, diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h index bedddd510d17..bad2fc0c7306 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h @@ -336,7 +336,7 @@ struct cdns_mhdp_bridge_state { }; struct cdns_mhdp_platform_info { - const struct drm_bridge_timings *timings; + const u32 *input_bus_flags; const struct mhdp_platform_ops *ops; }; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c index dfe1b59514f7..12d04be4e242 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c @@ -71,8 +71,7 @@ const struct mhdp_platform_ops mhdp_ti_j721e_ops = { .disable = cdns_mhdp_j721e_disable, }; -const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = { - .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | - DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | - DRM_BUS_FLAG_DE_HIGH, -}; +const u32 +mhdp_ti_j721e_bridge_input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_DE_HIGH; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h index 97d20d115a24..5ddca07a4255 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h @@ -14,6 +14,6 @@ struct mhdp_platform_ops; extern const struct mhdp_platform_ops mhdp_ti_j721e_ops; -extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings; +extern const u32 mhdp_ti_j721e_bridge_input_bus_flags; #endif /* !CDNS_MHDP8546_J721E_H */ diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index 8bfce21d6b90..d205e755e524 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c index a854eb84e399..483c28c7fc99 100644 --- a/drivers/gpu/drm/bridge/chrontel-ch7033.c +++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c @@ -607,7 +607,7 @@ static struct i2c_driver ch7033_driver = { .remove = ch7033_remove, .driver = { .name = "ch7033", - .of_match_table = of_match_ptr(ch7033_dt_ids), + .of_match_table = ch7033_dt_ids, }, .id_table = ch7033_ids, }; diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index f7f436cf96e0..08bd5695ddae 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index b8e52156b07a..0e4bac7dd04f 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c index 386032a02599..21471a9a28b2 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c @@ -9,9 +9,9 @@ #include #include #include -#include #include 
#include +#include #include #include diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index c806576b1e22..7984da9c0a35 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index aadb396508c5..2f300f5ca051 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -404,7 +404,7 @@ struct debugfs_entries { struct it6505 { struct drm_dp_aux aux; struct drm_bridge bridge; - struct i2c_client *client; + struct device *dev; struct it6505_drm_dp_link link; struct it6505_platform_data pdata; /* @@ -524,7 +524,7 @@ static int it6505_read(struct it6505 *it6505, unsigned int reg_addr) { unsigned int value; int err; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; if (!it6505->powered) return -ENODEV; @@ -542,7 +542,7 @@ static int it6505_write(struct it6505 *it6505, unsigned int reg_addr, unsigned int reg_val) { int err; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; if (!it6505->powered) return -ENODEV; @@ -562,7 +562,7 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg, unsigned int mask, unsigned int value) { int err; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; if (!it6505->powered) return -ENODEV; @@ -580,7 +580,7 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg, static void it6505_debug_print(struct it6505 *it6505, unsigned int reg, const char *prefix) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int val; if (!drm_debug_enabled(DRM_UT_DRIVER)) @@ -599,7 +599,7 @@ static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset) { u8 value; int ret; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value); if (ret < 0) { @@ -613,7 +613,7 @@ static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset, u8 datain) { int ret; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain); if (ret < 0) { @@ -626,7 +626,7 @@ static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset, static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num) { int ret; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num); @@ -643,7 +643,7 @@ static void it6505_dump(struct it6505 *it6505) { unsigned int i, j; u8 regs[16]; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; for (i = 0; i <= 0xff; i += 16) { for (j = 0; j < 16; j++) @@ -682,7 +682,7 @@ static int it6505_read_word(struct it6505 *it6505, unsigned int reg) static void it6505_calc_video_info(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int hsync_pol, vsync_pol, interlaced; int htotal, hdes, hdew, hfph, hsyncw; int vtotal, vdes, vdew, vfph, vsyncw; @@ -926,7 +926,7 @@ static int it6505_aux_wait(struct it6505 *it6505) { int status; unsigned long timeout; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1; @@ -1141,7 +1141,7 @@ static 
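Both anx7625 and it6505 swap their cached struct i2c_client pointer for a plain struct device pointer, since every user only ever needed &client->dev. The pattern, sketched with hypothetical names:

#include <linux/i2c.h>
#include <linux/slab.h>

struct my_bridge {
        struct device *dev;	/* was: struct i2c_client *client */
};

static int my_probe(struct i2c_client *client)
{
        struct my_bridge *ctx;

        ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->dev = &client->dev;	/* dereference once, at probe time */
        i2c_set_clientdata(client, ctx);
        return 0;
}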
int it6505_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct it6505 *it6505 = data; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; enum aux_cmd_reply reply; int offset, ret, aux_retry = 100; @@ -1201,7 +1201,7 @@ static int it6505_send_video_infoframe(struct it6505 *it6505, { u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; int err; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer)); if (err < 0) { @@ -1231,7 +1231,7 @@ static void it6505_get_extcon_property(struct it6505 *it6505) { int err; union extcon_property_value property; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; if (it6505->extcon && !it6505->lane_swap_disabled) { err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP, @@ -1382,7 +1382,7 @@ static void it6505_enable_audio_source(struct it6505 *it6505) static void it6505_enable_audio_infoframe(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F }; DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x", @@ -1411,7 +1411,7 @@ static void it6505_disable_audio(struct it6505 *it6505) static void it6505_enable_audio(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int regbe; DRM_DEV_DEBUG_DRIVER(dev, "start"); @@ -1446,7 +1446,7 @@ static bool it6505_use_step_train_check(struct it6505 *it6505) static void it6505_parse_link_capabilities(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; struct it6505_drm_dp_link *link = &it6505->link; int bcaps; @@ -1557,7 +1557,7 @@ static void it6505_lane_count_setup(struct it6505 *it6505) static void it6505_link_training_setup(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; if (it6505->enable_enhanced_frame) it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, @@ -1708,7 +1708,7 @@ it6505_step_cr_train(struct it6505 *it6505, FORCE_CR_DONE); return true; } - DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "cr not done"); + DRM_DEV_DEBUG_DRIVER(it6505->dev, "cr not done"); if (it6505_check_max_voltage_swing_reached(lane_level_config, it6505->lane_count)) @@ -1785,7 +1785,7 @@ it6505_step_eq_train(struct it6505 *it6505, FORCE_EQ_DONE); return true; } - DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "eq not done"); + DRM_DEV_DEBUG_DRIVER(it6505->dev, "eq not done"); for (i = 0; i < it6505->lane_count; i++) { lane_voltage_pre_emphasis->voltage_swing[i] = @@ -1820,7 +1820,7 @@ static bool it6505_link_start_step_train(struct it6505 *it6505) .pre_emphasis = { 0 }, }; - DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start"); + DRM_DEV_DEBUG_DRIVER(it6505->dev, "start"); err = it6505_drm_dp_link_configure(it6505); if (err < 0) @@ -1854,7 +1854,7 @@ static void it6505_reset_hdcp(struct it6505 *it6505) static void it6505_start_hdcp(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); it6505_reset_hdcp(it6505); @@ -1882,7 +1882,7 @@ static bool it6505_hdcp_is_ksv_valid(u8 *ksv) static void it6505_hdcp_part1_auth(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; u8 hdcp_bcaps; it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00); @@ 
-1923,7 +1923,7 @@ static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input, struct shash_desc *desc; struct crypto_shash *tfm; int err; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; tfm = crypto_alloc_shash("sha1", 0, 0); if (IS_ERR(tfm)) { @@ -1948,7 +1948,7 @@ static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input, static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; u8 binfo[2]; int down_stream_count, i, err, msg_count = 0; @@ -2012,7 +2012,7 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; u8 av[5][4], bv[5][4]; int i, err; @@ -2045,7 +2045,7 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, hdcp_wait_ksv_list); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; unsigned int timeout = 5000; u8 bstatus = 0; bool ksv_list_check; @@ -2087,7 +2087,7 @@ static void it6505_hdcp_work(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, hdcp_work.work); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int ret; u8 link_status[DP_LINK_STATUS_SIZE] = { 0 }; @@ -2128,7 +2128,7 @@ static void it6505_hdcp_work(struct work_struct *work) static void it6505_show_hdcp_info(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int i; u8 *sha1 = it6505->sha1_input; @@ -2162,7 +2162,7 @@ static void it6505_stop_link_train(struct it6505 *it6505) static void it6505_link_train_ok(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; it6505->link_state = LINK_OK; /* disalbe mute enable avi info frame */ @@ -2181,7 +2181,7 @@ static void it6505_link_train_ok(struct it6505 *it6505) static void it6505_link_step_train_process(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int ret, i, step_retry = 3; DRM_DEV_DEBUG_DRIVER(dev, "Start step train"); @@ -2219,7 +2219,7 @@ static void it6505_link_step_train_process(struct it6505 *it6505) static void it6505_link_training_work(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, link_works); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int ret; DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", @@ -2267,7 +2267,7 @@ static void it6505_remove_edid(struct it6505 *it6505) static int it6505_process_hpd_irq(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int ret, dpcd_sink_count, dp_irq_vector, bstatus; u8 link_status[DP_LINK_STATUS_SIZE]; @@ -2331,7 +2331,7 @@ static int it6505_process_hpd_irq(struct it6505 *it6505) static void it6505_irq_hpd(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int dp_sink_count; it6505->hpd_state = it6505_get_sink_hpd_status(it6505); @@ -2393,7 +2393,7 @@ static void it6505_irq_hpd(struct it6505 *it6505) static void it6505_irq_hpd_irq(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt"); @@ -2403,7 
+2403,7 @@ static void it6505_irq_hpd_irq(struct it6505 *it6505) static void it6505_irq_scdt(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; bool data; data = it6505_get_video_status(it6505); @@ -2418,7 +2418,7 @@ static void it6505_irq_scdt(struct it6505 *it6505) static void it6505_irq_hdcp_done(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt"); it6505->hdcp_status = HDCP_AUTH_DONE; @@ -2427,7 +2427,7 @@ static void it6505_irq_hdcp_done(struct it6505 *it6505) static void it6505_irq_hdcp_fail(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt"); it6505->hdcp_status = HDCP_AUTH_IDLE; @@ -2437,14 +2437,14 @@ static void it6505_irq_hdcp_fail(struct it6505 *it6505) static void it6505_irq_aux_cmd_fail(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt"); } static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt"); schedule_work(&it6505->hdcp_wait_ksv_list); @@ -2452,7 +2452,7 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505) static void it6505_irq_audio_fifo_error(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt"); @@ -2462,7 +2462,7 @@ static void it6505_irq_audio_fifo_error(struct it6505 *it6505) static void it6505_irq_link_train_fail(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt"); schedule_work(&it6505->link_works); @@ -2470,7 +2470,7 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505) static void it6505_irq_video_fifo_error(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt"); it6505->auto_train_retry = AUTO_TRAIN_RETRY; @@ -2481,7 +2481,7 @@ static void it6505_irq_video_fifo_error(struct it6505 *it6505) static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt"); it6505->auto_train_retry = AUTO_TRAIN_RETRY; @@ -2498,7 +2498,7 @@ static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) static irqreturn_t it6505_int_threaded_handler(int unused, void *data) { struct it6505 *it6505 = data; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; static const struct { int bit; void (*handler)(struct it6505 *it6505); @@ -2552,7 +2552,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data) static int it6505_poweron(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; struct it6505_platform_data *pdata = &it6505->pdata; int err; @@ -2601,7 +2601,7 @@ static int it6505_poweron(struct it6505 *it6505) static int it6505_poweroff(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; struct it6505_platform_data *pdata = 
&it6505->pdata; int err; @@ -2635,7 +2635,7 @@ static int it6505_poweroff(struct it6505 *it6505) static enum drm_connector_status it6505_detect(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; enum drm_connector_status status = connector_status_disconnected; int dp_sink_count; @@ -2696,7 +2696,7 @@ static int it6505_extcon_notifier(struct notifier_block *self, static void it6505_extcon_work(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int state, ret; if (it6505->enable_drv_hold) @@ -2741,11 +2741,11 @@ unlock: static int it6505_use_notifier_module(struct it6505 *it6505) { int ret; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; it6505->event_nb.notifier_call = it6505_extcon_notifier; INIT_WORK(&it6505->extcon_wq, it6505_extcon_work); - ret = devm_extcon_register_notifier(&it6505->client->dev, + ret = devm_extcon_register_notifier(it6505->dev, it6505->extcon, EXTCON_DISP_DP, &it6505->event_nb); if (ret) { @@ -2761,7 +2761,7 @@ static int it6505_use_notifier_module(struct it6505 *it6505) static void it6505_remove_notifier_module(struct it6505 *it6505) { if (it6505->extcon) { - devm_extcon_unregister_notifier(&it6505->client->dev, + devm_extcon_unregister_notifier(it6505->dev, it6505->extcon, EXTCON_DISP_DP, &it6505->event_nb); @@ -2774,7 +2774,7 @@ static void __maybe_unused it6505_delayed_audio(struct work_struct *work) struct it6505 *it6505 = container_of(work, struct it6505, delayed_audio.work); - DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start"); + DRM_DEV_DEBUG_DRIVER(it6505->dev, "start"); if (!it6505->powered) return; @@ -2787,7 +2787,7 @@ static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505, struct hdmi_codec_params *params) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int i = 0; DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__, @@ -2871,7 +2871,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct it6505 *it6505 = bridge_to_it6505(bridge); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; int ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { @@ -2935,7 +2935,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; struct drm_atomic_state *state = old_state->base.state; struct hdmi_avi_infoframe frame; struct drm_crtc_state *crtc_state; @@ -2991,7 +2991,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); @@ -3006,7 +3006,7 @@ static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); @@ -3017,7 +3017,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); - struct 
device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); @@ -3036,7 +3036,7 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct it6505 *it6505 = bridge_to_it6505(bridge); - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; if (!it6505->cached_edid) { it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block, @@ -3088,7 +3088,7 @@ static const struct dev_pm_ops it6505_bridge_pm_ops = { static int it6505_init_pdata(struct it6505 *it6505) { struct it6505_platform_data *pdata = &it6505->pdata; - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; /* 1.0V digital core power regulator */ pdata->pwr18 = devm_regulator_get(dev, "pwr18"); @@ -3130,7 +3130,7 @@ static int it6505_get_data_lanes_count(const struct device_node *endpoint, static void it6505_parse_dt(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; struct device_node *np = dev->of_node, *ep = NULL; int len; u64 link_frequencies; @@ -3335,7 +3335,7 @@ static void debugfs_create_files(struct it6505 *it6505) static void debugfs_init(struct it6505 *it6505) { - struct device *dev = &it6505->client->dev; + struct device *dev = it6505->dev; it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); @@ -3377,7 +3377,7 @@ static int it6505_i2c_probe(struct i2c_client *client) it6505->bridge.of_node = client->dev.of_node; it6505->connector_status = connector_status_disconnected; - it6505->client = client; + it6505->dev = &client->dev; i2c_set_clientdata(client, it6505); /* get extcon device from DTS */ diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index aa8d47e7f40d..4d404f5ef87e 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 2a57e804ea02..22c84d29c2bc 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -28,6 +28,8 @@ #define EDID_BLOCK_SIZE 128 #define EDID_NUM_BLOCKS 2 +#define FW_FILE "lt9611uxc_fw.bin" + struct lt9611uxc { struct device *dev; struct drm_bridge bridge; @@ -754,7 +756,7 @@ static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc) REG_SEQ0(0x805a, 0x00), }; - ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev); + ret = request_firmware(&fw, FW_FILE, lt9611uxc->dev); if (ret < 0) return ret; @@ -1019,3 +1021,5 @@ module_i2c_driver(lt9611uxc_driver); MODULE_AUTHOR("Dmitry Baryshkov "); MODULE_LICENSE("GPL v2"); + +MODULE_FIRMWARE(FW_FILE); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index 67368f23d4aa..8c5668dca0c4 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 4a5f5c4f5dcc..8d54091ec66e 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -16,8 +16,8 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 
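The ite-it6505.c hunks above swap the driver's cached struct i2c_client pointer for a plain struct device pointer, so the many debug and devm call sites read it6505->dev instead of dereferencing &it6505->client->dev each time. A minimal sketch of the same pattern, using a hypothetical foo driver rather than the it6505 code itself:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct foo {
	struct device *dev;	/* was: struct i2c_client *client */
};

static int foo_probe(struct i2c_client *client)
{
	struct foo *foo;

	foo = devm_kzalloc(&client->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* Cache the generic device once; later code uses foo->dev only. */
	foo->dev = &client->dev;
	i2c_set_clientdata(client, foo);

	return 0;
}

Besides being shorter at each call site, holding only the struct device keeps most of the driver independent of the bus the device happens to sit on.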
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index aa8d47e7f40d..4d404f5ef87e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -16,7 +16,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 2a57e804ea02..22c84d29c2bc 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -28,6 +28,8 @@
 #define EDID_BLOCK_SIZE 128
 #define EDID_NUM_BLOCKS 2
 
+#define FW_FILE "lt9611uxc_fw.bin"
+
 struct lt9611uxc {
 	struct device *dev;
 	struct drm_bridge bridge;
@@ -754,7 +756,7 @@ static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc)
 		REG_SEQ0(0x805a, 0x00),
 	};
 
-	ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev);
+	ret = request_firmware(&fw, FW_FILE, lt9611uxc->dev);
 	if (ret < 0)
 		return ret;
 
@@ -1019,3 +1021,5 @@ module_i2c_driver(lt9611uxc_driver);
 
 MODULE_AUTHOR("Dmitry Baryshkov ");
 MODULE_LICENSE("GPL v2");
+
+MODULE_FIRMWARE(FW_FILE);
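The lontium-lt9611uxc.c hunks above give the firmware name a single definition, FW_FILE, and advertise it with MODULE_FIRMWARE() so that tools such as modinfo and initramfs generators can see which blob the module needs. A small sketch of the same pattern, with a hypothetical module and firmware name:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

#define FOO_FW_FILE "foo_fw.bin"	/* hypothetical firmware name */

static int foo_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* The same macro feeds both the load and the modinfo record. */
	ret = request_firmware(&fw, FOO_FW_FILE, dev);
	if (ret < 0)
		return ret;

	/* ... program the device from fw->data / fw->size ... */

	release_firmware(fw);
	return 0;
}

MODULE_FIRMWARE(FOO_FW_FILE);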
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 67368f23d4aa..8c5668dca0c4 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -7,7 +7,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 4a5f5c4f5dcc..8d54091ec66e 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -16,8 +16,8 @@
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index c9b6cb7678e3..ae3ab9262ef1 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -12,7 +12,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 8801cdd033b5..8161b1a1a4b1 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -105,7 +105,6 @@ struct ps8640 {
 	struct gpio_desc *gpio_reset;
 	struct gpio_desc *gpio_powerdown;
 	struct device_link *link;
-	struct edid *edid;
 	bool pre_enabled;
 	bool need_post_hpd_delay;
 };
@@ -155,23 +154,6 @@ static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
 	return container_of(aux, struct ps8640, aux);
 }
 
-static bool ps8640_of_panel_on_aux_bus(struct device *dev)
-{
-	struct device_node *bus, *panel;
-
-	bus = of_get_child_by_name(dev->of_node, "aux-bus");
-	if (!bus)
-		return false;
-
-	panel = of_get_child_by_name(bus, "panel");
-	of_node_put(bus);
-	if (!panel)
-		return false;
-	of_node_put(panel);
-
-	return true;
-}
-
 static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wait_us)
 {
 	struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -539,50 +521,6 @@ static void ps8640_bridge_detach(struct drm_bridge *bridge)
 		device_link_del(ps_bridge->link);
 }
 
-static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
-					   struct drm_connector *connector)
-{
-	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
-	struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
-	bool poweroff = !ps_bridge->pre_enabled;
-
-	if (!ps_bridge->edid) {
-		/*
-		 * When we end calling get_edid() triggered by an ioctl, i.e
-		 *
-		 *   drm_mode_getconnector (ioctl)
-		 *     -> drm_helper_probe_single_connector_modes
-		 *        -> drm_bridge_connector_get_modes
-		 *           -> ps8640_bridge_get_edid
-		 *
-		 * We need to make sure that what we need is enabled before
-		 * reading EDID, for this chip, we need to do a full poweron,
-		 * otherwise it will fail.
-		 */
-		if (poweroff)
-			drm_atomic_bridge_chain_pre_enable(bridge,
-							   connector->state->state);
-
-		ps_bridge->edid = drm_get_edid(connector,
-					       ps_bridge->page[PAGE0_DP_CNTL]->adapter);
-
-		/*
-		 * If we call the get_edid() function without having enabled the
-		 * chip before, return the chip to its original power state.
-		 */
-		if (poweroff)
-			drm_atomic_bridge_chain_post_disable(bridge,
-							     connector->state->state);
-	}
-
-	if (!ps_bridge->edid) {
-		dev_err(dev, "Failed to get EDID\n");
-		return NULL;
-	}
-
-	return drm_edid_duplicate(ps_bridge->edid);
-}
-
 static void ps8640_runtime_disable(void *data)
 {
 	pm_runtime_dont_use_autosuspend(data);
@@ -592,7 +530,6 @@ static void ps8640_runtime_disable(void *data)
 static const struct drm_bridge_funcs ps8640_bridge_funcs = {
 	.attach = ps8640_bridge_attach,
 	.detach = ps8640_bridge_detach,
-	.get_edid = ps8640_bridge_get_edid,
 	.atomic_post_disable = ps8640_atomic_post_disable,
 	.atomic_pre_enable = ps8640_atomic_pre_enable,
 	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -705,14 +642,6 @@ static int ps8640_probe(struct i2c_client *client)
 	ps_bridge->bridge.of_node = dev->of_node;
 	ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
 
-	/*
-	 * In the device tree, if panel is listed under aux-bus of the bridge
-	 * node, panel driver should be able to retrieve EDID by itself using
-	 * aux-bus. So let's not set DRM_BRIDGE_OP_EDID here.
-	 */
-	if (!ps8640_of_panel_on_aux_bus(&client->dev))
-		ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
-
 	/*
 	 * Get MIPI DSI resources early. These can return -EPROBE_DEFER so
 	 * we want to get them out of the way sooner.
@@ -777,13 +706,6 @@ static int ps8640_probe(struct i2c_client *client)
 	return ret;
 }
 
-static void ps8640_remove(struct i2c_client *client)
-{
-	struct ps8640 *ps_bridge = i2c_get_clientdata(client);
-
-	kfree(ps_bridge->edid);
-}
-
 static const struct of_device_id ps8640_match[] = {
 	{ .compatible = "parade,ps8640" },
 	{ }
@@ -792,7 +714,6 @@ MODULE_DEVICE_TABLE(of, ps8640_match);
 
 static struct i2c_driver ps8640_driver = {
 	.probe = ps8640_probe,
-	.remove = ps8640_remove,
 	.driver = {
 		.name = "ps8640",
 		.of_match_table = ps8640_match,
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 73ec60757dbc..cf777bdb25d2 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -16,8 +16,9 @@
 #include
 #include
 #include
-#include
+#include
 #include
+#include
 #include
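The parade-ps8640.c hunks above remove the bridge-side EDID cache, the .get_edid callback, and the conditional DRM_BRIDGE_OP_EDID: with the panel described under the bridge's aux-bus device-tree node, the panel driver fetches the EDID itself over the DP AUX channel, which is also why the cached edid and the kfree() in ps8640_remove() can go. A rough sketch of that hand-off, assuming the devm_of_dp_aux_populate_bus() helper from drm_dp_aux_bus.h and hypothetical foo_* names:

#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>

/* Runs once the panel described under the aux-bus DT node has probed. */
static int foo_bridge_link_panel(struct drm_dp_aux *aux)
{
	/* Safe to register the bridge now that the panel exists. */
	return 0;
}

static int foo_populate_aux_bus(struct drm_dp_aux *aux)
{
	/*
	 * Create the aux-bus children from the device tree. The panel
	 * driver then reads its own EDID over this AUX channel, so the
	 * bridge needs neither a .get_edid implementation nor
	 * DRM_BRIDGE_OP_EDID.
	 */
	return devm_of_dp_aux_populate_bus(aux, foo_bridge_link_panel);
}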