Merge tag 'drm-next-2023-08-30' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "The drm core grew a new generic gpu virtual address manager, and new
  execution locking helpers. These are used by nouveau now to provide
  uAPI support for the userspace Vulkan driver. AMD had a bunch of new
  IP core support, loads of refactoring around fbdev, but mostly just
  the usual amount of stuff across the board.
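
   As a rough sketch of the new drm_exec helpers (against the drm_exec.h
   API merged here; the wrapper function and error handling are
   illustrative, not from this pull):

       #include <drm/drm_exec.h>

       /* Lock a set of GEM objects for a job, retrying on contention. */
       static int lock_job_bos(struct drm_gem_object **objs, unsigned int count)
       {
               struct drm_exec exec;
               unsigned int i;
               int ret = 0;

               drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
               drm_exec_until_all_locked(&exec) {
                       for (i = 0; i < count; i++) {
                               /* Take the resv lock, reserving one fence slot. */
                               ret = drm_exec_prepare_obj(&exec, objs[i], 1);
                               drm_exec_retry_on_contention(&exec);
                               if (ret)
                                       goto unlock;
                       }
               }
               /* ... submit the job and add fences to the locked objects ... */
       unlock:
               drm_exec_fini(&exec);
               return ret;
       }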

  core:
   - fix gfp flags in drmm_kmalloc

  gpuva:
   - add new generic GPU VA manager (for nouveau initially)

  syncobj:
   - add new DRM_IOCTL_SYNCOBJ_EVENTFD ioctl
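
   A hedged userspace sketch of the new ioctl (struct and ioctl number from
   the uAPI added this cycle; the helper itself is illustrative):

       #include <stdint.h>
       #include <sys/eventfd.h>
       #include <sys/ioctl.h>
       #include <drm/drm.h>

       /* Have DRM signal an eventfd once the given syncobj timeline point
        * signals, so userspace can poll() instead of blocking in DRM. */
       static int syncobj_eventfd_notify(int drm_fd, uint32_t handle,
                                         uint64_t point)
       {
               struct drm_syncobj_eventfd args = {
                       .handle = handle,
                       .flags = 0, /* or DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE */
                       .point = point,
                       .fd = eventfd(0, EFD_CLOEXEC),
               };

               return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);
       }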

  dma-buf:
   - acquire resv lock for mmap() in exporters
   - support dma-buf self import automatically
   - docs fixes

  backlight:
   - fix fbdev interactions

  atomic:
   - improve logging

  prime:
   - remove struct gem_prime_mmap plus driver updates

  gem:
   - drm_exec: add locking over multiple GEM objects
   - fix lockdep checking

  fbdev:
   - make fbdev userspace interfaces optional
   - use linux device instead of fbdev device
   - use deferred i/o helper macros in various drivers
   - Make FB core selectable without drivers
   - Remove obsolete flags FBINFO_DEFAULT and FBINFO_FLAG_DEFAULT
   - Add helper macros and Kconfig tokens for DMA-allocated framebuffer

  ttm:
   - support init_on_free
   - swapout fixes

  panel:
   - panel-edp: Support AUO B116XAB01.4
   - Support Visionox R66451 plus DT bindings
   - ld9040:
      - Backlight support
      - magic improved
      - Kconfig fix
   - Convert to of_device_get_match_data()
   - Fix Kconfig dependencies
   - simple:
      - Set bpc value to fix warning
      - Set connector type for AUO T215HVN01
      - Support Innolux G156HCE-L01 plus DT bindings
   - ili9881: Support TDO TL050HDV35 LCD panel plus DT bindings
   - startek: Support KD070FHFID015 MIPI-DSI panel plus DT bindings
   - sitronix-st7789v:
      - Support Inanbo T28CP45TN89 plus DT bindings
      - Support EDT ET028013DMA plus DT bindings
      - Various cleanups
   - edp: Add timings for N140HCA-EAC
   - Allow panels and touchscreens to power sequence together
   - Fix Innolux G156HCE-L01 LVDS clock

  bridge:
   - debugfs for chains support
   - dw-hdmi:
      - Improve support for YUV420 bus format
      - CEC suspend/resume
      - update EDID on HDMI detect
   - dw-mipi-dsi: Fix enable/disable of DSI controller
   - lt9611uxc: Use MODULE_FIRMWARE()
   - ps8640: Remove broken EDID code
   - samsung-dsim: Fix command transfer
   - tc358764:
      - Handle HS/VS polarity
      - Use BIT() macro
      - Various cleanups
   - adv7511: Fix low refresh rate
   - anx7625:
      - Switch to macros instead of hardcoded values
      - locking fixes
   - tc358767: fix hardware delays
   - sitronix-st7789v:
      - Support panel orientation
      - Support rotation property
      - Add support for Jasonic JT240MHQS-HWT-EK-E3 plus DT bindings

  amdgpu:
   - SDMA 6.1.0 support
   - HDP 6.1 support
   - SMUIO 14.0 support
   - PSP 14.0 support
   - IH 6.1 support
   - Lots of checkpatch cleanups
   - GFX 9.4.3 updates
   - Add USB PD and IFWI flashing documentation
   - GPUVM updates
   - RAS fixes
   - DRR fixes
   - FAMS fixes
   - Virtual display fixes
   - Soft IH fixes
   - SMU13 fixes
   - Rework PSP firmware loading for other IPs
   - Kernel doc fixes
   - DCN 3.0.1 fixes
   - LTTPR fixes
   - DP MST fixes
   - DCN 3.1.6 fixes
   - SMU 13.x fixes
   - PSP 13.x fixes
   - SubVP fixes
   - GC 9.4.3 fixes
   - Display bandwidth calculation fixes
   - VCN4 secure submission fixes
   - Allow building DC on RISC-V
   - Add visible FB info to bo_print_info
   - HBR3 fixes
   - GFX9 MCBP fix
   - GMC10 vmhub index fix
   - GMC11 vmhub index fix
   - Create a new doorbell manager
   - SR-IOV fixes
   - initial freesync panel replay support
   - revert zpos properly until igt regression is fixed
   - use TTM to manage doorbell BAR
   - Expose both current and average power via hwmon if supported

  amdkfd:
   - Cleanup CRIU dma-buf handling
   - Use KIQ to unmap HIQ
   - GFX 9.4.3 debugger updates
   - GFX 9.4.2 debugger fixes
   - Enable cooperative groups for gfx11
   - SVM fixes
   - Convert older APUs to use dGPU path like newer APUs
   - Drop IOMMUv2 path as it is no longer used
   - TBA fix for aldebaran

  i915:
   - ICL+ DSI modeset sequence
   - HDCP improvements
   - MTL display fixes and cleanups
   - HSW/BDW PSR1 restored
   - Init DDI ports in VBT order
   - General display refactors
   - Start using plane scale factor for relative data rate
   - Use shmem for dpt objects
   - Expose RPS thresholds in sysfs
   - Apply GuC SLPC min frequency softlimit correctly
   - Extend Wa_14015795083 to TGL, RKL, DG1 and ADL
   - Fix a VMA UAF for multi-gt platform
   - Do not use stolen on MTL due to HW bug
   - Check HuC and GuC version compatibility on MTL
   - avoid infinite GPU waits due to premature release of request memory
   - Fixes and updates for GSC memory allocation
   - Display SDVO fixes
   - Take stolen handling out of FBC code
   - Make i915_coherent_map_type GT-centric
   - Simplify shmem_create_from_object map_type

  msm:
   - SM6125 MDSS support
   - DPU: SM6125 DPU support
   - DSI: runtime PM support, burst mode support
   - DSI PHY: SM6125 support in 14nm DSI PHY driver
   - GPU: prepare for a7xx
   - fix a690 firmware
   - disable relocs on a6xx and newer

  radeon:
   - Lots of checkpatch cleanups

  ast:
   - improve device-model detection
   - Represent BMC as virtual connector
   - Report DP connection status

  nouveau:
   - add new exec/bind interface to support Vulkan
   - document some getparam ioctls
   - improve VRAM detection
   - various fixes/cleanups
   - work around DPCD issues

  ivpu:
   - MMU updates
   - debugfs support
   - Support vpu4

  virtio:
   - add sync object support

  atmel-hlcdc:
   - Support inverted pixclock polarity

  etnaviv:
   - runtime PM cleanups
   - hang handling fixes

  exynos:
   - use fbdev DMA helpers
   - fix possible NULL ptr dereference

  komeda:
   - always attach encoder

  omapdrm:
   - use fbdev DMA helpers

  ingenic:
   - kconfig regmap fixes

  loongson:
   - support display controller

  mediatek:
   - Small mtk-dpi cleanups
   - DisplayPort: support eDP and aux-bus
   - Fix coverity issues
   - Fix potential memory leak if vmap() fails

  mgag200:
   - minor fixes

  mxsfb:
   - support disabling overlay planes

  panfrost:
   - fix sync in IRQ handling

  ssd130x:
   - Support per-controller default resolution plus DT bindings
   - Reduce memory-allocation overhead
   - Improve intermediate buffer size computation
   - Fix allocation of temporary buffers
   - Fix pitch computation
   - Fix shadow plane allocation

  tegra:
   - use fbdev DMA helpers
   - Convert to devm_platform_ioremap_resource()
   - support bridge/connector
   - enable PM

  tidss:
   - Support TI AM625 plus DT bindings
   - Implement new connector model plus driver updates

  vkms:
   - improve write back support
   - docs fixes
   - support gamma LUT

  zynqmp-dpsub:
   - misc fixes"

* tag 'drm-next-2023-08-30' of git://anongit.freedesktop.org/drm/drm: (1327 commits)
  drm/gpuva_mgr: remove unused prev pointer in __drm_gpuva_sm_map()
  drm/tests/drm_kunit_helpers: Place correct function name in the comment header
  drm/nouveau: uapi: don't pass NO_PREFETCH flag implicitly
  drm/nouveau: uvmm: fix unset region pointer on remap
  drm/nouveau: sched: avoid job races between entities
  drm/i915: Fix HPD polling, reenabling the output poll work as needed
  drm: Add an HPD poll helper to reschedule the poll work
  drm/i915: Fix TLB-Invalidation seqno store
  drm/ttm/tests: Fix type conversion in ttm_pool_test
  drm/msm/a6xx: Bail out early if setting GPU OOB fails
  drm/msm/a6xx: Move LLC accessors to the common header
  drm/msm/a6xx: Introduce a6xx_llc_read
  drm/ttm/tests: Require MMU when testing
  drm/panel: simple: Fix Innolux G156HCE-L01 LVDS clock
  Revert "Revert "drm/amdgpu/display: change pipe policy for DCN 2.0""
  drm/amdgpu: Add memory vendor information
  drm/amd: flush any delayed gfxoff on suspend entry
  drm/amdgpu: skip fence GFX interrupts disable/enable for S0ix
  drm/amdgpu: Remove gfxoff check in GFX v9.4.3
  drm/amd/pm: Update pci link speed for smu v13.0.6
  ...
Merged by Linus Torvalds, 2023-08-30 13:34:34 -07:00 (commit 461f35f014)
1422 changed files with 46020 additions and 17496 deletions

@@ -29,6 +29,7 @@ properties:
- description: Link clock from DP PHY
- description: VCO DIV clock from DP PHY
- description: AHB config clock from GCC
- description: GPLL0 div source from GCC
clock-names:
items:
@@ -39,6 +40,7 @@ properties:
- const: dp_phy_pll_link_clk
- const: dp_phy_pll_vco_div_clk
- const: cfg_ahb_clk
- const: gcc_disp_gpll0_div_clk_src
'#clock-cells':
const: 1
@@ -46,6 +48,16 @@ properties:
'#power-domain-cells':
const: 1
power-domains:
description:
A phandle and PM domain specifier for the CX power domain.
maxItems: 1
required-opps:
description:
A phandle to an OPP node describing the power domain's performance point.
maxItems: 1
reg:
maxItems: 1
@@ -63,23 +75,31 @@ examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/clock/qcom,gcc-sm6125.h>
#include <dt-bindings/power/qcom-rpmpd.h>
clock-controller@5f00000 {
compatible = "qcom,sm6125-dispcc";
reg = <0x5f00000 0x20000>;
clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
<&dsi0_phy 0>,
<&dsi0_phy 1>,
<&dsi1_phy 1>,
<&dp_phy 0>,
<&dp_phy 1>,
<&gcc GCC_DISP_AHB_CLK>;
<&gcc GCC_DISP_AHB_CLK>,
<&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>;
clock-names = "bi_tcxo",
"dsi0_phy_pll_out_byteclk",
"dsi0_phy_pll_out_dsiclk",
"dsi1_phy_pll_out_dsiclk",
"dp_phy_pll_link_clk",
"dp_phy_pll_vco_div_clk",
"cfg_ahb_clk";
"cfg_ahb_clk",
"gcc_disp_gpll0_div_clk_src";
required-opps = <&rpmhpd_opp_ret>;
power-domains = <&rpmpd SM6125_VDDCX>;
#clock-cells = <1>;
#power-domain-cells = <1>;
};

@@ -49,6 +49,9 @@ properties:
description: |
OF device-tree gpio specification for RSTX pin(active low system reset)
interrupts:
maxItems: 1
toshiba,hpd-pin:
$ref: /schemas/types.yaml#/definitions/uint32
enum:

@@ -28,6 +28,7 @@ properties:
- qcom,sm8350-dp
- items:
- enum:
- qcom,sm8250-dp
- qcom,sm8450-dp
- qcom,sm8550-dp
- const: qcom,sm8350-dp

@@ -27,6 +27,7 @@ properties:
- qcom,sdm660-dsi-ctrl
- qcom,sdm845-dsi-ctrl
- qcom,sm6115-dsi-ctrl
- qcom,sm6125-dsi-ctrl
- qcom,sm6350-dsi-ctrl
- qcom,sm6375-dsi-ctrl
- qcom,sm8150-dsi-ctrl
@@ -166,6 +167,10 @@ properties:
description:
Phandle to vdd regulator device node
refgen-supply:
description:
Phandle to REFGEN regulator device node
vcca-supply:
description:
Phandle to vdd regulator device node
@@ -301,6 +306,7 @@ allOf:
contains:
enum:
- qcom,msm8998-dsi-ctrl
- qcom,sm6125-dsi-ctrl
- qcom,sm6350-dsi-ctrl
then:
properties:

@@ -19,6 +19,7 @@ properties:
- qcom,dsi-phy-14nm-2290
- qcom,dsi-phy-14nm-660
- qcom,dsi-phy-14nm-8953
- qcom,sm6125-dsi-phy-14nm
reg:
items:
@@ -35,6 +36,16 @@ properties:
vcca-supply:
description: Phandle to vcca regulator device node.
power-domains:
description:
A phandle and PM domain specifier for an optional power domain.
maxItems: 1
required-opps:
description:
A phandle to an OPP node describing the power domain's performance point.
maxItems: 1
required:
- compatible
- reg

@@ -13,6 +13,12 @@ maintainers:
properties:
compatible:
oneOf:
- description: |
The driver is parsing the compat string for Adreno to
figure out the chip-id.
items:
- pattern: '^qcom,adreno-[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]$'
- const: qcom,adreno
- description: |
The driver is parsing the compat string for Adreno to
figure out the gpu-id and patch level.

@@ -77,6 +77,12 @@ properties:
items:
- description: MDSS_CORE reset
memory-region:
maxItems: 1
description:
Phandle to a node describing a reserved framebuffer memory region.
For example, the splash memory region set up by the bootloader.
required:
- reg
- reg-names

@@ -15,6 +15,7 @@ properties:
compatible:
enum:
- qcom,sc7180-dpu
- qcom,sm6125-dpu
- qcom,sm6350-dpu
- qcom,sm6375-dpu
@@ -63,7 +64,9 @@ allOf:
- if:
properties:
compatible:
const: qcom,sm6375-dpu
enum:
- qcom,sm6375-dpu
- qcom,sm6125-dpu
then:
properties:

@@ -0,0 +1,213 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm6125-mdss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM6125 Display MDSS
maintainers:
- Marijn Suijten <marijn.suijten@somainline.org>
description:
SM6125 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
like DPU display controller, DSI and DP interfaces etc.
$ref: /schemas/display/msm/mdss-common.yaml#
properties:
compatible:
const: qcom,sm6125-mdss
clocks:
items:
- description: Display AHB clock from gcc
- description: Display AHB clock
- description: Display core clock
clock-names:
items:
- const: iface
- const: ahb
- const: core
iommus:
maxItems: 1
interconnects:
maxItems: 2
interconnect-names:
maxItems: 2
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
properties:
compatible:
const: qcom,sm6125-dpu
"^dsi@[0-9a-f]+$":
type: object
properties:
compatible:
items:
- const: qcom,sm6125-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
"^phy@[0-9a-f]+$":
type: object
properties:
compatible:
const: qcom,sm6125-dsi-phy-14nm
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,dispcc-sm6125.h>
#include <dt-bindings/clock/qcom,gcc-sm6125.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
display-subsystem@5e00000 {
compatible = "qcom,sm6125-mdss";
reg = <0x05e00000 0x1000>;
reg-names = "mdss";
interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <1>;
clocks = <&gcc GCC_DISP_AHB_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>;
clock-names = "iface",
"ahb",
"core";
power-domains = <&dispcc MDSS_GDSC>;
iommus = <&apps_smmu 0x400 0x0>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
display-controller@5e01000 {
compatible = "qcom,sm6125-dpu";
reg = <0x05e01000 0x83208>,
<0x05eb0000 0x2008>;
reg-names = "mdp", "vbif";
interrupt-parent = <&mdss>;
interrupts = <0>;
clocks = <&gcc GCC_DISP_HF_AXI_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&dispcc DISP_CC_MDSS_ROT_CLK>,
<&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>,
<&dispcc DISP_CC_MDSS_VSYNC_CLK>,
<&gcc GCC_DISP_THROTTLE_CORE_CLK>;
clock-names = "bus",
"iface",
"rot",
"lut",
"core",
"vsync",
"throttle";
assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmpd SM6125_VDDCX>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&mdss_dsi0_in>;
};
};
};
};
dsi@5e94000 {
compatible = "qcom,sm6125-dsi-ctrl", "qcom,mdss-dsi-ctrl";
reg = <0x05e94000 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <4>;
clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
<&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK>,
<&dispcc DISP_CC_MDSS_ESC0_CLK>,
<&dispcc DISP_CC_MDSS_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>;
clock-names = "byte",
"byte_intf",
"pixel",
"core",
"iface",
"bus";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmpd SM6125_VDDCX>;
phys = <&mdss_dsi0_phy>;
phy-names = "dsi";
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdss_dsi0_in: endpoint {
remote-endpoint = <&dpu_intf1_out>;
};
};
port@1 {
reg = <1>;
mdss_dsi0_out: endpoint {
};
};
};
};
phy@5e94400 {
compatible = "qcom,sm6125-dsi-phy-14nm";
reg = <0x05e94400 0x100>,
<0x05e94500 0x300>,
<0x05e94800 0x188>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmcc RPM_SMD_XO_CLK_SRC>;
clock-names = "iface",
"ref";
required-opps = <&rpmpd_opp_nom>;
power-domains = <&rpmpd SM6125_VDDMX>;
};
};
...

@@ -131,13 +131,6 @@ examples:
remote-endpoint = <&dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&dsi1_in>;
};
};
};
};

@@ -132,13 +132,6 @@ examples:
remote-endpoint = <&dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&dsi1_in>;
};
};
};
};

@@ -52,6 +52,12 @@ patternProperties:
compatible:
const: qcom,sm8350-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
properties:
compatible:
const: qcom,sm8350-dp
"^dsi@[0-9a-f]+$":
type: object
properties:

@@ -42,6 +42,14 @@ patternProperties:
compatible:
const: qcom,sm8450-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
properties:
compatible:
items:
- const: qcom,sm8450-dp
- const: qcom,sm8350-dp
"^dsi@[0-9a-f]+$":
type: object
properties:

@@ -42,6 +42,14 @@ patternProperties:
compatible:
const: qcom,sm8550-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
properties:
compatible:
items:
- const: qcom,sm8550-dp
- const: qcom,sm8350-dp
"^dsi@[0-9a-f]+$":
type: object
properties:

@@ -18,6 +18,7 @@ properties:
- enum:
- bananapi,lhr050h41
- feixin,k101-im2byl02
- tdo,tl050hdv35
- wanchanglong,w552946aba
- const: ilitek,ili9881c

@@ -40,6 +40,12 @@ properties:
items:
- enum:
- auo,b101ew05
# Chunghwa Picture Tubes Ltd. 7" WXGA (800x1280) TFT LCD LVDS panel
- chunghwa,claa070wp03xg
# HannStar Display Corp. HSD101PWW2 10.1" WXGA (1280x800) LVDS panel
- hannstar,hsd101pww2
# Hydis Technologies 7" WXGA (800x1280) TFT LCD LVDS panel
- hydis,hv070wx2-1e0
- tbs,a711-panel
- const: panel-lvds

@@ -103,8 +103,6 @@ properties:
- cdtech,s070wv95-ct16
# Chefree CH101OLHLWH-002 10.1" (1280x800) color TFT LCD panel
- chefree,ch101olhlwh-002
# Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
- chunghwa,claa070wp03xg
# Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
- chunghwa,claa101wa01a
# Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
@@ -168,8 +166,6 @@ properties:
- hannstar,hsd070pww1
# HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
- hannstar,hsd100pxn1
# HannStar Display Corp. HSD101PWW2 10.1" WXGA (1280x800) LVDS panel
- hannstar,hsd101pww2
# Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
- hit,tx23d38vm0caa
# InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel
@@ -196,6 +192,8 @@ properties:
- innolux,n116bge
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
- innolux,n125hce-gn1
# InnoLux 15.6" FHD (1920x1080) TFT LCD panel
- innolux,g156hce-l01
# InnoLux 15.6" WXGA TFT LCD panel
- innolux,n156bge-l21
# Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel

@@ -15,17 +15,26 @@ allOf:
properties:
compatible:
const: sitronix,st7789v
enum:
- edt,et028013dma
- inanbo,t28cp45tn89-v17
- jasonic,jt240mhqs-hwt-ek-e3
- sitronix,st7789v
reg: true
reset-gpios: true
power-supply: true
backlight: true
port: true
rotation: true
spi-cpha: true
spi-cpol: true
spi-rx-bus-width:
minimum: 0
maximum: 1
dc-gpios:
maxItems: 1
description: DCX pin, Display data/command selection pin in parallel interface
@@ -33,7 +42,6 @@ properties:
required:
- compatible
- reg
- reset-gpios
- power-supply
unevaluatedProperties: false
@@ -52,6 +60,7 @@ examples:
reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
backlight = <&pwm_bl>;
power-supply = <&power>;
rotation = <180>;
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;

@@ -0,0 +1,69 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/startek,kd070fhfid015.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Startek Electronic Technology Co. kd070fhfid015 7 inch TFT LCD panel
maintainers:
- Alexandre Mergnat <amergnat@baylibre.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: startek,kd070fhfid015
enable-gpios: true
iovcc-supply:
description: Reference to the regulator powering the panel IO pins.
reg:
maxItems: 1
description: DSI virtual channel
reset-gpios: true
port: true
power-supply: true
additionalProperties: false
required:
- compatible
- enable-gpios
- iovcc-supply
- reg
- reset-gpios
- port
- power-supply
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi0 {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "startek,kd070fhfid015";
reg = <0>;
enable-gpios = <&pio 67 GPIO_ACTIVE_HIGH>;
reset-gpios = <&pio 20 GPIO_ACTIVE_HIGH>;
iovcc-supply = <&mt6357_vsim1_reg>;
power-supply = <&vsys_lcm_reg>;
port {
panel_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
};
};
...

@@ -0,0 +1,59 @@
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/visionox,r66451.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Visionox R66451 AMOLED DSI Panel
maintainers:
- Jessica Zhang <quic_jesszhan@quicinc.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: visionox,r66451
reg:
maxItems: 1
description: DSI virtual channel
vddio-supply: true
vdd-supply: true
port: true
reset-gpios: true
additionalProperties: false
required:
- compatible
- reg
- vddio-supply
- vdd-supply
- reset-gpios
- port
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "visionox,r66451";
reg = <0>;
vddio-supply = <&vreg_l12c_1p8>;
vdd-supply = <&vreg_l13c_3p0>;
reset-gpios = <&tlmm 24 GPIO_ACTIVE_LOW>;
port {
panel0_in: endpoint {
remote-endpoint = <&dsi0_out>;
};
};
};
};
...

@@ -49,15 +49,15 @@ properties:
solomon,height:
$ref: /schemas/types.yaml#/definitions/uint32
default: 16
description:
Height in pixel of the screen driven by the controller
Height in pixel of the screen driven by the controller.
The default value is controller-dependent.
solomon,width:
$ref: /schemas/types.yaml#/definitions/uint32
default: 96
description:
Width in pixel of the screen driven by the controller
Width in pixel of the screen driven by the controller.
The default value is controller-dependent.
solomon,page-offset:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -157,6 +157,10 @@ allOf:
const: sinowealth,sh1106
then:
properties:
width:
default: 132
height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
@@ -171,6 +175,10 @@
- solomon,ssd1305
then:
properties:
width:
default: 132
height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
@@ -185,6 +193,10 @@
- solomon,ssd1306
then:
properties:
width:
default: 128
height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
@@ -199,6 +211,10 @@
- solomon,ssd1307
then:
properties:
width:
default: 128
height:
default: 39
solomon,dclk-div:
default: 2
solomon,dclk-frq:
@@ -215,6 +231,10 @@
- solomon,ssd1309
then:
properties:
width:
default: 128
height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:

@@ -12,14 +12,18 @@ maintainers:
- Tomi Valkeinen <tomi.valkeinen@ti.com>
description: |
The AM65x TI Keystone Display SubSystem with two output ports and
two video planes. The first video port supports OLDI and the second
supports DPI format. The fist plane is full video plane with all
features and the second is a "lite plane" without scaling support.
The AM625 and AM65x TI Keystone Display SubSystem with two output
ports and two video planes. In AM65x DSS, the first video port
supports 1 OLDI TX and in AM625 DSS, the first video port output is
internally routed to 2 OLDI TXes. The second video port supports DPI
format. The first plane is full video plane with all features and the
second is a "lite plane" without scaling support.
properties:
compatible:
const: ti,am65x-dss
enum:
- ti,am625-dss
- ti,am65x-dss
reg:
description:
@@ -80,7 +84,9 @@ properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
The DSS OLDI output port node form video port 1
For AM65x DSS, the OLDI output port node from video port 1.
For AM625 DSS, the internal DPI output port node from video
port 1.
port@1:
$ref: /schemas/graph.yaml#/properties/port

@@ -13,6 +13,9 @@ description:
Supports the Elan eKTH6915 touchscreen controller.
This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
allOf:
- $ref: /schemas/input/touchscreen/touchscreen.yaml#
properties:
compatible:
items:
@@ -24,6 +27,8 @@ properties:
interrupts:
maxItems: 1
panel: true
reset-gpios:
description: Reset GPIO; not all touchscreens using eKTH6915 hook this up.

@@ -14,6 +14,9 @@ description:
This touchscreen uses the i2c-hid protocol but has some non-standard
power sequencing required.
allOf:
- $ref: /schemas/input/touchscreen/touchscreen.yaml#
properties:
compatible:
oneOf:
@@ -30,6 +33,8 @@ properties:
interrupts:
maxItems: 1
panel: true
reset-gpios: true

@@ -44,6 +44,8 @@ properties:
description: HID descriptor address
$ref: /schemas/types.yaml#/definitions/uint32
panel: true
post-power-on-delay-ms:
description: Time required by the device after enabling its regulators
or powering it on, before it is ready for communication.

@@ -10,6 +10,13 @@ maintainers:
- Dmitry Torokhov <dmitry.torokhov@gmail.com>
properties:
panel:
description: If this touchscreen is integrally connected to a panel, this
is a reference to that panel. The presence of this reference indicates
that the touchscreen should be power sequenced together with the panel
and that they may share power and/or reset signals.
$ref: /schemas/types.yaml#/definitions/phandle
touchscreen-min-x:
description: minimum x coordinate reported
$ref: /schemas/types.yaml#/definitions/uint32

@@ -617,6 +617,8 @@ patternProperties:
description: Imagination Technologies Ltd.
"^imi,.*":
description: Integrated Micro-Electronics Inc.
"^inanbo,.*":
description: Shenzhen INANBO Electronic Technology Co., Ltd.
"^incircuit,.*":
description: In-Circuit GmbH
"^indiedroid,.*":
@@ -675,6 +677,8 @@ patternProperties:
description: iWave Systems Technologies Pvt. Ltd.
"^jadard,.*":
description: Jadard Technology Inc.
"^jasonic,.*":
description: Jasonic Technology Ltd.
"^jdi,.*":
description: Japan Display Inc.
"^jedec,.*":

@@ -0,0 +1,33 @@
=======================
dGPU firmware flashing
=======================
IFWI
----
Flashing the dGPU integrated firmware image (IFWI) is supported by GPUs that
use the PSP to orchestrate the update (Navi3x or newer GPUs).
For supported GPUs, `amdgpu` will export a series of sysfs files that can be
used for the flash process.
The IFWI flash process is:
1. Ensure the IFWI image is intended for the dGPU on the system.
2. "Write" the IFWI image to the sysfs file `psp_vbflash`. This will stage the IFWI in memory.
3. "Read" from the `psp_vbflash` sysfs file to initiate the flash process.
4. Poll the `psp_vbflash_status` sysfs file to determine when the flash process completes.
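
As a rough userspace illustration of steps 2-4 (the sysfs path below is an
assumption for a typical single-GPU system, not taken from this patch; error
handling omitted)::

    #include <fcntl.h>
    #include <stddef.h>
    #include <unistd.h>

    #define VBFLASH "/sys/class/drm/card0/device/psp_vbflash" /* assumed */

    static void flash_ifwi(const void *image, size_t len)
    {
            char dummy;
            int fd;

            fd = open(VBFLASH, O_WRONLY);
            write(fd, image, len);  /* stage the IFWI in memory */
            close(fd);

            fd = open(VBFLASH, O_RDONLY);
            read(fd, &dummy, 1);    /* initiate the flash process */
            close(fd);

            /* then poll psp_vbflash_status until the flash completes */
    }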
USB-C PD F/W
------------
On GPUs that support flashing an updated USB-C PD firmware image, the process
is done using the `usbc_pd_fw` sysfs file.
* Reading the file will provide the current firmware version.
* Writing the name of a firmware payload stored in `/lib/firmware/amdgpu` to the sysfs file will initiate the flash process.
The firmware payload stored in `/lib/firmware/amdgpu` can be named any name
as long as it doesn't conflict with other existing binaries that are used by
`amdgpu`.
sysfs files
-----------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c

@@ -10,6 +10,7 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.
module-parameters
driver-core
display/index
flashing
xgmi
ras
thermal

@@ -6,3 +6,14 @@ drm/i915 uAPI
=============
.. kernel-doc:: include/uapi/drm/i915_drm.h
drm/nouveau uAPI
================
VM_BIND / EXEC uAPI
-------------------
.. kernel-doc:: drivers/gpu/drm/nouveau/nouveau_exec.c
:doc: Overview
.. kernel-doc:: include/uapi/drm/nouveau_drm.h

@@ -466,6 +466,42 @@ DRM MM Range Allocator Function References
.. kernel-doc:: drivers/gpu/drm/drm_mm.c
:export:
DRM GPU VA Manager
==================
Overview
--------
.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
:doc: Overview
Split and Merge
---------------
.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
:doc: Split and Merge
Locking
-------
.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
:doc: Locking
Examples
--------
.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
:doc: Examples
DRM GPU VA Manager Function References
--------------------------------------
.. kernel-doc:: include/drm/drm_gpuva_mgr.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c
:export:
DRM Buddy Allocator
===================
@@ -481,8 +517,10 @@ DRM Cache Handling and Fast WC memcpy()
.. kernel-doc:: drivers/gpu/drm/drm_cache.c
:export:
.. _drm_sync_objects:
DRM Sync Objects
===========================
================
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:doc: Overview
@@ -493,6 +531,18 @@ DRM Sync Objects
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:export:
DRM Execution context
=====================
.. kernel-doc:: drivers/gpu/drm/drm_exec.c
:doc: Overview
.. kernel-doc:: include/drm/drm_exec.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_exec.c
:export:
GPU Scheduler
=============

@@ -135,9 +135,13 @@ Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and
drm_i915_context_engines_parallel_submit to the uAPI to implement this
extension.
.. c:namespace-push:: rfc
.. kernel-doc:: include/uapi/drm/i915_drm.h
:functions: i915_context_engines_parallel_submit
.. c:namespace-pop::
Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
-------------------------------------------------------------------
Contexts that have been configured with the 'set_parallel' extension can only

@@ -49,14 +49,18 @@ converted over. Modern compositors like Wayland or Surfaceflinger on Android
really want an atomic modeset interface, so this is all about the bright
future.
There is a conversion guide for atomic and all you need is a GPU for a
non-converted driver (again virtual HW drivers for KVM are still all
suitable).
There is a conversion guide for atomic [1]_ and all you need is a GPU for a
non-converted driver. The "Atomic mode setting design overview" series [2]_
[3]_ at LWN.net can also be helpful.
As part of this drivers also need to convert to universal plane (which means
exposing primary & cursor as proper plane objects). But that's much easier to
do by directly using the new atomic helper driver callbacks.
.. [1] https://blog.ffwll.ch/2014/11/atomic-modeset-support-for-kms-drivers.html
.. [2] https://lwn.net/Articles/653071/
.. [3] https://lwn.net/Articles/653466/
Contact: Daniel Vetter, respective driver maintainers
Level: Advanced
@@ -319,15 +323,6 @@ Contact: Daniel Vetter, Noralf Tronnes
Level: Advanced
struct drm_gem_object_funcs
---------------------------
GEM objects can now have a function table instead of having the callbacks on the
DRM driver struct. This is now the preferred way. Callbacks in drivers have been
converted, except for struct drm_driver.gem_prime_mmap.
Level: Intermediate
connector register/unregister fixes
-----------------------------------
@@ -452,6 +447,44 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
Level: Starter
Remove driver dependencies on FB_DEVICE
---------------------------------------
A number of fbdev drivers provide attributes via sysfs and therefore depend
on CONFIG_FB_DEVICE to be selected. Review each driver and attempt to make
any dependencies on CONFIG_FB_DEVICE optional. At the minimum, the respective
code in the driver could be conditionalized via ifdef CONFIG_FB_DEVICE. Not
all drivers might be able to drop CONFIG_FB_DEVICE.
Contact: Thomas Zimmermann <tzimmermann@suse.de>
Level: Starter
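
A minimal sketch of such conditionalization, with a purely illustrative
attribute::

    #include <linux/device.h>

    #ifdef CONFIG_FB_DEVICE
    /* Build the sysfs attribute only when fbdev device files exist. */
    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "example\n");
    }
    static DEVICE_ATTR_RO(example);
    #endif /* CONFIG_FB_DEVICE */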
Clean up checks for already prepared/enabled in panels
------------------------------------------------------
In a whole pile of panel drivers, we have code to make the
prepare/unprepare/enable/disable callbacks behave as no-ops if they've already
been called. To get some idea of the duplicated code, try::
git grep 'if.*>prepared' -- drivers/gpu/drm/panel
git grep 'if.*>enabled' -- drivers/gpu/drm/panel
In the patch ("drm/panel: Check for already prepared/enabled in drm_panel")
we've moved this check to the core. Now we can most definitely remove the
check from the individual panels and save a pile of code.
In addition to removing the check from the individual panels, it is believed
that even the core shouldn't need this check and that should be considered
an error if other code ever relies on this check. The check in the core
currently prints a warning whenever something is relying on this check with
dev_warn(). After a little while, we likely want to promote this to a
WARN(1) to help encourage folks not to rely on this behavior.
Contact: Douglas Anderson <dianders@chromium.org>
Level: Starter/Intermediate
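
For reference, the duplicated pattern being removed looks roughly like this
(struct and helper names are illustrative)::

    #include <drm/drm_panel.h>

    struct my_panel {                   /* illustrative driver state */
            struct drm_panel base;
            bool prepared;
    };
    #define to_my_panel(p) container_of(p, struct my_panel, base)

    static int my_panel_prepare(struct drm_panel *panel)
    {
            struct my_panel *ctx = to_my_panel(panel);

            /* the duplicated guard, now handled once in the drm_panel core */
            if (ctx->prepared)
                    return 0;

            /* ... actual power-up sequence ... */
            ctx->prepared = true;
            return 0;
    }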
Core refactorings
=================
@@ -749,16 +782,16 @@ existing hardware. The new driver's call-back functions are filled from
existing fbdev code.
More complex fbdev drivers can be refactored step-by-step into a DRM
driver with the help of the DRM fbconv helpers. [1] These helpers provide
driver with the help of the DRM fbconv helpers [4]_. These helpers provide
the transition layer between the DRM core infrastructure and the fbdev
driver interface. Create a new DRM driver on top of the fbconv helpers,
copy over the fbdev driver, and hook it up to the DRM code. Examples for
several fbdev drivers are available at [1] and a tutorial of this process
available at [2]. The result is a primitive DRM driver that can run X11
and Weston.
several fbdev drivers are available in Thomas Zimmermann's fbconv tree
[4]_, as well as a tutorial of this process [5]_. The result is a primitive
DRM driver that can run X11 and Weston.
- [1] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv
- [2] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c
.. [4] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv
.. [5] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c
Contact: Thomas Zimmermann <tzimmermann@suse.de>

@@ -6180,10 +6180,9 @@ F: kernel/dma/
DMA-BUF HEAPS FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
R: John Stultz <jstultz@google.com>
R: T.J. Mercier <tjmercier@google.com>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
@@ -6425,6 +6424,7 @@ F: drivers/gpu/drm/aspeed/
DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6576,6 +6576,7 @@ F: drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
DRM DRIVER FOR MGA G200 GRAPHICS CHIPS
M: Dave Airlie <airlied@redhat.com>
R: Thomas Zimmermann <tzimmermann@suse.de>
R: Jocelyn Falempe <jfalempe@redhat.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6975,6 +6976,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/lima/
F: include/uapi/drm/lima_drm.h
DRM DRIVERS FOR LOONGSON
M: Sui Jingfeng <suijingfeng@loongson.cn>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/loongson/
DRM DRIVERS FOR MEDIATEK
M: Chun-Kuang Hu <chunkuang.hu@kernel.org>
M: Philipp Zabel <p.zabel@pengutronix.de>
@@ -7044,7 +7052,7 @@ F: drivers/gpu/drm/stm
DRM DRIVERS FOR TI KEYSTONE
M: Jyri Sarha <jyri.sarha@iki.fi>
M: Tomi Valkeinen <tomba@kernel.org>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7055,16 +7063,18 @@ F: drivers/gpu/drm/tidss/
DRM DRIVERS FOR TI LCDC
M: Jyri Sarha <jyri.sarha@iki.fi>
R: Tomi Valkeinen <tomba@kernel.org>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/tilcdc/
F: drivers/gpu/drm/tilcdc/
DRM DRIVERS FOR TI OMAP
M: Tomi Valkeinen <tomba@kernel.org>
M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/ti/
F: drivers/gpu/drm/omapdrm/

@@ -5,6 +5,8 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/screen_info.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

@@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <asm/efi.h>
#include <asm/stacktrace.h>

@@ -18,6 +18,7 @@
#include <linux/kobject.h>
#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/screen_info.h>
#include <linux/uaccess.h>
#include <asm/early_ioremap.h>

@@ -386,7 +386,7 @@ static struct property_entry gpio_backlight_props[] = {
};
static struct gpio_backlight_platform_data gpio_backlight_data = {
.fbdev = &lcdc_device.dev,
.dev = &lcdc_device.dev,
};
static const struct platform_device_info gpio_backlight_device_info = {

@@ -202,7 +202,7 @@ static struct platform_device kfr2r09_sh_lcdc_device = {
};
static struct lv5207lp_platform_data kfr2r09_backlight_data = {
.fbdev = &kfr2r09_sh_lcdc_device.dev,
.dev = &kfr2r09_sh_lcdc_device.dev,
.def_value = 13,
.max_value = 13,
};

@@ -119,7 +119,7 @@ static struct fb_videomode sh7763fb_videomode = {
.vsync_len = 1,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
.flag = FBINFO_FLAG_DEFAULT,
.flag = FB_MODE_IS_UNKNOWN,
};
static struct sh7760fb_platdata sh7763fb_def_pdata = {

@@ -259,7 +259,7 @@ drivers-$(CONFIG_PCI) += arch/x86/pci/
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/x86/power/
drivers-$(CONFIG_FB) += arch/x86/video/
drivers-$(CONFIG_FB_CORE) += arch/x86/video/
####
# boot loader support. Several targets are kept for legacy purposes

@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_FB) += fbdev.o
obj-$(CONFIG_FB_CORE) += fbdev.o

@@ -129,8 +129,6 @@ source "drivers/dma-buf/Kconfig"
source "drivers/dca/Kconfig"
source "drivers/auxdisplay/Kconfig"
source "drivers/uio/Kconfig"
source "drivers/vfio/Kconfig"

@@ -2,10 +2,13 @@
# Copyright (C) 2023 Intel Corporation
intel_vpu-y := \
ivpu_debugfs.o \
ivpu_drv.o \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
ivpu_hw_mtl.o \
ivpu_hw_37xx.o \
ivpu_hw_40xx.o \
ivpu_ipc.o \
ivpu_job.o \
ivpu_jsm_msg.o \
@@ -13,4 +16,4 @@ intel_vpu-y := \
ivpu_mmu_context.o \
ivpu_pm.o
obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o

@@ -0,0 +1,294 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
static int bo_list_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_printer p = drm_seq_file_printer(s);
ivpu_bo_list(node->minor->dev, &p);
return 0;
}
static int fw_name_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%s\n", vdev->fw->name);
return 0;
}
static int fw_trace_capability_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
u64 trace_hw_component_mask;
u32 trace_destination_mask;
int ret;
ret = ivpu_jsm_trace_get_capability(vdev, &trace_destination_mask,
&trace_hw_component_mask);
if (!ret) {
seq_printf(s,
"trace_destination_mask: %#18x\n"
"trace_hw_component_mask: %#18llx\n",
trace_destination_mask, trace_hw_component_mask);
}
return 0;
}
static int fw_trace_config_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
/**
* WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet,
* so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config()
*/
u32 trace_level = vdev->fw->trace_level;
u32 trace_destination_mask = vdev->fw->trace_destination_mask;
u64 trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
seq_printf(s,
"trace_level: %#18x\n"
"trace_destination_mask: %#18x\n"
"trace_hw_component_mask: %#18llx\n",
trace_level, trace_destination_mask, trace_hw_component_mask);
return 0;
}
static int last_bootmode_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot");
return 0;
}
static int reset_counter_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter));
return 0;
}
static int reset_pending_show(struct seq_file *s, void *v)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct ivpu_device *vdev = to_ivpu_device(node->minor->dev);
seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
return 0;
}
static const struct drm_info_list vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
{"fw_trace_capability", fw_trace_capability_show, 0},
{"fw_trace_config", fw_trace_config_show, 0},
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
};
static int fw_log_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct drm_printer p = drm_seq_file_printer(s);
ivpu_fw_log_print(vdev, true, &p);
return 0;
}
static int fw_log_fops_open(struct inode *inode, struct file *file)
{
return single_open(file, fw_log_show, inode->i_private);
}
static ssize_t
fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct seq_file *s = file->private_data;
struct ivpu_device *vdev = s->private;
if (!size)
return -EINVAL;
ivpu_fw_log_clear(vdev);
return size;
}
static const struct file_operations fw_log_fops = {
.owner = THIS_MODULE,
.open = fw_log_fops_open,
.write = fw_log_fops_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static ssize_t
fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
struct ivpu_fw_info *fw = vdev->fw;
u32 trace_destination_mask;
int ret;
ret = kstrtou32_from_user(user_buf, size, 0, &trace_destination_mask);
if (ret < 0)
return ret;
fw->trace_destination_mask = trace_destination_mask;
ivpu_jsm_trace_set_config(vdev, fw->trace_level, trace_destination_mask,
fw->trace_hw_component_mask);
return size;
}
static const struct file_operations fw_trace_destination_mask_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_trace_destination_mask_fops_write,
};
static ssize_t
fw_trace_hw_comp_mask_fops_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
struct ivpu_fw_info *fw = vdev->fw;
u64 trace_hw_component_mask;
int ret;
ret = kstrtou64_from_user(user_buf, size, 0, &trace_hw_component_mask);
if (ret < 0)
return ret;
fw->trace_hw_component_mask = trace_hw_component_mask;
ivpu_jsm_trace_set_config(vdev, fw->trace_level, fw->trace_destination_mask,
trace_hw_component_mask);
return size;
}
static const struct file_operations fw_trace_hw_comp_mask_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_trace_hw_comp_mask_fops_write,
};
static ssize_t
fw_trace_level_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
struct ivpu_fw_info *fw = vdev->fw;
u32 trace_level;
int ret;
ret = kstrtou32_from_user(user_buf, size, 0, &trace_level);
if (ret < 0)
return ret;
fw->trace_level = trace_level;
ivpu_jsm_trace_set_config(vdev, trace_level, fw->trace_destination_mask,
fw->trace_hw_component_mask);
return size;
}
static const struct file_operations fw_trace_level_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_trace_level_fops_write,
};
static ssize_t
ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
if (!size)
return -EINVAL;
if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
return -ENODEV;
if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
return -ENODEV;
return size;
}
static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
if (!size)
return -EINVAL;
ivpu_pm_schedule_recovery(vdev);
return size;
}
static const struct file_operations ivpu_force_recovery_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = ivpu_force_recovery_fn,
};
static const struct file_operations ivpu_reset_engine_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = ivpu_reset_engine_fn,
};
void ivpu_debugfs_init(struct drm_minor *minor)
{
struct ivpu_device *vdev = to_ivpu_device(minor->dev);
drm_debugfs_create_files(vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list),
minor->debugfs_root, minor);
debugfs_create_file("force_recovery", 0200, minor->debugfs_root, vdev,
&ivpu_force_recovery_fops);
debugfs_create_file("fw_log", 0644, minor->debugfs_root, vdev,
&fw_log_fops);
debugfs_create_file("fw_trace_destination_mask", 0200, minor->debugfs_root, vdev,
&fw_trace_destination_mask_fops);
debugfs_create_file("fw_trace_hw_comp_mask", 0200, minor->debugfs_root, vdev,
&fw_trace_hw_comp_mask_fops);
debugfs_create_file("fw_trace_level", 0200, minor->debugfs_root, vdev,
&fw_trace_level_fops);
debugfs_create_file("reset_engine", 0200, minor->debugfs_root, vdev,
&ivpu_reset_engine_fops);
}

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_DEBUGFS_H__
#define __IVPU_DEBUGFS_H__
struct drm_minor;
void ivpu_debugfs_init(struct drm_minor *minor);
#endif /* __IVPU_DEBUGFS_H__ */

@@ -14,6 +14,7 @@
#include <drm/drm_prime.h>
#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
@@ -50,6 +51,10 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -110,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
switch (args->index) {
case DRM_IVPU_CAP_METRIC_STREAMER:
args->value = 0;
break;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
args->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -139,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_get_context_count(vdev);
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user_low.start;
args->value = vdev->hw->ranges.user.start;
break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority;
@@ -169,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_SKU:
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
ret = ivpu_get_capabilities(vdev, args);
break;
default:
ret = -EINVAL;
break;
@@ -369,10 +393,11 @@ static const struct drm_driver driver = {
.open = ivpu_open,
.postclose = ivpu_postclose,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = ivpu_gem_prime_import,
.gem_prime_mmap = drm_gem_prime_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = ivpu_debugfs_init,
#endif
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -427,7 +452,7 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
return PTR_ERR(vdev->regb);
}
ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38));
ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
if (ret) {
ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
return ret;
@ -437,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
/* VPU MTL does not require PCI spec 10m D3hot delay */
if (ivpu_is_mtl(vdev))
/* VPU 37XX does not require 10m D3hot delay */
if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
@ -476,7 +501,14 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm)
return -ENOMEM;
vdev->hw->ops = &ivpu_hw_mtl_ops;
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
vdev->hw->ops = &ivpu_hw_40xx_ops;
vdev->hw->dma_bits = 48;
} else {
vdev->hw->ops = &ivpu_hw_37xx_ops;
vdev->hw->dma_bits = 38;
}
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
@ -602,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);


@ -23,6 +23,10 @@
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
#define IVPU_HW_40XX 40
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent invalid context */
@ -76,6 +80,7 @@ struct ivpu_wa_table {
bool clear_runtime_mem;
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
};
struct ivpu_hw_info;
@ -132,6 +137,7 @@ struct ivpu_file_priv {
extern int ivpu_dbg_mask;
extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
extern bool ivpu_disable_mmu_cont_pages;
#define IVPU_TEST_MODE_DISABLED 0
#define IVPU_TEST_MODE_FW_TEST 1
@ -145,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
}
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->revision;
@ -160,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device;
}
static inline int ivpu_hw_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
return IVPU_HW_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
default:
ivpu_err(vdev, "Unknown VPU device\n");
return 0;
}
}
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{
return container_of(dev, struct ivpu_device, drm);


@ -11,6 +11,7 @@
#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@ -42,22 +43,39 @@ static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
{ IVPU_HW_37XX, "mtl_vpu.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
static int ivpu_fw_request(struct ivpu_device *vdev)
{
static const char * const fw_names[] = {
"mtl_vpu.bin",
"intel/vpu/mtl_vpu_v0.0.bin"
};
int ret = -ENOENT;
int i;
if (ivpu_firmware)
return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
if (ivpu_firmware) {
ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
if (!ret)
vdev->fw->name = ivpu_firmware;
return ret;
}
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
if (!ret)
if (fw_names[i].gen != ivpu_hw_gen(vdev))
continue;
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
if (!ret) {
vdev->fw->name = fw_names[i].name;
return 0;
}
}
ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
@ -142,7 +160,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
}
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
(const char *)fw_hdr + VPU_FW_HEADER_SIZE);
if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
@ -158,6 +178,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
@ -182,13 +206,14 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL;
}
ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0;
}
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
int log_verb_size;
int ret;
ret = ivpu_fw_update_global_range(vdev);
@ -201,17 +226,45 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
return -ENOMEM;
}
fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
DRM_IVPU_BO_CACHED);
if (!fw->mem_log_crit) {
ivpu_err(vdev, "Failed to allocate critical log buffer\n");
ret = -ENOMEM;
goto err_free_fw_mem;
}
if (ivpu_log_level <= IVPU_FW_LOG_INFO)
log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
if (!fw->mem_log_verb) {
ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
ret = -ENOMEM;
goto err_free_log_crit;
}
if (fw->shave_nn_size) {
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
ivpu_bo_free_internal(fw->mem);
return -ENOMEM;
ret = -ENOMEM;
goto err_free_log_verb;
}
}
return 0;
err_free_log_verb:
ivpu_bo_free_internal(fw->mem_log_verb);
err_free_log_crit:
ivpu_bo_free_internal(fw->mem_log_crit);
err_free_fw_mem:
ivpu_bo_free_internal(fw->mem);
return ret;
}
static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
@ -223,7 +276,12 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
fw->mem_shave_nn = NULL;
}
ivpu_bo_free_internal(fw->mem_log_verb);
ivpu_bo_free_internal(fw->mem_log_crit);
ivpu_bo_free_internal(fw->mem);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
fw->mem = NULL;
}
@ -387,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
vdev->hw->ranges.global_low.start;
boot_params->shared_region_base = vdev->hw->ranges.global.start;
boot_params->shared_region_size = vdev->hw->ranges.global.end -
vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
@ -397,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
boot_params->global_aliased_pio_base =
vdev->hw->ranges.global_aliased_pio.start;
boot_params->global_aliased_pio_size =
ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@ -408,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
/* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
@ -424,6 +480,15 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;
boot_params->default_trace_level = vdev->fw->trace_level;
boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
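
The firmware lookup earlier in this file is now table-driven and filtered by hardware generation before any request is attempted. A minimal standalone sketch of that selection pattern (a stub stands in for firmware_request_nowarn(); names abridged from the fw_names table above):

#include <stdio.h>

struct fw_name {
        int gen;
        const char *name;
};

static const struct fw_name fw_names[] = {
        { 37, "vpu_37xx.bin" },
        { 37, "mtl_vpu.bin" },          /* legacy name kept for the transition */
        { 40, "vpu_40xx.bin" },
};

/* Stand-in for firmware_request_nowarn(); pretend only vpu_40xx.bin exists. */
static int request_stub(const char *name)
{
        return name[4] == '4' ? 0 : -1;
}

static const char *pick_firmware(int hw_gen)
{
        unsigned int i;

        for (i = 0; i < sizeof(fw_names) / sizeof(fw_names[0]); i++) {
                if (fw_names[i].gen != hw_gen)
                        continue;       /* skip names for other generations */
                if (request_stub(fw_names[i].name) == 0)
                        return fw_names[i].name;
        }
        return NULL;
}

int main(void)
{
        const char *name = pick_firmware(40);

        printf("%s\n", name ? name : "none found");
        return 0;
}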


@ -12,6 +12,7 @@ struct vpu_boot_params;
struct ivpu_fw_info {
const struct firmware *file;
const char *name;
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
@ -23,6 +24,9 @@ struct ivpu_fw_info {
u32 shave_nn_size;
u64 entry_point; /* Cold or warm boot entry point for next boot */
u64 cold_boot_entry_point;
u32 trace_level;
u32 trace_destination_mask;
u64 trace_hw_component_mask;
};
int ivpu_fw_init(struct ivpu_device *vdev);


@ -0,0 +1,142 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#define IVPU_FW_LOG_LINE_LENGTH 256
unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
module_param(ivpu_log_level, uint, 0444);
MODULE_PARM_DESC(ivpu_log_level,
"VPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
" fatal=" __stringify(IVPU_FW_LOG_FATAL));
static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
struct vpu_tracing_buffer_header **log_header)
{
struct vpu_tracing_buffer_header *log;
if ((*offset + sizeof(*log)) > bo->base.size)
return -EINVAL;
log = bo->kvaddr + *offset;
if (log->vpu_canary_start != VPU_TRACING_BUFFER_CANARY)
return -EINVAL;
if (log->header_size < sizeof(*log) || log->header_size > 1024) {
ivpu_dbg(vdev, FW_BOOT, "Invalid header size 0x%x\n", log->header_size);
return -EINVAL;
}
if ((char *)log + log->size > (char *)bo->kvaddr + bo->base.size) {
ivpu_dbg(vdev, FW_BOOT, "Invalid log size 0x%x\n", log->size);
return -EINVAL;
}
*log_header = log;
*offset += log->size;
ivpu_dbg(vdev, FW_BOOT,
"FW log name \"%s\", write offset 0x%x size 0x%x, wrap count %d, hdr version %d size %d format %d, alignment %d",
log->name, log->write_index, log->size, log->wrap_count, log->header_version,
log->header_size, log->format, log->alignment);
return 0;
}
static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
{
char line[IVPU_FW_LOG_LINE_LENGTH];
u32 index = 0;
if (!size || !buffer)
return;
while (size--) {
if (*buffer == '\n' || *buffer == 0) {
line[index] = 0;
if (index != 0)
drm_printf(p, "%s\n", line);
index = 0;
buffer++;
continue;
}
if (index == IVPU_FW_LOG_LINE_LENGTH - 1) {
line[index] = 0;
index = 0;
drm_printf(p, "%s\n", line);
}
if (*buffer != '\r' && (isprint(*buffer) || iscntrl(*buffer)))
line[index++] = *buffer;
buffer++;
}
line[index] = 0;
if (index != 0)
drm_printf(p, "%s\n", line);
}
static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
const char *prefix, bool only_new_msgs, struct drm_printer *p)
{
char *log_buffer = (void *)log + log->header_size;
u32 log_size = log->size - log->header_size;
u32 log_start = log->read_index;
u32 log_end = log->write_index;
if (!(log->write_index || log->wrap_count) ||
(log->write_index == log->read_index && only_new_msgs)) {
drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
return;
}
drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
if (log->write_index > log->read_index) {
buffer_print(log_buffer + log_start, log_end - log_start, p);
} else {
buffer_print(log_buffer + log_end, log_size - log_end, p);
buffer_print(log_buffer, log_end, p);
}
drm_printf(p, "\x1b[0m");
drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
}
void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
{
struct vpu_tracing_buffer_header *log_header;
u32 next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
fw_log_print_buffer(vdev, log_header, "VPU critical", only_new_msgs, p);
next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
fw_log_print_buffer(vdev, log_header, "VPU verbose", only_new_msgs, p);
}
void ivpu_fw_log_clear(struct ivpu_device *vdev)
{
struct vpu_tracing_buffer_header *log_header;
u32 next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
log_header->read_index = log_header->write_index;
next = 0;
while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
log_header->read_index = log_header->write_index;
}
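
fw_log_print_buffer() above dumps a ring buffer that may have wrapped, in which case the oldest segment (from the write index to the end) must be printed before the newest one (from the start to the write index). The same two-segment logic as a self-contained sketch, with stdio standing in for drm_printf():

#include <stdio.h>
#include <string.h>

/* Dump a ring buffer: linear when the writer has not wrapped, otherwise
 * oldest segment [wr, size) first, then newest segment [0, wr).
 */
static void dump_ring(const char *buf, size_t size, size_t rd, size_t wr,
                      int wrapped)
{
        if (!wrapped) {
                fwrite(buf + rd, 1, wr - rd, stdout);
        } else {
                fwrite(buf + wr, 1, size - wr, stdout);
                fwrite(buf, 1, wr, stdout);
        }
}

int main(void)
{
        char ring[8];

        /* "ABCDEFGH" was written, then "IJ" overwrote the front: wr == 2 */
        memcpy(ring, "IJCDEFGH", 8);
        dump_ring(ring, sizeof(ring), 0, 2, 1); /* prints CDEFGHIJ */
        putchar('\n');
        return 0;
}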


@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_FW_LOG_H__
#define __IVPU_FW_LOG_H__
#include <linux/types.h>
#include <drm/drm_print.h>
#include "ivpu_drv.h"
#define IVPU_FW_LOG_DEFAULT 0
#define IVPU_FW_LOG_DEBUG 1
#define IVPU_FW_LOG_INFO 2
#define IVPU_FW_LOG_WARN 3
#define IVPU_FW_LOG_ERROR 4
#define IVPU_FW_LOG_FATAL 5
extern unsigned int ivpu_log_level;
#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE SZ_1M
#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE SZ_8M
#define IVPU_FW_CRITICAL_BUFFER_SIZE SZ_512K
void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
void ivpu_fw_log_clear(struct ivpu_device *vdev);
static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
{
struct drm_printer p = drm_info_printer(vdev->drm.dev);
ivpu_fw_log_print(vdev, false, &p);
}
#endif /* __IVPU_FW_LOG_H__ */


@ -282,10 +282,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
int ret;
if (!range) {
if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
range = &vdev->hw->ranges.user_high;
if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
range = &vdev->hw->ranges.shave;
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
range = &vdev->hw->ranges.dma;
else
range = &vdev->hw->ranges.user_low;
range = &vdev->hw->ranges.user;
}
mutex_lock(&ctx->lock);
@ -573,7 +575,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
fixed_range.end = vpu_addr + size;
range = &fixed_range;
} else {
range = &vdev->hw->ranges.global_low;
range = &vdev->hw->ranges.global;
}
bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
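
On the userspace side the new ranges are selected purely by BO flags at creation time. A hedged sketch (the ioctl and flag names are from the ivpu uapi; the exact drm_ivpu_bo_create layout here is an assumption):

#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* Ask for a buffer in the dedicated DMA range; the kernel-side logic
 * above maps DRM_IVPU_BO_DMA_MEM to vdev->hw->ranges.dma.
 */
static int create_dma_bo(int fd, unsigned long long size)
{
        struct drm_ivpu_bo_create args = {
                .size = size,
                .flags = DRM_IVPU_BO_DMA_MEM,
        };

        if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) < 0)
                return -1;
        return args.handle;     /* args.vpu_addr holds the assigned VPU VA */
}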


@ -38,11 +38,10 @@ struct ivpu_addr_range {
struct ivpu_hw_info {
const struct ivpu_hw_ops *ops;
struct {
struct ivpu_addr_range global_low;
struct ivpu_addr_range global_high;
struct ivpu_addr_range user_low;
struct ivpu_addr_range user_high;
struct ivpu_addr_range global_aliased_pio;
struct ivpu_addr_range global;
struct ivpu_addr_range user;
struct ivpu_addr_range shave;
struct ivpu_addr_range dma;
} ranges;
struct {
u8 min_ratio;
@ -57,9 +56,11 @@ struct ivpu_hw_info {
u32 tile_fuse;
u32 sku;
u16 config;
int dma_bits;
};
extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
{


@ -5,7 +5,7 @@
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_mtl_reg.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@ -39,34 +39,34 @@
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
@ -84,8 +84,8 @@ static char *ivpu_platform_to_str(u32 platform)
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
vdev->platform = platform;
@ -123,7 +123,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
/* Send KMD initiated workpoint change */
@ -139,23 +139,23 @@ static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ra
return ret;
}
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret)
@ -171,7 +171,7 @@ static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
@ -179,7 +179,7 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
@ -188,21 +188,21 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
u32 fmin_fuse, fmax_fuse;
fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
@ -248,7 +248,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
return ret;
}
ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
return ret;
@ -272,52 +272,52 @@ static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
u32 val = 0;
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
if (enable) {
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
}
REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
if (enable) {
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
}
REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@ -325,9 +325,9 @@ static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@ -335,9 +335,9 @@ static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@ -385,7 +385,7 @@ static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
@ -393,12 +393,12 @@ static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
@ -453,26 +453,26 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
@ -481,32 +481,32 @@ static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 ex
if (ivpu_is_fpga(vdev))
return 0;
return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
@ -538,36 +538,25 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
if (ivpu_is_fpga(vdev)) {
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
} else {
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
}
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
@ -587,10 +576,10 @@ static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = vdev->fw->entry_point >> 9;
REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
@ -601,27 +590,27 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
if (enable)
val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
else
val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
@ -631,16 +620,15 @@ static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
ivpu_pll_init_frequency_ratios(vdev);
ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
hw->ranges.global_aliased_pio = hw->ranges.user_low;
ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
return 0;
}
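
For orientation, a summary (not text from the patch) of how the renamed ranges initialized above lay out, with the old field names noted:

/*
 * 37XX address map after the rename:
 *
 *   ranges.global  0x080000000 + 512M  FW image, logs, IPC  (was global_low)
 *   ranges.user    0x0c0000000 + 255M  user / aliased PIO   (was user_low)
 *   ranges.shave   0x180000000 + 2G    SHAVE NN buffers     (was user_high)
 *   ranges.dma     0x200000000 + 8G    DMA-able buffers     (new)
 */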
static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@ -648,24 +636,24 @@ static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@ -678,7 +666,7 @@ static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
@ -689,7 +677,7 @@ static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
@ -697,11 +685,11 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
ret = ivpu_hw_mtl_reset(vdev);
ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
ret = ivpu_hw_mtl_d0i3_disable(vdev);
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@ -743,7 +731,7 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
ivpu_boot_no_snoop_enable(vdev);
ivpu_boot_tbu_mmu_enable(vdev);
@ -752,32 +740,31 @@ static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
return 0;
}
static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
ivpu_err(vdev, "Failed to reset the VPU\n");
}
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
if (ivpu_hw_mtl_d0i3_enable(vdev)) {
if (ivpu_hw_37xx_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
@ -785,7 +772,7 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
return ret;
}
static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
u32 val;
@ -803,7 +790,7 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}
static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
@ -817,35 +804,35 @@ static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
}
/* Register indirect accesses */
static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}
static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
@ -853,52 +840,52 @@ static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
@ -906,7 +893,7 @@ static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
@ -914,65 +901,66 @@ static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
}
/* Handler for IRQs from VPU core (irqV) */
static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
return status;
}
/* Handler for IRQs from Buttress core (irqB) */
static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
if (status == 0)
return 0;
/* Disable global interrupt before handling local buttress interrupts */
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
@ -982,12 +970,12 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
* Writing 1 triggers an interrupt, so we can't perform read update write.
* Clear local interrupt status by writing 0 to all bits.
*/
REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
else
REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
/* Re-enable global interrupt */
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@ -995,65 +983,65 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
return status;
}
static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
ivpu_err(vdev, "WDT MSS timeout detected\n");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
ivpu_err(vdev, "WDT NCE timeout detected\n");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
ivpu_err(vdev, "NOC Firewall irq detected\n");
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
}
}
const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
.info_init = ivpu_hw_mtl_info_init,
.power_up = ivpu_hw_mtl_power_up,
.is_idle = ivpu_hw_mtl_is_idle,
.power_down = ivpu_hw_mtl_power_down,
.boot_fw = ivpu_hw_mtl_boot_fw,
.wdt_disable = ivpu_hw_mtl_wdt_disable,
.diagnose_failure = ivpu_hw_mtl_diagnose_failure,
.reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
.reg_db_set = ivpu_hw_mtl_reg_db_set,
.reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
.reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
.reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
.irq_clear = ivpu_hw_mtl_irq_clear,
.irq_enable = ivpu_hw_mtl_irq_enable,
.irq_disable = ivpu_hw_mtl_irq_disable,
.irq_handler = ivpu_hw_mtl_irq_handler,
const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.info_init = ivpu_hw_37xx_info_init,
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
.power_down = ivpu_hw_37xx_power_down,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
.reg_db_set = ivpu_hw_37xx_reg_db_set,
.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
.irq_clear = ivpu_hw_37xx_irq_clear,
.irq_enable = ivpu_hw_37xx_irq_enable,
.irq_disable = ivpu_hw_37xx_irq_disable,
.irq_handler = ivpu_hw_37xx_irq_handler,
};
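
With two ops tables now registered, the hardware layer is reached only through vdev->hw->ops. A minimal sketch of that dispatch pattern, mirroring the generation check added to ivpu_dev_init() (names simplified, not the driver's exact wrappers):

#include <stdio.h>

struct dev;

struct hw_ops {
        int (*power_up)(struct dev *d);
};

struct dev {
        const struct hw_ops *ops;
        int gen;
};

static int power_up_37xx(struct dev *d) { printf("37xx power up\n"); return 0; }
static int power_up_40xx(struct dev *d) { printf("40xx power up\n"); return 0; }

static const struct hw_ops ops_37xx = { .power_up = power_up_37xx };
static const struct hw_ops ops_40xx = { .power_up = power_up_40xx };

/* Mirrors the selection added to ivpu_dev_init() above. */
static void bind_ops(struct dev *d)
{
        d->ops = d->gen >= 40 ? &ops_40xx : &ops_37xx;
}

int main(void)
{
        struct dev d = { .gen = 40 };

        bind_ops(&d);
        return d.ops->power_up(&d);
}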


@ -0,0 +1,281 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_MTL_REG_H__
#define __IVPU_HW_MTL_REG_H__
#include <linux/bits.h>
#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_37XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_37XX_HOST_SS_CPR_RST_SET 0x00000094u
#define VPU_37XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_37XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_37XX_HOST_SS_CPR_RST_CLR 0x00000098u
#define VPU_37XX_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define VPU_37XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
#define VPU_37XX_HOST_SS_HW_VERSION 0x00000108u
#define VPU_37XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
#define VPU_37XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
#define VPU_37XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
#define VPU_37XX_HOST_SS_GEN_CTRL 0x00000118u
#define VPU_37XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
#define VPU_37XX_HOST_SS_NOC_QREQN 0x00000154u
#define VPU_37XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_NOC_QACCEPTN 0x00000158u
#define VPU_37XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_NOC_QDENY 0x0000015cu
#define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
#define VPU_37XX_HOST_SS_ICB_STATUS_0 0x00010210u
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
#define VPU_37XX_HOST_SS_ICB_STATUS_1 0x00010214u
#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_SS_ICB_CLEAR_0 0x00010220u
#define VPU_37XX_HOST_SS_ICB_CLEAR_1 0x00010224u
#define VPU_37XX_HOST_SS_ICB_ENABLE_0 0x00010240u
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define VPU_37XX_HOST_MMU_IDR0 0x00200000u
#define VPU_37XX_HOST_MMU_IDR1 0x00200004u
#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu
#define VPU_37XX_HOST_MMU_IDR5 0x00200014u
#define VPU_37XX_HOST_MMU_CR0 0x00200020u
#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u
#define VPU_37XX_HOST_MMU_CR1 0x00200028u
#define VPU_37XX_HOST_MMU_CR2 0x0020002cu
#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u
#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u
#define VPU_37XX_HOST_MMU_GERROR 0x00200060u
#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u
#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u
#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u
#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u
#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu
#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u
#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u
#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu
#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
#endif /* __IVPU_HW_37XX_REG_H__ */
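
Illustrative aside (not part of the patch): defines like VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK above typically feed a bounded poll loop. A hedged, self-contained sketch of that pattern; the register read is faked here, and a real driver would use its MMIO accessor and a proper timeout:

#include <stdint.h>
#include <stdbool.h>

#define VPU_STATUS_READY_MASK 0x1u	/* BIT_MASK(0), as in the header above */

static volatile uint32_t fake_status_reg;	/* stands in for an MMIO register */

static uint32_t read_reg(void)
{
	return fake_status_reg;
}

/* poll a status bit with a simple retry budget instead of a real timeout */
static bool wait_for_ready(unsigned int retries)
{
	while (retries--)
		if (read_reg() & VPU_STATUS_READY_MASK)
			return true;
	return false;
}

int main(void)
{
	fake_status_reg = VPU_STATUS_READY_MASK;
	return wait_for_ready(10) ? 0 : 1;
}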

File diff suppressed because it is too large

@@ -0,0 +1,267 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_40XX_REG_H__
#define __IVPU_HW_40XX_REG_H__
#include <linux/bits.h>
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_CLK_EN_CSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_CLK_SET 0x00000084u
#define VPU_40XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_RST_EN 0x00000090u
#define VPU_40XX_HOST_SS_CPR_RST_EN_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_RST_EN_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_RST_EN_CSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_RST_SET 0x00000094u
#define VPU_40XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_RST_CLR 0x00000098u
#define VPU_40XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_HW_VERSION 0x00000108u
#define VPU_40XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
#define VPU_40XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
#define VPU_40XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
#define VPU_40XX_HOST_SS_SW_VERSION 0x0000010cu
#define VPU_40XX_HOST_SS_GEN_CTRL 0x00000118u
#define VPU_40XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
#define VPU_40XX_HOST_SS_NOC_QREQN 0x00000154u
#define VPU_40XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_NOC_QACCEPTN 0x00000158u
#define VPU_40XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_NOC_QDENY 0x0000015cu
#define VPU_40XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QREQN 0x00000160u
#define VPU_40XX_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
#define VPU_40XX_TOP_NOC_QACCEPTN 0x00000164u
#define VPU_40XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
#define VPU_40XX_TOP_NOC_QDENY 0x00000168u
#define VPU_40XX_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
#define VPU_40XX_HOST_SS_ICB_STATUS_0 0x00010210u
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
#define VPU_40XX_HOST_SS_ICB_STATUS_1 0x00010214u
#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_ICB_CLEAR_0 0x00010220u
#define VPU_40XX_HOST_SS_ICB_CLEAR_1 0x00010224u
#define VPU_40XX_HOST_SS_ICB_ENABLE_0 0x00010240u
#define VPU_40XX_HOST_SS_ICB_ENABLE_1 0x00010244u
#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_SNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AW_SNOOP_OVERRIDE_MASK BIT_MASK(4)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AR_SNOOP_OVERRIDE_MASK BIT_MASK(5)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
#define VPU_40XX_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
#define VPU_40XX_CPU_SS_TIM_WATCHDOG 0x0102009cu
#define VPU_40XX_CPU_SS_TIM_WDOG_EN 0x010200a4u
#define VPU_40XX_CPU_SS_TIM_SAFE 0x010200a8u
#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG 0x01021008u
#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
#define VPU_40XX_CPU_SS_CPR_NOC_QREQN 0x01010030u
#define VPU_40XX_CPU_SS_CPR_NOC_QREQN_TOP_MMIO_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN 0x01010034u
#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN_TOP_MMIO_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_CPR_NOC_QDENY 0x01010038u
#define VPU_40XX_CPU_SS_CPR_NOC_QDENY_TOP_MMIO_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_TIM_IPC_FIFO 0x010200f0u
#define VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT 0x01029008u
#define VPU_40XX_CPU_SS_DOORBELL_0 0x01300000u
#define VPU_40XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_DOORBELL_1 0x01301000u
#endif /* __IVPU_HW_40XX_REG_H__ */
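
Illustrative aside (not part of the patch): with a 37xx and a 40xx register header now in the tree, each generation gets its own ops table and the driver binds one of them per device. A hypothetical sketch of that selection, with invented device IDs:

#include <stddef.h>

struct hw_ops { int generation; };	/* stand-in for the real per-generation vtable */

static const struct hw_ops gen37xx_ops = { .generation = 37 };
static const struct hw_ops gen40xx_ops = { .generation = 40 };

/* invented IDs, purely for illustration -- not real PCI device IDs */
#define DEV_ID_GEN37XX 0x1111u
#define DEV_ID_GEN40XX 0x2222u

static const struct hw_ops *select_hw_ops(unsigned int dev_id)
{
	switch (dev_id) {
	case DEV_ID_GEN37XX:
		return &gen37xx_ops;
	case DEV_ID_GEN40XX:
		return &gen40xx_ops;
	default:
		return NULL;	/* unknown hardware: do not bind */
	}
}

int main(void)
{
	return select_hw_ops(DEV_ID_GEN40XX) == &gen40xx_ops ? 0 : 1;
}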

@@ -1,281 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_MTL_REG_H__
#define __IVPU_HW_MTL_REG_H__
#include <linux/bits.h>
#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u
#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u
#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u
#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u
#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u
#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu
#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u
#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u
#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u
#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define MTL_VPU_HOST_MMU_IDR0 0x00200000u
#define MTL_VPU_HOST_MMU_IDR1 0x00200004u
#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu
#define MTL_VPU_HOST_MMU_IDR5 0x00200014u
#define MTL_VPU_HOST_MMU_CR0 0x00200020u
#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u
#define MTL_VPU_HOST_MMU_CR1 0x00200028u
#define MTL_VPU_HOST_MMU_CR2 0x0020002cu
#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u
#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u
#define MTL_VPU_HOST_MMU_GERROR 0x00200060u
#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u
#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u
#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u
#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u
#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu
#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u
#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u
#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu
#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
#endif /* __IVPU_HW_MTL_REG_H__ */

@@ -289,15 +289,13 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
size_t buf_size;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return NULL;
buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *);
job = kzalloc(buf_size, GFP_KERNEL);
job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
if (!job)
goto err_rpm_put;
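
Illustrative aside (not part of the patch): the hunk above swaps open-coded size arithmetic for struct_size(), the <linux/overflow.h> helper for sizing a struct that ends in a flexible array member. A userspace sketch of the idiom; note the demo macro skips the saturating overflow check the real helper performs:

#include <stdlib.h>
#include <stddef.h>

struct job {
	int engine_idx;
	size_t bo_count;
	void *bos[];	/* flexible array member, as in struct ivpu_job */
};

/* demo stand-in for struct_size(); the kernel version also saturates on overflow */
#define struct_size_demo(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	size_t bo_count = 4;
	struct job *job = calloc(1, struct_size_demo(job, bos, bo_count));

	if (!job)
		return 1;
	job->bo_count = bo_count;	/* job->bos[0..3] are ready for use */
	free(job);
	return 0;
}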

@@ -7,7 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
#include "ivpu_hw_mtl_reg.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@@ -143,6 +143,16 @@
#define IVPU_MMU_CD_0_ASET BIT(47)
#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
#define IVPU_MMU_T0SZ_48BIT 16
#define IVPU_MMU_T0SZ_38BIT 26
#define IVPU_MMU_IPS_48BIT 5
#define IVPU_MMU_IPS_44BIT 4
#define IVPU_MMU_IPS_42BIT 3
#define IVPU_MMU_IPS_40BIT 2
#define IVPU_MMU_IPS_36BIT 1
#define IVPU_MMU_IPS_32BIT 0
#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
@@ -176,13 +186,13 @@
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
static char *ivpu_mmu_event_to_str(u32 cmd)
{
@@ -240,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR0_REF;
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
@@ -259,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR5_REF;
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
@@ -386,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
if (ret)
return ret;
return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
}
@@ -437,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret)
@@ -485,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
if (ret)
return ret;
@@ -495,17 +505,17 @@
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@@ -521,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@@ -540,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@@ -617,12 +627,12 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
if (cd_dma != 0) {
cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) |
cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
IVPU_MMU_CD_0_TCR_EPD1 |
IVPU_MMU_CD_0_AA64 |
@@ -791,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
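
Illustrative aside (not part of the patch): the event-queue read above relies on CIRC_CNT() from <linux/circ_buf.h>, which counts occupied slots in a power-of-two ring with wrap-safe unsigned arithmetic. The same math in isolation:

#include <stdio.h>

#define Q_COUNT 16u	/* ring size, must be a power of two */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int prod = 18, cons = 15;	/* indices may run past Q_COUNT... */

	/* ...because only the masked difference matters: 3 entries pending */
	printf("pending events: %u\n", CIRC_CNT(prod, cons, Q_COUNT));
	return 0;
}
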
@@ -831,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
ivpu_dbg(vdev, IRQ, "MMU error\n");
gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return;
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
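
Illustrative aside (not part of the patch): the gerr handler above uses an acknowledge-register pattern -- GERROR accumulates error toggles, GERRORN mirrors what was already acknowledged, XOR of the two yields only the new errors, and writing GERROR's value back to GERRORN acknowledges them. In isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gerror = 0x0000000du;	/* toggles reported by hardware */
	uint32_t gerrorn = 0x00000009u;	/* toggles already acknowledged */
	uint32_t active = gerror ^ gerrorn;	/* 0x00000004: the one new error */

	printf("newly active errors: 0x%08x\n", active);

	gerrorn = gerror;	/* acknowledge: XOR stays zero until HW toggles again */
	return gerrorn == gerror ? 0 : 1;
}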

@@ -11,10 +11,12 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0)
#define IVPU_MMU_ENTRY_FLAGS_MASK (BIT(52) | GENMASK(11, 0))
#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
@@ -22,10 +24,13 @@
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)
#define IVPU_MMU_PAGE_SIZE SZ_4K
#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
#define IVPU_MMU_PAGE_SIZE SZ_4K
#define IVPU_MMU_CONT_PAGES_SIZE (IVPU_MMU_PAGE_SIZE * 16)
#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
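
Illustrative aside (not part of the patch): the four index masks above slice a 48-bit VPU address into 9-bit table indexes plus a 12-bit page offset (9 + 9 + 9 + 9 + 12 = 48). A self-contained decomposition using the same bit positions:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for GENMASK_ULL() and FIELD_GET() */
#define GENMASK_U64(h, l)      ((~0ull << (l)) & (~0ull >> (63 - (h))))
#define FIELD_GET_U64(mask, v) (((v) & (mask)) / ((mask) & -(mask)))

#define PGD_INDEX_MASK GENMASK_U64(47, 39)
#define PUD_INDEX_MASK GENMASK_U64(38, 30)
#define PMD_INDEX_MASK GENMASK_U64(29, 21)
#define PTE_INDEX_MASK GENMASK_U64(20, 12)

int main(void)
{
	uint64_t vpu_addr = 0x805512345000ull;	/* arbitrary example address */

	printf("pgd %llu pud %llu pmd %llu pte %llu offset 0x%llx\n",
	       (unsigned long long)FIELD_GET_U64(PGD_INDEX_MASK, vpu_addr),
	       (unsigned long long)FIELD_GET_U64(PUD_INDEX_MASK, vpu_addr),
	       (unsigned long long)FIELD_GET_U64(PMD_INDEX_MASK, vpu_addr),
	       (unsigned long long)FIELD_GET_U64(PTE_INDEX_MASK, vpu_addr),
	       (unsigned long long)(vpu_addr & 0xfffull));
	return 0;
}
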
@ -36,167 +41,268 @@
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
dma_addr_t pgd_dma;
u64 *pgd;
pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
if (!pgd)
pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
GFP_KERNEL);
if (!pgtable->pgd_dma_ptr)
return -ENOMEM;
pgtable->pgd = pgd;
pgtable->pgd_dma = pgd_dma;
return 0;
}
static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
int pgd_index, pmd_index;
if (cpu_addr)
dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}
for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) {
u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index];
u64 *pmd = pgtable->pgd_entries[pgd_index];
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
dma_addr_t pud_dma, pmd_dma, pte_dma;
u64 *pud_dma_ptr, *pmd_dma_ptr, *pte_dma_ptr;
if (!pmd_entries)
for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
pud_dma = pgtable->pgd_dma_ptr[pgd_idx];
if (!pud_dma_ptr)
continue;
for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) {
if (pmd_entries[pmd_index])
dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
pmd_entries[pmd_index],
pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];
if (!pmd_dma_ptr)
continue;
for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
}
kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
}
kfree(pmd_entries);
dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index],
pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
kfree(pgtable->pmd_ptrs[pgd_idx]);
kfree(pgtable->pte_ptrs[pgd_idx]);
ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
}
dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}
static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index)
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
u64 **pmd_entries;
dma_addr_t pmd_dma;
u64 *pmd;
u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
dma_addr_t pud_dma;
if (pgtable->pgd_entries[pgd_index])
return pgtable->pgd_entries[pgd_index];
if (pud_dma_ptr)
return pud_dma_ptr;
pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
if (!pmd)
pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
if (!pud_dma_ptr)
return NULL;
pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pmd_entries)
goto err_free_pgd;
drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pgtable->pmd_ptrs[pgd_idx])
goto err_free_pud_dma_ptr;
pgtable->pgd_entries[pgd_index] = pmd;
pgtable->pgd_cpu_entries[pgd_index] = pmd_entries;
pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID;
drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pgtable->pte_ptrs[pgd_idx])
goto err_free_pmd_ptrs;
return pmd;
pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;
err_free_pgd:
dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
return pud_dma_ptr;
err_free_pmd_ptrs:
kfree(pgtable->pmd_ptrs[pgd_idx]);
err_free_pud_dma_ptr:
ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
return NULL;
}
static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
int pud_idx)
{
u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
dma_addr_t pmd_dma;
if (pmd_dma_ptr)
return pmd_dma_ptr;
pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
if (!pmd_dma_ptr)
return NULL;
drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
goto err_free_pmd_dma_ptr;
pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;
return pmd_dma_ptr;
err_free_pmd_dma_ptr:
ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
return NULL;
}
static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
int pgd_index, int pmd_index)
int pgd_idx, int pud_idx, int pmd_idx)
{
u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
dma_addr_t pte_dma;
u64 *pte;
if (pgtable->pgd_cpu_entries[pgd_index][pmd_index])
return pgtable->pgd_cpu_entries[pgd_index][pmd_index];
if (pte_dma_ptr)
return pte_dma_ptr;
pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
if (!pte)
pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
if (!pte_dma_ptr)
return NULL;
pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte;
pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID;
pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;
return pte;
return pte_dma_ptr;
}
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, int prot)
u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
u64 *pte;
int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
/* Allocate PMD - second level page table if needed */
if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index))
/* Allocate PUD - second level page table if needed */
if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
return -ENOMEM;
/* Allocate PTE - third level page table if needed */
pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index);
/* Allocate PMD - third level page table if needed */
if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
return -ENOMEM;
/* Allocate PTE - fourth level page table if needed */
pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
if (!pte)
return -ENOMEM;
/* Update PTE - third level page table with DMA address */
pte[pte_index] = dma_addr | prot;
/* Update PTE */
pte[pte_idx] = dma_addr | prot;
return 0;
}
static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
/* Update PTE with dummy physical address and clear flags */
ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID;
}
static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
u64 end_addr = vpu_addr + size;
u64 *pgd = ctx->pgtable.pgd;
/* Align to PMD entry (2 MB) */
vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
while (vpu_addr < end_addr) {
int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
u64 *pmd = ctx->pgtable.pgd_entries[pgd_index];
while (vpu_addr < end_addr && vpu_addr < pmd_end) {
int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index];
clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
}
clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
}
clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
}
static int
-ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
-			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
+ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+			      dma_addr_t dma_addr, u64 prot)
{
+	size_t size = IVPU_MMU_CONT_PAGES_SIZE;
+
+	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
+	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));
+
+	prot |= IVPU_MMU_ENTRY_FLAG_CONT;
+
	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

+		size -= IVPU_MMU_PAGE_SIZE;
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
-		size -= IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}
+static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
+{
+	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+	/* Update PTE with dummy physical address and clear flags */
+	ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
+}
+
+static void
+ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+	struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
+	u64 end_addr = vpu_addr + size;
+
+	/* Align to PMD entry (2 MB) */
+	vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
+
+	while (vpu_addr < end_addr) {
+		int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+		u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
+
+		while (vpu_addr < end_addr && vpu_addr < pud_end) {
+			int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
+			u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
+
+			while (vpu_addr < end_addr && vpu_addr < pmd_end) {
+				int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+
+				clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
+						    IVPU_MMU_PGTABLE_SIZE);
+				vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+			}
+			clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
+					    IVPU_MMU_PGTABLE_SIZE);
+		}
+		clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
+	}
+	clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
+}
+static int
+ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
+{
+	int map_size;
+	int ret;
+
+	while (size) {
+		if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE &&
+		    IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) {
+			ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
+			map_size = IVPU_MMU_CONT_PAGES_SIZE;
+		} else {
+			ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+			map_size = IVPU_MMU_PAGE_SIZE;
+		}
+
+		if (ret)
+			return ret;
+
+		vpu_addr += map_size;
+		dma_addr += map_size;
+		size -= map_size;
+	}
+
+	return 0;
@ -216,8 +322,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
struct scatterlist *sg;
-	int prot;
	int ret;
+	u64 prot;
u64 i;
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
@ -237,7 +343,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
-		u64 dma_addr = sg_dma_address(sg) - sg->offset;
+		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
@ -293,8 +399,14 @@ ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
{
lockdep_assert_held(&ctx->lock);
-	return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
-					   0, range->start, range->end, DRM_MM_INSERT_BEST);
+	if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
+		if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
+						 range->start, range->end, DRM_MM_INSERT_BEST))
+			return 0;
+	}
+
+	return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
+					   range->start, range->end, DRM_MM_INSERT_BEST);
}
void
@ -319,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
return ret;
if (!context_id) {
-		start = vdev->hw->ranges.global_low.start;
-		end = vdev->hw->ranges.global_high.end;
+		start = vdev->hw->ranges.global.start;
+		end = vdev->hw->ranges.shave.end;
	} else {
-		start = vdev->hw->ranges.user_low.start;
-		end = vdev->hw->ranges.user_high.end;
+		start = vdev->hw->ranges.user.start;
+		end = vdev->hw->ranges.dma.end;
}
drm_mm_init(&ctx->mm, start, end - start);
@ -334,11 +446,15 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
-	drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);
+	if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
+		return;

	mutex_destroy(&ctx->lock);
-	ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
+	ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);
+
+	ctx->pgtable.pgd_dma_ptr = NULL;
+	ctx->pgtable.pgd_dma = 0;
}
int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
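For orientation: the ivpu_mmu_context.c changes above replace the old three-level page-table walk with a four-level PGD->PUD->PMD->PTE walk and add an optional 64 KiB contiguous-mapping path. A minimal sketch of the address decomposition, assuming 9 index bits per level and 4 KiB leaf pages (the bit ranges below are an illustration, not taken from this diff):

	/* Assumed layout: 9 index bits per level, 4 KiB pages. */
	#define IVPU_MMU_PGD_INDEX_MASK	GENMASK_ULL(47, 39)
	#define IVPU_MMU_PUD_INDEX_MASK	GENMASK_ULL(38, 30)
	#define IVPU_MMU_PMD_INDEX_MASK	GENMASK_ULL(29, 21)
	#define IVPU_MMU_PTE_INDEX_MASK	GENMASK_ULL(20, 12)

	/* Under that assumption a "contiguous" 64 KiB mapping is 16
	 * consecutive 4 KiB PTEs tagged IVPU_MMU_ENTRY_FLAG_CONT, which is
	 * why map_cont_64k() insists on 64 KiB alignment of both vpu_addr
	 * and dma_addr. */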


@ -12,12 +12,13 @@ struct ivpu_device;
struct ivpu_file_priv;
struct ivpu_addr_range;
-#define IVPU_MMU_PGTABLE_ENTRIES	512
+#define IVPU_MMU_PGTABLE_ENTRIES	512ull

struct ivpu_mmu_pgtable {
-	u64		**pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES];
-	u64		*pgd_entries[IVPU_MMU_PGTABLE_ENTRIES];
-	u64		*pgd;
+	u64		***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+	u64		**pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+	u64		*pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
+	u64		*pgd_dma_ptr;
	dma_addr_t	pgd_dma;
};


@ -259,6 +259,7 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
pm_runtime_get_sync(vdev->drm.dev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
+	atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1);
ivpu_shutdown(vdev);
ivpu_pm_prepare_cold_boot(vdev);


@ -14,6 +14,7 @@ struct ivpu_pm_info {
struct ivpu_device *vdev;
struct work_struct recovery_work;
atomic_t in_reset;
+	atomic_t reset_counter;
bool is_warmboot;
u32 suspend_reschedule_counter;
};


@ -1293,7 +1293,6 @@ static void update_profiling_data(struct drm_file *file_priv,
static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
bool is_partial)
{
-	struct qaic_partial_execute_entry *pexec;
struct qaic_execute *args = data;
struct qaic_execute_entry *exec;
struct dma_bridge_chan *dbc;
@ -1313,7 +1312,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
received_ts = ktime_get_ns();
-	size = is_partial ? sizeof(*pexec) : sizeof(*exec);
+	size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
n = (unsigned long)size * args->hdr.count;
if (args->hdr.count == 0 || n / args->hdr.count != size)
return -EINVAL;
@ -1321,7 +1320,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
user_data = u64_to_user_ptr(args->data);
exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
-	pexec = (struct qaic_partial_execute_entry *)exec;
if (!exec)
return -ENOMEM;


@ -165,7 +165,6 @@ static const struct drm_driver qaic_accel_driver = {
.ioctls = qaic_drm_ioctls,
.num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = qaic_gem_prime_import,
};


@ -79,7 +79,6 @@ static int cfag12864bfb_probe(struct platform_device *device)
info->var = cfag12864bfb_var;
info->pseudo_palette = NULL;
info->par = NULL;
-	info->flags = FBINFO_FLAG_DEFAULT;
if (register_framebuffer(info) < 0)
goto fballoced;


@ -646,7 +646,6 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
fbdev->info->var = ht16k33_fb_var;
fbdev->info->bl_dev = bl;
fbdev->info->pseudo_palette = NULL;
-	fbdev->info->flags = FBINFO_FLAG_DEFAULT;
fbdev->info->par = priv;
err = register_framebuffer(fbdev->info);


@ -33,7 +33,7 @@
* into their address space. This necessitated the creation of the DMA-BUF sysfs
* statistics interface to provide per-buffer information on production systems.
*
- * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about
+ * The interface at ``/sys/kernel/dmabuf/buffers`` exposes information about
* every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled.
*
* The following stats are exposed by the interface:


@ -131,7 +131,6 @@ static struct file_system_type dma_buf_fs_type = {
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
-	int ret;
if (!is_dma_buf_file(file))
return -EINVAL;
@ -147,11 +146,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
-	dma_resv_lock(dmabuf->resv, NULL);
-	ret = dmabuf->ops->mmap(dmabuf, vma);
-	dma_resv_unlock(dmabuf->resv);
-
-	return ret;
+	return dmabuf->ops->mmap(dmabuf, vma);
}
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@ -850,6 +845,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
* - &dma_buf_ops.release()
* - &dma_buf_ops.begin_cpu_access()
* - &dma_buf_ops.end_cpu_access()
+ * - &dma_buf_ops.mmap()
*
* 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
* reservation and exporter can't take the lock:
@ -858,7 +854,6 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
* - &dma_buf_ops.unpin()
* - &dma_buf_ops.map_dma_buf()
* - &dma_buf_ops.unmap_dma_buf()
- * - &dma_buf_ops.mmap()
* - &dma_buf_ops.vmap()
* - &dma_buf_ops.vunmap()
*
@ -1463,8 +1458,6 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
-	int ret;
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@ -1485,11 +1478,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
-	dma_resv_lock(dmabuf->resv, NULL);
-	ret = dmabuf->ops->mmap(dmabuf, vma);
-	dma_resv_unlock(dmabuf->resv);
-
-	return ret;
+	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
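The two dma-buf.c hunks above stop taking the reservation lock around &dma_buf_ops.mmap in the core, and the documentation hunk moves the callback into the list of ops an exporter may lock in. An exporter that still needs the lock now takes it itself; a hypothetical sketch (example_buffer and example_buf_pfn() are made-up names, not from this series):

	static int example_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
	{
		struct example_buffer *buf = dmabuf->priv;	/* hypothetical */
		int ret;

		if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
			return -EINVAL;

		dma_resv_lock(dmabuf->resv, NULL);	/* the core no longer does this */
		ret = remap_pfn_range(vma, vma->vm_start, example_buf_pfn(buf),
				      vma->vm_end - vma->vm_start, vma->vm_page_prot);
		dma_resv_unlock(dmabuf->resv);
		return ret;
	}

The heap hunks below drop their dma_resv_assert_held() checks for the same reason.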


@ -13,7 +13,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
-#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
@ -183,8 +182,6 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
-	dma_resv_assert_held(dmabuf->resv);
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;


@ -13,7 +13,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
-#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
@ -201,8 +200,6 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct sg_page_iter piter;
int ret;
-	dma_resv_assert_held(dmabuf->resv);
for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
struct page *page = sg_page_iter_page(&piter);


@ -51,8 +51,6 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
struct udmabuf *ubuf = buf->priv;
-	dma_resv_assert_held(buf->resv);
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;


@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"


@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"


@ -9,6 +9,9 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
+	select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+	select FB_CORE if DRM_FBDEV_EMULATION
+	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select HDMI
select I2C
select DMA_SHARED_BUFFER
@ -80,6 +83,7 @@ config DRM_KUNIT_TEST
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
+	select DRM_EXEC
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@ -95,7 +99,6 @@ config DRM_KUNIT_TEST
config DRM_KMS_HELPER
tristate
depends on DRM
-	select FB_SYS_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.
@ -131,9 +134,7 @@ config DRM_DEBUG_MODESET_LOCK
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
-	depends on DRM_KMS_HELPER
-	depends on FB=y || FB=DRM_KMS_HELPER
-	select FRAMEBUFFER_CONSOLE if !EXPERT
+	depends on DRM
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y
help
@ -194,6 +195,27 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
depends on DRM && KUNIT && MMU
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
developers.
If in doubt, say "N".
config DRM_EXEC
tristate
depends on DRM
help
Execution context for command submissions
config DRM_BUDDY
tristate
depends on DRM
@ -216,7 +238,7 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
-	select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+	select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
@ -323,6 +345,8 @@ source "drivers/gpu/drm/v3d/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
source "drivers/gpu/drm/loongson/Kconfig"
source "drivers/gpu/drm/etnaviv/Kconfig"
source "drivers/gpu/drm/hisilicon/Kconfig"


@ -45,6 +45,7 @@ drm-y := \
drm_vblank.o \
drm_vblank_work.o \
drm_vma_manager.o \
+	drm_gpuva_mgr.o \
drm_writeback.o
drm-$(CONFIG_DRM_LEGACY) += \
drm_agpsupport.o \
@ -78,6 +79,8 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
#
# Memory-management helpers
#
+obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
@ -194,3 +197,4 @@ obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
+obj-$(CONFIG_DRM_LOONGSON) += loongson/


@ -21,6 +21,7 @@ config DRM_AMDGPU
select INTERVAL_TREE
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
+	select DRM_EXEC
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI


@ -62,7 +62,7 @@ subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
amdgpu-y := amdgpu_drv.o
# add KMS driver
-amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
@ -98,7 +98,7 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
-	nbio_v7_9.o aqua_vanjaram_reg_init.o
+	nbio_v7_9.o aqua_vanjaram.o
# add DF block
amdgpu-y += \
@ -129,7 +129,8 @@ amdgpu-y += \
vega10_ih.o \
vega20_ih.o \
navi10_ih.o \
-	ih_v6_0.o
+	ih_v6_0.o \
+	ih_v6_1.o
# add PSP block
amdgpu-y += \


@ -53,7 +53,6 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
@ -193,7 +192,6 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
-extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
@ -1034,7 +1032,6 @@ struct amdgpu_device {
bool has_pr3;
bool ucode_sysfs_en;
-	bool				psp_sysfs_en;
/* Chip product information */
char product_number[20];
@ -1129,7 +1126,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
u64 reg_addr, u32 reg_data);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
-			     uint32_t reg, uint32_t v);
+			     uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@ -1508,4 +1505,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
int amdgpu_in_reset(struct amdgpu_device *adev);
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+extern const struct attribute_group amdgpu_flash_attr_group;
#endif


@ -706,7 +706,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
atcs_input.size = sizeof(struct atcs_pref_req_input);
/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
-	atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+	atcs_input.client_id = pci_dev_id(adev->pdev);
atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
if (advertise)
@ -776,7 +776,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
atcs_input.size = sizeof(struct atcs_pwr_shift_input);
/* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
-	atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+	atcs_input.dgpu_id = pci_dev_id(adev->pdev);
atcs_input.dev_acpi_state = dev_state;
atcs_input.drv_state = drv_state;
@ -868,7 +868,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
if (!numa_info) {
struct sysinfo info;
-		numa_info = kzalloc(sizeof *numa_info, GFP_KERNEL);
+		numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL);
if (!numa_info)
return NULL;
@ -1141,7 +1141,7 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
if (!tmr_offset || !tmr_size)
return -EINVAL;
-	bdf = (adev->pdev->bus->number << 8) | adev->pdev->devfn;
+	bdf = pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(bdf);
if (!dev_info)
return -ENOENT;
@ -1162,7 +1162,7 @@ int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
if (!numa_info)
return -EINVAL;
-	bdf = (adev->pdev->bus->number << 8) | adev->pdev->devfn;
+	bdf = pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(bdf);
if (!dev_info)
return -ENOENT;
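All five hunks in this file swap open-coded bus/devfn packing for pci_dev_id(). For reference, the two forms are equivalent (pci_dev_id() is defined in include/linux/pci.h in terms of PCI_DEVID()):

	u16 a = (pdev->bus->number << 8) | pdev->devfn;	/* what the old code computed */
	u16 b = pci_dev_id(pdev);			/* same 16-bit bus/dev/fn ID */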


@ -226,16 +226,6 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
-int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
-{
-	int r = 0;
-
-	if (adev->kfd.dev)
-		r = kgd2kfd_resume_iommu(adev->kfd.dev);
-
-	return r;
-}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
@ -830,3 +820,53 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
return adev->gmc.real_vram_size;
}
}
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
struct amdgpu_ring *kiq_ring = &kiq->ring;
struct amdgpu_ring_funcs *ring_funcs;
struct amdgpu_ring *ring;
int r = 0;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
if (!ring_funcs)
return -ENOMEM;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
r = -ENOMEM;
goto free_ring_funcs;
}
ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
ring->doorbell_index = doorbell_off;
ring->funcs = ring_funcs;
spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
spin_unlock(&kiq->ring_lock);
r = -ENOMEM;
goto free_ring;
}
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
if (kiq_ring->sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
free_ring:
kfree(ring);
free_ring_funcs:
kfree(ring_funcs);
return r;
}


@ -25,6 +25,7 @@
#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED
+#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
@ -32,7 +33,6 @@
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"
@ -71,8 +71,7 @@ struct kgd_mem {
struct hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
-	struct ttm_validate_buffer validate_list;
-	struct ttm_validate_buffer resv_list;
+	struct list_head validate_list;
uint32_t domain;
unsigned int mapped_to_gpu_memory;
uint64_t va;
@ -149,7 +148,6 @@ int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
@ -252,6 +250,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+			    u32 inst);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@ -398,7 +398,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
@ -438,11 +437,6 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
-static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
-{
-	return 0;
-}
static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;


@ -23,6 +23,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_amdkfd_arcturus.h"
#include "amdgpu_amdkfd_gfx_v9.h"
#include "amdgpu_amdkfd_aldebaran.h"
#include "gc/gc_9_4_2_offset.h"
#include "gc/gc_9_4_2_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
@ -36,7 +37,7 @@
* initialize the debug mode registers after it has disabled GFX off during the
* debug session.
*/
-static uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
bool restore_dbg_registers,
uint32_t vmid)
{
@ -107,7 +108,7 @@ static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device
return data;
}
-static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
uint8_t wave_launch_mode,
uint32_t vmid)
{
@ -125,7 +126,8 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch(
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
-					uint32_t debug_vmid)
+					uint32_t debug_vmid,
+					uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;
@ -161,12 +163,6 @@ static uint32_t kgd_gfx_aldebaran_set_address_watch(
return watch_address_cntl;
}
-static uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev,
-					uint32_t watch_id)
-{
-	return 0;
-}
const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@ -191,7 +187,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
.set_address_watch = kgd_gfx_aldebaran_set_address_watch,
-	.clear_address_watch = kgd_gfx_aldebaran_clear_address_watch,
+	.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,


@ -0,0 +1,27 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
bool restore_dbg_registers,
uint32_t vmid);
uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
uint8_t wave_launch_mode,
uint32_t vmid);


@ -22,6 +22,7 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_amdkfd_gfx_v9.h"
#include "amdgpu_amdkfd_aldebaran.h"
#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "athub/athub_1_8_0_offset.h"
@ -32,6 +33,7 @@
#include "soc15.h"
#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
@ -361,6 +363,156 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
return 0;
}
/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
static uint32_t kgd_gfx_v9_4_3_disable_debug_trap(struct amdgpu_device *adev,
bool keep_trap_enabled,
uint32_t vmid)
{
uint32_t data = 0;
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
return data;
}
static int kgd_gfx_v9_4_3_validate_trap_override_request(
struct amdgpu_device *adev,
uint32_t trap_override,
uint32_t *trap_mask_supported)
{
*trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_FP_OVERFLOW |
KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
KFD_DBG_TRAP_MASK_FP_INEXACT |
KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION |
KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
return -EPERM;
return 0;
}
static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
{
uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_FP_OVERFLOW |
KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
KFD_DBG_TRAP_MASK_FP_INEXACT |
KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
uint32_t ret;
ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
return ret;
}
static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
{
uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
return ret;
}
/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
static uint32_t kgd_gfx_v9_4_3_set_wave_launch_trap_override(
struct amdgpu_device *adev,
uint32_t vmid,
uint32_t trap_override,
uint32_t trap_mask_bits,
uint32_t trap_mask_request,
uint32_t *trap_mask_prev,
uint32_t kfd_dbg_trap_cntl_prev)
{
uint32_t data = 0;
*trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
data = (trap_mask_bits & trap_mask_request) |
(*trap_mask_prev & ~trap_mask_request);
data = trap_mask_map_sw_to_hw(data);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
return data;
}
#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
static uint32_t kgd_gfx_v9_4_3_set_address_watch(
struct amdgpu_device *adev,
uint64_t watch_address,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
uint32_t debug_vmid,
uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;
uint32_t watch_address_cntl;
watch_address_cntl = 0;
watch_address_low = lower_32_bits(watch_address);
watch_address_high = upper_32_bits(watch_address) & 0xffff;
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
MODE,
watch_mode);
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
MASK,
watch_address_mask >> 7);
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
1);
WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
regTCP_WATCH0_ADDR_H) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_high);
WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
regTCP_WATCH0_ADDR_L) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_low);
return watch_address_cntl;
}
static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
uint32_t watch_id)
{
return 0;
}
const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
@ -379,6 +531,19 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base =
kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings =
-			kgd_gfx_v9_program_trap_handler_settings
+			kgd_gfx_v9_program_trap_handler_settings,
.build_grace_period_packet_info =
kgd_gfx_v9_build_grace_period_packet_info,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.enable_debug_trap = kgd_aldebaran_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
.validate_trap_override_request =
kgd_gfx_v9_4_3_validate_trap_override_request,
.set_wave_launch_trap_override =
kgd_gfx_v9_4_3_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
.clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
};


@ -886,7 +886,8 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
-				       uint32_t debug_vmid)
+				       uint32_t debug_vmid,
+				       uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;
@ -968,7 +969,8 @@ uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
* deq_retry_wait_time -- Wait Count for Global Wave Syncs.
*/
void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
-				   uint32_t *wait_times)
+				   uint32_t *wait_times,
+				   uint32_t inst)
{
*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
@ -978,7 +980,8 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
-						uint32_t *reg_data)
+						uint32_t *reg_data,
+						uint32_t inst)
{
*reg_data = wait_times;


@ -44,12 +44,16 @@ uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
-				       uint32_t debug_vmid);
+				       uint32_t debug_vmid,
+				       uint32_t inst);
uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
uint32_t watch_id);
-void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times);
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+				   uint32_t *wait_times,
+				   uint32_t inst);
void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
-						uint32_t *reg_data);
+						uint32_t *reg_data,
+						uint32_t inst);


@ -637,7 +637,7 @@ static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev,
{
uint32_t data = 0;
-	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled);
+	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
@ -743,7 +743,8 @@ static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
-					      uint32_t debug_vmid)
+					      uint32_t debug_vmid,
+					      uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;


@ -822,7 +822,8 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
-				      uint32_t debug_vmid)
+				      uint32_t debug_vmid,
+				      uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;
@ -903,10 +904,12 @@ uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
* deq_retry_wait_time -- Wait Count for Global Wave Syncs.
*/
void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
-				  uint32_t *wait_times)
+				  uint32_t *wait_times,
+				  uint32_t inst)
{
-	*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
+	*wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+			     mmCP_IQ_WAIT_TIME2));
}
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
@ -1100,12 +1103,13 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data)
uint32_t *reg_data,
uint32_t inst)
{
*reg_data = wait_times;
/*
-	 * The CP cannont handle a 0 grace period input and will result in
+	 * The CP cannot handle a 0 grace period input and will result in
* an infinite grace period being set so set to 1 to prevent this.
*/
if (grace_period == 0)
@ -1116,7 +1120,8 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
SCH_WAVE,
grace_period);
-	*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
+	*reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+				       mmCP_IQ_WAIT_TIME2);
}
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
@ -1128,9 +1133,9 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
* Program TBA registers
*/
WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO,
-			lower_32_bits(tba_addr >> 8));
+		     lower_32_bits(tba_addr >> 8));
	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI,
-			upper_32_bits(tba_addr >> 8));
+		     upper_32_bits(tba_addr >> 8));
/*
* Program TMA registers


@ -89,12 +89,16 @@ uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
-				      uint32_t debug_vmid);
+				      uint32_t debug_vmid,
+				      uint32_t inst);
uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
uint32_t watch_id);
-void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times);
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+				  uint32_t *wait_times,
+				  uint32_t inst);
void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
-					       uint32_t *reg_data);
+					       uint32_t *reg_data,
+					       uint32_t inst);


@ -27,6 +27,8 @@
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
@ -37,7 +39,6 @@
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"
-#include <drm/ttm/ttm_tt.h>
/* Userptr restore delay, just long enough to allow consecutive VM
* changes to accumulate
@ -964,28 +965,20 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdkfd_process_info *process_info,
bool userptr)
{
-	struct ttm_validate_buffer *entry = &mem->validate_list;
-	struct amdgpu_bo *bo = mem->bo;
-
-	INIT_LIST_HEAD(&entry->head);
-	entry->num_shared = 1;
-	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
-		list_add_tail(&entry->head, &process_info->userptr_valid_list);
+		list_add_tail(&mem->validate_list,
+			      &process_info->userptr_valid_list);
	else
-		list_add_tail(&entry->head, &process_info->kfd_bo_list);
+		list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
mutex_unlock(&process_info->lock);
}
static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
struct amdkfd_process_info *process_info)
{
-	struct ttm_validate_buffer *bo_list_entry;
-
-	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
-	list_del(&bo_list_entry->head);
+	list_del(&mem->validate_list);
mutex_unlock(&process_info->lock);
}
@ -1072,13 +1065,12 @@ out:
* object can track VM updates.
*/
struct bo_vm_reservation_context {
-	struct amdgpu_bo_list_entry kfd_bo;	/* BO list entry for the KFD BO */
-	unsigned int n_vms;			/* Number of VMs reserved */
-	struct amdgpu_bo_list_entry *vm_pd;	/* Array of VM BO list entries */
-	struct ww_acquire_ctx ticket;		/* Reservation ticket */
-	struct list_head list, duplicates;	/* BO lists */
-	struct amdgpu_sync *sync;		/* Pointer to sync object */
-	bool reserved;				/* Whether BOs are reserved */
+	/* DRM execution context for the reservation */
+	struct drm_exec exec;
+	/* Number of VMs reserved */
+	unsigned int n_vms;
+	/* Pointer to sync object */
+	struct amdgpu_sync *sync;
};
enum bo_vm_match {
@ -1102,35 +1094,26 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
WARN_ON(!vm);
-	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

-	INIT_LIST_HEAD(&ctx->list);
-	INIT_LIST_HEAD(&ctx->duplicates);
-
-	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
-	if (!ctx->vm_pd)
-		return -ENOMEM;
-
-	ctx->kfd_bo.priority = 0;
-	ctx->kfd_bo.tv.bo = &bo->tbo;
-	ctx->kfd_bo.tv.num_shared = 1;
-	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
-
-	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
-
-	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates);
-	if (ret) {
-		pr_err("Failed to reserve buffers in ttm.\n");
-		kfree(ctx->vm_pd);
-		ctx->vm_pd = NULL;
-		return ret;
-	}
-
-	ctx->reserved = true;
+	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&ctx->exec) {
+		ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
+		drm_exec_retry_on_contention(&ctx->exec);
+		if (unlikely(ret))
+			goto error;
+
+		ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&ctx->exec);
+		if (unlikely(ret))
+			goto error;
+	}
	return 0;
+
+error:
+	pr_err("Failed to reserve buffers in ttm.\n");
+	drm_exec_fini(&ctx->exec);
+	return ret;
}
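reserve_bo_and_vm() above is the template for the rest of this file's conversion from ttm_eu_reserve_buffers()/validate lists to drm_exec. The pattern, reduced to a skeleton over one assumed GEM object obj (a sketch, not the driver's exact code):

	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock obj and reserve one fence slot; on eviction contention
		 * all locks are dropped and the loop body restarts. */
		ret = drm_exec_prepare_obj(&exec, obj, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	/* ... work with the locked object(s) ... */
	drm_exec_fini(&exec);	/* drops all held locks */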
/**
@ -1147,63 +1130,39 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
struct amdgpu_vm *vm, enum bo_vm_match map_type,
struct bo_vm_reservation_context *ctx)
{
-	struct amdgpu_bo *bo = mem->bo;
	struct kfd_mem_attachment *entry;
-	unsigned int i;
+	struct amdgpu_bo *bo = mem->bo;
	int ret;

-	ctx->reserved = false;
-	ctx->n_vms = 0;
-	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

-	INIT_LIST_HEAD(&ctx->list);
-	INIT_LIST_HEAD(&ctx->duplicates);
-
-	list_for_each_entry(entry, &mem->attachments, list) {
-		if ((vm && vm != entry->bo_va->base.vm) ||
-		    (entry->is_mapped != map_type
-		     && map_type != BO_VM_ALL))
-			continue;
-
-		ctx->n_vms++;
-	}
-
-	if (ctx->n_vms != 0) {
-		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
-				     GFP_KERNEL);
-		if (!ctx->vm_pd)
-			return -ENOMEM;
-	}
-
-	ctx->kfd_bo.priority = 0;
-	ctx->kfd_bo.tv.bo = &bo->tbo;
-	ctx->kfd_bo.tv.num_shared = 1;
-	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
-
-	i = 0;
-	list_for_each_entry(entry, &mem->attachments, list) {
-		if ((vm && vm != entry->bo_va->base.vm) ||
-		    (entry->is_mapped != map_type
-		     && map_type != BO_VM_ALL))
-			continue;
-
-		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
-				    &ctx->vm_pd[i]);
-		i++;
-	}
-
-	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates);
-	if (ret) {
-		pr_err("Failed to reserve buffers in ttm.\n");
-		kfree(ctx->vm_pd);
-		ctx->vm_pd = NULL;
-		return ret;
-	}
-
-	ctx->reserved = true;
+	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&ctx->exec) {
+		ctx->n_vms = 0;
+		list_for_each_entry(entry, &mem->attachments, list) {
+			if ((vm && vm != entry->bo_va->base.vm) ||
+			    (entry->is_mapped != map_type
+			     && map_type != BO_VM_ALL))
+				continue;
+
+			ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
+						&ctx->exec, 2);
+			drm_exec_retry_on_contention(&ctx->exec);
+			if (unlikely(ret))
+				goto error;
+			++ctx->n_vms;
+		}
+
+		ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
+		drm_exec_retry_on_contention(&ctx->exec);
+		if (unlikely(ret))
+			goto error;
+	}
	return 0;
+
+error:
+	pr_err("Failed to reserve buffers in ttm.\n");
+	drm_exec_fini(&ctx->exec);
+	return ret;
}
/**
@ -1224,15 +1183,8 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
if (wait)
ret = amdgpu_sync_wait(ctx->sync, intr);
-	if (ctx->reserved)
-		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
-	kfree(ctx->vm_pd);
+	drm_exec_fini(&ctx->exec);
	ctx->sync = NULL;
-	ctx->reserved = false;
-	ctx->vm_pd = NULL;
return ret;
}
@ -1855,7 +1807,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
bool use_release_notifier = (mem->bo->kfd_bo == mem);
struct kfd_mem_attachment *entry, *tmp;
struct bo_vm_reservation_context ctx;
-	struct ttm_validate_buffer *bo_list_entry;
unsigned int mapped_to_gpu_memory;
int ret;
bool is_imported = false;
@ -1883,9 +1834,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
/* Make sure restore workers don't access the BO any more */
-	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
-	list_del(&bo_list_entry->head);
+	list_del(&mem->validate_list);
mutex_unlock(&process_info->lock);
/* Cleanup user pages and MMU notifiers */
@ -2452,14 +2402,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Move all invalidated BOs to the userptr_inval_list */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_valid_list,
-				 validate_list.head)
+				 validate_list)
if (mem->invalid)
-			list_move_tail(&mem->validate_list.head,
+			list_move_tail(&mem->validate_list,
&process_info->userptr_inval_list);
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
-			    validate_list.head) {
+			    validate_list) {
invalid = mem->invalid;
if (!invalid)
/* BO hasn't been invalidated since the last
@ -2539,51 +2489,42 @@ unlock_out:
*/
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
-	struct amdgpu_bo_list_entry *pd_bo_list_entries;
-	struct list_head resv_list, duplicates;
-	struct ww_acquire_ctx ticket;
-	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_sync sync;
+	struct drm_exec exec;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
+	struct ttm_operation_ctx ctx = { false, false };
-	int i, ret;
+	int ret;

-	pd_bo_list_entries = kcalloc(process_info->n_vms,
-				     sizeof(struct amdgpu_bo_list_entry),
-				     GFP_KERNEL);
-	if (!pd_bo_list_entries) {
-		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
-		ret = -ENOMEM;
-		goto out_no_mem;
-	}
-
-	INIT_LIST_HEAD(&resv_list);
-	INIT_LIST_HEAD(&duplicates);
-
-	/* Get all the page directory BOs that need to be reserved */
-	i = 0;
-	list_for_each_entry(peer_vm, &process_info->vm_list_head,
-			    vm_list_node)
-		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
-				    &pd_bo_list_entries[i++]);
-
-	/* Add the userptr_inval_list entries to resv_list */
-	list_for_each_entry(mem, &process_info->userptr_inval_list,
-			    validate_list.head) {
-		list_add_tail(&mem->resv_list.head, &resv_list);
-		mem->resv_list.bo = mem->validate_list.bo;
-		mem->resv_list.num_shared = mem->validate_list.num_shared;
-	}
-
-	/* Reserve all BOs and page tables for validation */
-	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
-	WARN(!list_empty(&duplicates), "Duplicates should be empty");
-	if (ret)
-		goto out_free;

	amdgpu_sync_create(&sync);

+	drm_exec_init(&exec, 0);
+	/* Reserve all BOs and page tables for validation */
+	drm_exec_until_all_locked(&exec) {
+		/* Reserve all the page directories */
+		list_for_each_entry(peer_vm, &process_info->vm_list_head,
+				    vm_list_node) {
+			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(ret))
+				goto unreserve_out;
+		}
+
+		/* Reserve the userptr_inval_list entries to resv_list */
+		list_for_each_entry(mem, &process_info->userptr_inval_list,
+				    validate_list) {
+			struct drm_gem_object *gobj;
+
+			gobj = &mem->bo->tbo.base;
+			ret = drm_exec_prepare_obj(&exec, gobj, 1);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(ret))
+				goto unreserve_out;
+		}
+	}
ret = process_validate_vms(process_info);
if (ret)
goto unreserve_out;
@ -2591,7 +2532,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
/* Validate BOs and update GPUVM page tables */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
-				 validate_list.head) {
+				 validate_list) {
struct kfd_mem_attachment *attachment;
bo = mem->bo;
@ -2633,12 +2574,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
ret = process_update_pds(process_info, &sync);
unreserve_out:
-	ttm_eu_backoff_reservation(&ticket, &resv_list);
+	drm_exec_fini(&exec);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
-out_free:
-	kfree(pd_bo_list_entries);
-out_no_mem:
return ret;
}
@ -2654,7 +2592,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
-				 validate_list.head) {
+				 validate_list) {
bool valid;
/* keep mem without hmm range at userptr_inval_list */
@ -2678,7 +2616,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
continue;
}
-		list_move_tail(&mem->validate_list.head,
+		list_move_tail(&mem->validate_list,
&process_info->userptr_valid_list);
}
@ -2788,50 +2726,44 @@ unlock_out:
*/
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
-	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
-	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
-	int ret = 0, i;
-	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;
+	struct drm_exec exec;
+	int ret;

-	INIT_LIST_HEAD(&duplicate_save);
-	INIT_LIST_HEAD(&ctx.list);
-	INIT_LIST_HEAD(&ctx.duplicates);
-
-	pd_bo_list = kcalloc(process_info->n_vms,
-			     sizeof(struct amdgpu_bo_list_entry),
-			     GFP_KERNEL);
-	if (!pd_bo_list)
-		return -ENOMEM;
-
-	i = 0;
	mutex_lock(&process_info->lock);
-	list_for_each_entry(peer_vm, &process_info->vm_list_head,
-			    vm_list_node)
-		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
-
-	/* Reserve all BOs and page tables/directory. Add all BOs from
-	 * kfd_bo_list to ctx.list
-	 */
-	list_for_each_entry(mem, &process_info->kfd_bo_list,
-			    validate_list.head) {
-		list_add_tail(&mem->resv_list.head, &ctx.list);
-		mem->resv_list.bo = mem->validate_list.bo;
-		mem->resv_list.num_shared = mem->validate_list.num_shared;
-	}
-
-	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-				     false, &duplicate_save);
-	if (ret) {
-		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
-		goto ttm_reserve_fail;
-	}
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		list_for_each_entry(peer_vm, &process_info->vm_list_head,
+				    vm_list_node) {
+			ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(ret))
+				goto ttm_reserve_fail;
+		}
+
+		/* Reserve all BOs and page tables/directory. Add all BOs from
+		 * kfd_bo_list to ctx.list
+		 */
+		list_for_each_entry(mem, &process_info->kfd_bo_list,
+				    validate_list) {
+			struct drm_gem_object *gobj;
+
+			gobj = &mem->bo->tbo.base;
+			ret = drm_exec_prepare_obj(&exec, gobj, 1);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(ret))
+				goto ttm_reserve_fail;
+		}
+	}
amdgpu_sync_create(&sync_obj);
@ -2849,7 +2781,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
/* Validate BOs and map them to GPUVM (update VM page tables). */
list_for_each_entry(mem, &process_info->kfd_bo_list,
-			    validate_list.head) {
+			    validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
@ -2925,8 +2857,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
*ef = dma_fence_get(&new_fence->base);
/* Attach new eviction fence to all BOs except pinned ones */
-	list_for_each_entry(mem, &process_info->kfd_bo_list,
-			    validate_list.head) {
+	list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
if (mem->bo->tbo.pin_count)
continue;
@ -2945,11 +2876,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
validate_map_fail:
-	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
+	drm_exec_fini(&exec);
mutex_unlock(&process_info->lock);
-	kfree(pd_bo_list);
return ret;
}


@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
return sysfs_emit(buf, "%s\n", ctx->vbios_version);
return sysfs_emit(buf, "%s\n", ctx->vbios_ver_str);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,


@ -89,8 +89,7 @@ struct atom_memory_info {
#define MAX_AC_TIMING_ENTRIES 16
struct atom_memory_clock_range_table
{
struct atom_memory_clock_range_table {
u8 num_entries;
u8 rsv[3];
u32 mclk[MAX_AC_TIMING_ENTRIES];
@ -118,14 +117,12 @@ struct atom_mc_reg_table {
#define MAX_VOLTAGE_ENTRIES 32
-struct atom_voltage_table_entry
-{
+struct atom_voltage_table_entry {
u16 value;
u32 smio_low;
};
-struct atom_voltage_table
-{
+struct atom_voltage_table {
u32 count;
u32 mask_low;
u32 phase_delay;


@ -58,7 +58,7 @@ uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *ade
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
index, &size, &frev, &crev, &data_offset)) {
/* support firmware_info 3.1 + */
-		if ((frev == 3 && crev >=1) || (frev > 3)) {
+		if ((frev == 3 && crev >= 1) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
@ -597,7 +597,7 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
index, &size, &frev, &crev,
&data_offset)) {
/* support firmware_info 3.4 + */
-		if ((frev == 3 && crev >=4) || (frev > 3)) {
+		if ((frev == 3 && crev >= 4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
/* The ras_rom_i2c_slave_addr should ideally
@ -850,7 +850,7 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
firmware_info = (union firmware_info *)(ctx->bios + data_offset);
-	if (frev !=3)
+	if (frev != 3)
return -EINVAL;
switch (crev) {
@ -909,7 +909,7 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
}
index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
-					asic_init);
+					    asic_init);
if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
if (frev == 2 && crev >= 1) {
memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));


@ -74,24 +74,29 @@ struct atpx_mux {
u16 mux;
} __packed;
-bool amdgpu_has_atpx(void) {
+bool amdgpu_has_atpx(void)
+{
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+bool amdgpu_has_atpx_dgpu_power_cntl(void)
+{
return amdgpu_atpx_priv.atpx.functions.power_cntl;
}
-bool amdgpu_is_atpx_hybrid(void) {
+bool amdgpu_is_atpx_hybrid(void)
+{
return amdgpu_atpx_priv.atpx.is_hybrid;
}
-bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+bool amdgpu_atpx_dgpu_req_power_for_displays(void)
+{
return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}
#if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void) {
+void *amdgpu_atpx_get_dhandle(void)
+{
return amdgpu_atpx_priv.dhandle;
}
#endif
@ -134,7 +139,7 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk("failed to evaluate ATPX got %s\n",
pr_err("failed to evaluate ATPX got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
@ -190,7 +195,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
printk("ATPX buffer is too small: %zu\n", size);
pr_err("ATPX buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
@ -223,11 +228,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
printk("ATPX Hybrid Graphics, forcing to ATPX\n");
pr_warn("ATPX Hybrid Graphics, forcing to ATPX\n");
atpx->functions.power_cntl = true;
atpx->is_hybrid = false;
} else {
printk("ATPX Hybrid Graphics\n");
pr_notice("ATPX Hybrid Graphics\n");
/*
* Disable legacy PM methods only when pcie port PM is usable,
* otherwise the device might fail to power off or power on.
@ -269,7 +274,7 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
printk("ATPX buffer is too small: %zu\n", size);
pr_err("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@ -278,8 +283,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
printk("ATPX version %u, functions 0x%08x\n",
output.version, output.function_bits);
pr_notice("ATPX version %u, functions 0x%08x\n",
output.version, output.function_bits);
amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);


@ -460,7 +460,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
return false;
success:
-	adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false;
+	adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
return true;
}


@ -28,6 +28,7 @@
* Christian König <deathsimple@vodafone.de>
*/
+#include <linux/sort.h>
#include <linux/uaccess.h>
#include "amdgpu.h"
@ -50,15 +51,22 @@ static void amdgpu_bo_list_free(struct kref *ref)
refcount);
struct amdgpu_bo_list_entry *e;
-	amdgpu_bo_list_for_each_entry(e, list) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-
-		amdgpu_bo_unref(&bo);
-	}
+	amdgpu_bo_list_for_each_entry(e, list)
+		amdgpu_bo_unref(&e->bo);
call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b)
{
const struct amdgpu_bo_list_entry *a = _a, *b = _b;
if (a->priority > b->priority)
return 1;
if (a->priority < b->priority)
return -1;
return 0;
}
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
size_t num_entries, struct amdgpu_bo_list **result)
@ -118,7 +126,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
-		entry->tv.bo = &bo->tbo;
+		entry->bo = bo;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
@ -133,6 +141,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
list->first_userptr = first_userptr;
list->num_entries = num_entries;
+	sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry),
+	     amdgpu_bo_list_entry_cmp, NULL);
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
@ -141,16 +151,10 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
return 0;
error_free:
-	for (i = 0; i < last_entry; ++i) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
-		amdgpu_bo_unref(&bo);
-	}
-	for (i = first_userptr; i < num_entries; ++i) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
-		amdgpu_bo_unref(&bo);
-	}
+	for (i = 0; i < last_entry; ++i)
+		amdgpu_bo_unref(&array[i].bo);
+	for (i = first_userptr; i < num_entries; ++i)
+		amdgpu_bo_unref(&array[i].bo);
kvfree(list);
return r;
@ -182,41 +186,6 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
return -ENOENT;
}
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-			     struct list_head *validated)
-{
-	/* This is based on the bucket sort with O(n) time complexity.
-	 * An item with priority "i" is added to bucket[i]. The lists are then
-	 * concatenated in descending order.
-	 */
-	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
-	struct amdgpu_bo_list_entry *e;
-	unsigned i;
-
-	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
-		INIT_LIST_HEAD(&bucket[i]);
-
-	/* Since buffers which appear sooner in the relocation list are
-	 * likely to be used more often than buffers which appear later
-	 * in the list, the sort mustn't change the ordering of buffers
-	 * with the same priority, i.e. it must be stable.
-	 */
-	amdgpu_bo_list_for_each_entry(e, list) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-		unsigned priority = e->priority;
-
-		if (!bo->parent)
-			list_add_tail(&e->tv.head, &bucket[priority]);
-
-		e->user_pages = NULL;
-		e->range = NULL;
-	}
-
-	/* Connect the sorted buckets in the output list. */
-	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
-		list_splice(&bucket[i], validated);
-}
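With amdgpu_bo_list_get_list() and its per-submission stable bucket sort gone, the ordering is now established once at list-creation time, as the earlier hunk shows, using the kernel's sort() from linux/sort.h with the comparator added above:

	sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry),
	     amdgpu_bo_list_entry_cmp, NULL);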
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
kref_put(&list->refcount, amdgpu_bo_list_free);


@ -23,7 +23,6 @@
#ifndef __AMDGPU_BO_LIST_H__
#define __AMDGPU_BO_LIST_H__
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
struct hmm_range;
@ -36,7 +35,7 @@ struct amdgpu_bo_va;
struct amdgpu_fpriv;
struct amdgpu_bo_list_entry {
-	struct ttm_validate_buffer	tv;
+	struct amdgpu_bo		*bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
@ -60,8 +59,6 @@ struct amdgpu_bo_list {
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
struct amdgpu_bo_list **result);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param);
