drm next for 6.12-rc1
Merge tag 'drm-next-2024-09-19' of https://gitlab.freedesktop.org/drm/kernel

Pull drm updates from Dave Airlie:
"This adds a couple of patches outside the drm core, all should be
acked appropriately; the string and pstore ones are the main ones that
come to mind.
Otherwise it's the usual drivers, xe is getting enabled by default on
some new hardware, we've changed the device number handling to allow
more devices, and we added some optional rust code to create QR codes
in the panic handler, an idea first suggested I think 10 years ago :-)

string:
 - add mem_is_zero()

core:
 - support more device numbers
 - use XArray for minor ids
 - add backlight constants
 - Split dma fence array creation into alloc and arm

fbdev:
 - remove usage of old fbdev hooks

kms:
 - Add might_fault() to drm_modeset_lock priming
 - Add dynamic per-crtc vblank configuration support

dma-buf:
 - docs cleanup

buddy:
 - Add start address support for trim function

printk:
 - pass description to kmsg_dump

scheduler:
 - Remove full_recover from drm_sched_start

ttm:
 - Make LRU walk restartable after dropping locks
 - Allow direct reclaim to allocate local memory

panic:
 - add display QR code (in rust)

displayport:
 - mst: GUID improvements

bridge:
 - Silence error message on -EPROBE_DEFER
 - analogix: Clean up
 - bridge-connector: Fix double free
 - lt6505: Disable interrupt when powered off
 - tc358767: Make default DP port preemphasis configurable
 - lt9611uxc: require DRM_BRIDGE_ATTACH_NO_CONNECTOR
 - anx7625: simplify OF array handling
 - dw-hdmi: simplify clock handling
 - lontium-lt8912b: fix mode validation
 - nwl-dsi: fix mode vsync/hsync polarity

xe:
 - Enable LunarLake and Battlemage support
 - Introducing Xe2 ccs modifiers for integrated and discrete graphics
 - rename xe perf to xe observation
 - use wb caching on DGFX for system memory
 - add fence timeouts
 - Lunar Lake graphics/media/display workarounds
 - Battlemage workarounds
 - Battlemage GSC support
 - GSC and HuC fw updates for LL/BM
 - use dma_fence_chain_free
 - refactor hw engine lookup and mmio access
 - enable priority mem read for Xe2
 - Add first GuC BMG fw
 - fix dma-resv lock
 - Fix DGFX display suspend/resume
 - Use xe_managed for kernel BOs
 - Use reserved copy engine for user binds on faulting devices
 - Allow mixing dma-fence jobs and long-running faulting jobs
 - fix media TLB invalidation
 - fix rpm in TTM swapout path
 - track resources and VF state by PF

i915:
 - Type-C programming fix for MTL+
 - FBC cleanup
 - Calc vblank delay more accurately
 - On DP MST, Enable LT fallback for UHBR<->non-UHBR rates
 - Fix DP LTTPR detection
 - limit relocations to INT_MAX
 - fix long hangs in buddy allocator on DG2/A380

amdgpu:
 - Per-queue reset support
 - SDMA devcoredump support
 - DCN 4.0.1 updates
 - GFX12/VCN4/JPEG4 updates
 - Convert vbios embedded EDID to drm_edid
 - GFX9.3/9.4 devcoredump support
 - process isolation framework for GFX 9.4.3/4
 - take IOMMU mappings into account for P2P DMA

amdkfd:
 - CRIU fixes
 - HMM fix
 - Enable process isolation support for GFX 9.4.3/4
 - Allow users to target recommended SDMA engines
 - KFD support for targeting queues on recommended SDMA engines

radeon:
 - remove .load and drm_dev_alloc
 - Fix vbios embedded EDID size handling
 - Convert vbios embedded EDID to drm_edid
 - Use GEM references instead of TTM
 - r100 cp init cleanup
 - Fix potential overflows in evergreen CS offset tracking

msm:
 - DPU:
   - implement DP/PHY mapping on SC8180X
   - Enable writeback on SM8150, SC8180X, SM6125, SM6350
 - DP:
   - Enable widebus on all relevant chipsets
   - MSM8998 HDMI support
 - GPU:
   - A642L speedbin support
   - A615/A306/A621 support
   - A7xx devcoredump support

ast:
 - astdp: Support AST2600 with VGA
 - Clean up HPD
 - Fix timeout loop for DP link training
 - reorganize output code by type (VGA, DP, etc)
 - convert to struct drm_edid
 - fix BMC handling for all outputs

exynos:
 - drop stale MAINTAINERS pattern
 - constify struct

loongson:
 - use GEM refcount over TTM

mgag200:
 - Improve BMC handling
 - Support VBLANK interrupts
 - transparently support BMC outputs

nouveau:
 - Refactor and clean up internals
 - Use GEM refcount over TTM's

gm12u320:
 - convert to struct drm_edid

gma500:
 - update i2c terms

lcdif:
 - pixel clock fix

host1x:
 - fix syncpoint IRQ during resume
 - use iommu_paging_domain_alloc()

imx:
 - ipuv3: convert to struct drm_edid

omapdrm:
 - improve error handling
 - use common helper for_each_endpoint_of_node()

panel:
 - add support for BOE TV101WUM-LL2 plus DT bindings
 - novatek-nt35950: improve error handling
 - nv3051d: improve error handling
 - panel-edp:
   - add support for BOE NE140WUM-N6G
   - revert support for SDC ATNA45AF01
 - visionox-vtdr6130:
   - improve error handling
   - use devm_regulator_bulk_get_const()
 - boe-th101mb31ig002:
   - Support for starry-er88577 MIPI-DSI panel plus DT
   - Fix porch parameter
 - edp: Support AUO B116XTN02.3, AUO B116XAN06.1, AUO B116XAT04.1,
   BOE NV140WUM-N41, BOE NV133WUM-N63, BOE NV116WHM-A4D,
   CMN N116BCA-EA2, CMN N116BCP-EA2, CSW MNB601LS1-4
 - himax-hx8394: Support Microchip AC40T08A MIPI Display panel plus DT
 - ilitek-ili9806e: Support Densitron DMT028VGHMCMI-1D TFT plus DT
 - jd9365da:
   - Support Melfas lmfbx101117480 MIPI-DSI panel plus DT
   - Refactor for code sharing
 - panel-edp: fix name for HKC MB116AN01
 - jd9365da: fix "exit sleep" commands
 - jdi-fhd-r63452: simplify error handling with DSI multi-style helpers
 - mantix-mlaf057we51: simplify error handling with DSI multi-style helpers
 - simple:
   - support Innolux G070ACE-LH3 plus DT bindings
   - support On Tat Industrial Company KD50G21-40NT-A1 plus DT bindings
 - st7701:
   - decouple DSI and DRM code
   - add SPI support
   - support Anbernic RG28XX plus DT bindings

mediatek:
 - support alpha blending
 - remove cl in struct cmdq_pkt
 - ovl adaptor fix
 - add power domain binding for mediatek DPI controller

renesas:
 - rz-du: add support for RZ/G2UL plus DT bindings

rockchip:
 - Improve DP sink-capability reporting
 - dw_hdmi: Support 4k@60Hz
 - vop:
   - Support RGB display on Rockchip RK3066
   - Support 4096px width

sti:
 - convert to struct drm_edid

stm:
 - Avoid UAF with managed plane and CRTC helpers
 - Fix module owner
 - Fix error handling in probe
 - Depend on COMMON_CLK
 - ltdc:
   - Fix transparency after disabling plane
   - Remove unused interrupt

tegra:
 - gr3d: improve PM domain handling
 - convert to struct drm_edid
 - Call drm_atomic_helper_shutdown()

vc4:
 - fix PM during detect
 - replace DRM_ERROR() with drm_error()
 - v3d: simplify clock retrieval

v3d:
 - Clean up perfmon

virtio:
 - add DRM capset"

* tag 'drm-next-2024-09-19' of https://gitlab.freedesktop.org/drm/kernel: (1326 commits)
  drm/xe: Fix missing conversion to xe_display_pm_runtime_resume
  drm/xe/xe2hpg: Add Wa_15016589081
  drm/xe: Don't keep stale pointer to bo->ggtt_node
  drm/xe: fix missing 'xe_vm_put'
  drm/xe: fix build warning with CONFIG_PM=n
  drm/xe: Suppress missing outer rpm protection warning
  drm/xe: prevent potential UAF in pf_provision_vf_ggtt()
  drm/amd/display: Add all planes on CRTC to state for overlay cursor
  drm/i915/bios: fix printk format width
  drm/i915/display: Fix BMG CCS modifiers
  drm/amdgpu: get rid of bogus includes of fdtable.h
  drm/amdkfd: CRIU fixes
  drm/amdgpu: fix a race in kfd_mem_export_dmabuf()
  drm: new helper: drm_gem_prime_handle_to_dmabuf()
  drm/amdgpu/atomfirmware: Silence UBSAN warning
  drm/amdgpu: Fix kdoc entry in 'amdgpu_vm_cpu_prepare'
  drm/amd/amdgpu: apply command submission parser for JPEG v1
  drm/amd/amdgpu: apply command submission parser for JPEG v2+
  drm/amd/pm: fix the pp_dpm_pcie issue on smu v14.0.2/3
  drm/amd/pm: update the features set on smu v14.0.2/3
  ...
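For reference, the mem_is_zero() helper from the string item above gives
callers a direct way to ask whether a buffer is all zero bytes, replacing
the open-coded memchr_inv() idiom. A minimal sketch of the calling pattern
(the edid_block_is_empty() wrapper is hypothetical):

#include <linux/string.h>

/* Hypothetical caller: treat an all-zero EDID block as empty.
 * mem_is_zero(s, n) is the new helper; the older spelling was
 * !memchr_inv(s, 0, n). */
static bool edid_block_is_empty(const void *block, size_t len)
{
	return mem_is_zero(block, len);
}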
commit de848da12f

.mailmap | 4 ++++
@@ -613,6 +613,10 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
 Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org>
 Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org>
 Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel.vetter@ffwll.ch>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel.vetter@intel.com>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel@ffwll.ch>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel@biene.ffwll.ch>
 Simon Horman <horms@kernel.org> <simon.horman@corigine.com>
 Simon Horman <horms@kernel.org> <simon.horman@netronome.com>
 Simon Kelley <simon@thekelleys.org.uk>
@@ -75,3 +75,11 @@ Description: RO. Energy input of device or gt in microjoules.
 		for the gt.

 		Only supported for particular Intel i915 graphics platforms.
+
+What:		/sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/fan1_input
+Date:		November 2024
+KernelVersion:	6.12
+Contact:	intel-gfx@lists.freedesktop.org
+Description:	RO. Fan speed of device in RPM.
+
+		Only supported for particular Intel i915 graphics platforms.
@@ -147,12 +147,6 @@ DRM_IOCTL_QAIC_PERF_STATS_BO
 recent execution of a BO. This allows userspace to construct an end to end
 timeline of the BO processing for a performance analysis.

-DRM_IOCTL_QAIC_PART_DEV
-This IOCTL allows userspace to request a duplicate "shadow device". This extra
-accelN device is associated with a specific partition of resources on the
-AIC100 device and can be used for limiting a process to some subset of
-resources.
-
 DRM_IOCTL_QAIC_DETACH_SLICE_BO
 This IOCTL allows userspace to remove the slicing information from a BO that
 was originally provided by a call to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. This
@@ -92,12 +92,31 @@ properties:
       reference to a valid DPI output or input endpoint node.

     port@2:
-      $ref: /schemas/graph.yaml#/properties/port
+      $ref: /schemas/graph.yaml#/$defs/port-base
+      unevaluatedProperties: false
       description: |
         eDP/DP output port. The remote endpoint phandle should be a
         reference to a valid eDP panel input endpoint node. This port is
         optional, treated as DP panel if not defined

+      properties:
+        endpoint:
+          $ref: /schemas/media/video-interfaces.yaml#
+          unevaluatedProperties: false
+
+          properties:
+            toshiba,pre-emphasis:
+              description:
+                Display port output Pre-Emphasis settings for both DP lanes.
+              $ref: /schemas/types.yaml#/definitions/uint8-array
+              minItems: 2
+              maxItems: 2
+              items:
+                enum:
+                  - 0 # No pre-emphasis
+                  - 1 # 3.5dB pre-emphasis
+                  - 2 # 6dB pre-emphasis
+
 oneOf:
   - required:
       - port@0
@@ -62,6 +62,9 @@ properties:
         - const: default
         - const: sleep

+  power-domains:
+    maxItems: 1
+
   port:
     $ref: /schemas/graph.yaml#/properties/port
     description:

@@ -76,6 +79,20 @@ required:
   - clock-names
   - port

+allOf:
+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              enum:
+                - mediatek,mt6795-dpi
+                - mediatek,mt8173-dpi
+                - mediatek,mt8186-dpi
+    then:
+      properties:
+        power-domains: false
+
 additionalProperties: false

 examples:
@@ -19,14 +19,15 @@ properties:
       - qcom,hdmi-tx-8974
       - qcom,hdmi-tx-8994
       - qcom,hdmi-tx-8996
+      - qcom,hdmi-tx-8998

   clocks:
     minItems: 1
-    maxItems: 5
+    maxItems: 8

   clock-names:
     minItems: 1
-    maxItems: 5
+    maxItems: 8

   reg:
     minItems: 1

@@ -142,6 +143,7 @@ allOf:
         properties:
           clocks:
             minItems: 5
+            maxItems: 5
           clock-names:
             items:
               - const: mdp_core

@@ -151,6 +153,28 @@ allOf:
               - const: extp
           hdmi-mux-supplies: false

+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,hdmi-tx-8998
+    then:
+      properties:
+        clocks:
+          minItems: 8
+          maxItems: 8
+        clock-names:
+          items:
+            - const: mdp_core
+            - const: iface
+            - const: core
+            - const: alt_iface
+            - const: extp
+            - const: bus
+            - const: mnoc
+            - const: iface_mmss
+
 additionalProperties: false

 examples:
@@ -9,20 +9,20 @@ title: BOE TH101MB31IG002-28A WXGA DSI Display Panel
 maintainers:
   - Manuel Traut <manut@mecka.net>

-allOf:
-  - $ref: panel-common.yaml#
-
 properties:
   compatible:
     enum:
       # BOE TH101MB31IG002-28A 10.1" WXGA TFT LCD panel
       - boe,th101mb31ig002-28a
+      # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+      - starry,er88577

   reg:
     maxItems: 1

   backlight: true
   enable-gpios: true
+  reset-gpios: true
   power-supply: true
   port: true
   rotation: true

@@ -33,6 +33,20 @@ required:
   - enable-gpios
   - power-supply

+allOf:
+  - $ref: panel-common.yaml#
+  - if:
+      properties:
+        compatible:
+          # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+          const: starry,er88577
+    then:
+      properties:
+        reset-gpios: false
+    else:
+      required:
+        - reset-gpios
+
 additionalProperties: false

 examples:

@@ -47,6 +61,7 @@ examples:
         reg = <0>;
         backlight = <&backlight_lcd0>;
         enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
+        reset-gpios = <&gpio 55 GPIO_ACTIVE_LOW>;
         rotation = <90>;
         power-supply = <&vcc_3v3>;
         port {
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-ll2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TV101WUM-LL2 DSI Display Panel
+
+maintainers:
+  - Neil Armstrong <neil.armstrong@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: boe,tv101wum-ll2
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  backlight: true
+  reset-gpios: true
+  vsp-supply: true
+  vsn-supply: true
+  port: true
+  rotation: true
+
+required:
+  - compatible
+  - reg
+  - reset-gpios
+  - vsp-supply
+  - vsn-supply
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "boe,tv101wum-ll2";
+            reg = <0>;
+
+            vsn-supply = <&vsn_lcd>;
+            vsp-supply = <&vsp_lcd>;
+
+            reset-gpios = <&pio 45 GPIO_ACTIVE_LOW>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&dsi_out>;
+                };
+            };
+        };
+    };
+
+...
@@ -15,14 +15,12 @@ description:
   such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with
   a MIPI-DSI video interface.

-allOf:
-  - $ref: panel-common.yaml#
-
 properties:
   compatible:
     items:
       - enum:
           - hannstar,hsd060bhw4
+          - microchip,ac40t08a-mipi-panel
           - powkiddy,x55-panel
       - const: himax,hx8394

@@ -46,7 +44,6 @@ properties:
 required:
   - compatible
   - reg
-  - reset-gpios
   - backlight
   - port
   - vcc-supply

@@ -54,6 +51,18 @@ required:

 additionalProperties: false

+allOf:
+  - $ref: panel-common.yaml#
+  - if:
+      not:
+        properties:
+          compatible:
+            enum:
+              - microchip,ac40t08a-mipi-panel
+    then:
+      required:
+        - reset-gpios
+
 examples:
   - |
     #include <dt-bindings/gpio/gpio.h>
@@ -16,6 +16,7 @@ properties:
   compatible:
     items:
       - enum:
+          - densitron,dmt028vghmcmi-1d
           - ortustech,com35h3p70ulc
       - const: ilitek,ili9806e

@@ -18,6 +18,7 @@ properties:
       - enum:
           - chongzhou,cz101b4001
           - kingdisplay,kd101ne3-40ti
+          - melfas,lmfbx101117480
           - radxa,display-10hd-ad001
           - radxa,display-8hd-ad002
       - const: jadard,jd9365da-h3
@@ -158,6 +158,8 @@ properties:
       - innolux,at070tn92
         # Innolux G070ACE-L01 7" WVGA (800x480) TFT LCD panel
       - innolux,g070ace-l01
+        # Innolux G070ACE-LH3 7" WVGA (800x480) TFT LCD panel with WLED backlight
+      - innolux,g070ace-lh3
         # Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
       - innolux,g070y2-l01
         # Innolux G070Y2-T02 7" WVGA (800x480) TFT LCD TTL panel

@@ -222,6 +224,8 @@ properties:
       - okaya,rs800480t-7x0gp
         # Olimex 4.3" TFT LCD panel
       - olimex,lcd-olinuxino-43-ts
+        # On Tat Industrial Company 5" DPI TFT panel.
+      - ontat,kd50g21-40nt-a1
         # On Tat Industrial Company 7" DPI TFT panel.
       - ontat,yx700wv03
         # OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
@@ -20,21 +20,19 @@ description: |
   Densitron DMT028VGHMCMI-1A is 480x640, 2-lane MIPI DSI LCD panel
   which has built-in ST7701 chip.

-allOf:
-  - $ref: panel-common.yaml#
-
 properties:
   compatible:
     items:
       - enum:
           - anbernic,rg-arc-panel
+          - anbernic,rg28xx-panel
           - densitron,dmt028vghmcmi-1a
           - elida,kd50t048a
           - techstar,ts8550b
       - const: sitronix,st7701

   reg:
-    description: DSI virtual channel used by that screen
+    description: DSI / SPI channel used by that screen
     maxItems: 1

   VCC-supply:

@@ -43,6 +41,13 @@ properties:
   IOVCC-supply:
     description: I/O system regulator

+  dc-gpios:
+    maxItems: 1
+    description:
+      Controller data/command selection (D/CX) in 4-line SPI mode.
+      If not set, the controller is in 3-line SPI mode.
+      Disallowed for DSI.
+
   port: true
   reset-gpios: true
   rotation: true

@@ -57,7 +62,38 @@ required:
   - port
   - reset-gpios

-additionalProperties: false
+allOf:
+  - $ref: panel-common.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            # SPI connected panels
+            enum:
+              - anbernic,rg28xx-panel
+    then:
+      $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+  - if:
+      properties:
+        compatible:
+          not:
+            contains:
+              # DSI or SPI without D/CX pin
+              enum:
+                - anbernic,rg-arc-panel
+                - anbernic,rg28xx-panel
+                - densitron,dmt028vghmcmi-1a
+                - elida,kd50t048a
+                - techstar,ts8550b
+    then:
+      required:
+        - dc-gpios
+    else:
+      properties:
+        dc-gpios: false
+
+unevaluatedProperties: false

 examples:
   - |

@@ -82,3 +118,26 @@ examples:
         };
       };
     };
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      panel@0 {
+        compatible = "anbernic,rg28xx-panel", "sitronix,st7701";
+        reg = <0>;
+        spi-max-frequency = <3125000>;
+        VCC-supply = <&reg_lcd>;
+        IOVCC-supply = <&reg_lcd>;
+        reset-gpios = <&pio 8 14 GPIO_ACTIVE_HIGH>; /* LCD-RST: PI14 */
+        backlight = <&backlight>;
+
+        port {
+          panel_in_rgb: endpoint {
+            remote-endpoint = <&tcon_lcd0_out_lcd>;
+          };
+        };
+      };
+    };
@@ -18,6 +18,7 @@ properties:
   compatible:
     oneOf:
       - enum:
+          - renesas,r9a07g043u-du # RZ/G2UL
          - renesas,r9a07g044-du # RZ/G2{L,LC}
      - items:
        - enum:

@@ -60,9 +61,6 @@ properties:
       $ref: /schemas/graph.yaml#/properties/port
       unevaluatedProperties: false

-    required:
-      - port@0
-
     unevaluatedProperties: false

   renesas,vsps:

@@ -88,6 +86,34 @@ required:

 additionalProperties: false

+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,r9a07g043u-du
+    then:
+      properties:
+        ports:
+          properties:
+            port@0:
+              description: DPI
+
+          required:
+            - port@0
+    else:
+      properties:
+        ports:
+          properties:
+            port@0:
+              description: DSI
+            port@1:
+              description: DPI
+
+          required:
+            - port@0
+            - port@1
+
 examples:
   # RZ/G2L DU
   - |
@@ -14,6 +14,7 @@ properties:
   compatible:
     enum:
       - qcom,hdmi-phy-8996
+      - qcom,hdmi-phy-8998

   reg:
     maxItems: 6
@@ -179,4 +179,4 @@ IP Blocks
    :doc: IP Blocks

 .. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
-   :identifiers: amd_ip_block_type amd_ip_funcs
+   :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK
@@ -154,11 +154,11 @@ Conference talks

 * `An Overview of the Linux and Userspace Graphics Stack <https://www.youtube.com/watch?v=wjAJmqwg47k>`_ - Paul Kocialkowski (2020)
 * `Getting pixels on screen on Linux: introduction to Kernel Mode Setting <https://www.youtube.com/watch?v=haes4_Xnc5Q>`_ - Simon Ser (2020)
-* `Everything Great about Upstream Graphics <https://www.youtube.com/watch?v=kVzHOgt6WGE>`_ - Daniel Vetter (2019)
+* `Everything Great about Upstream Graphics <https://www.youtube.com/watch?v=kVzHOgt6WGE>`_ - Simona Vetter (2019)
 * `An introduction to the Linux DRM subsystem <https://www.youtube.com/watch?v=LbDOCJcDRoo>`_ - Maxime Ripard (2017)
-* `Embrace the Atomic (Display) Age <https://www.youtube.com/watch?v=LjiB_JeDn2M>`_ - Daniel Vetter (2016)
+* `Embrace the Atomic (Display) Age <https://www.youtube.com/watch?v=LjiB_JeDn2M>`_ - Simona Vetter (2016)
 * `Anatomy of an Atomic KMS Driver <https://www.youtube.com/watch?v=lihqR9sENpc>`_ - Laurent Pinchart (2015)
-* `Atomic Modesetting for Drivers <https://www.youtube.com/watch?v=kl9suFgbTc8>`_ - Daniel Vetter (2015)
+* `Atomic Modesetting for Drivers <https://www.youtube.com/watch?v=kl9suFgbTc8>`_ - Simona Vetter (2015)
 * `Anatomy of an Embedded KMS Driver <https://www.youtube.com/watch?v=Ja8fM7rTae4>`_ - Laurent Pinchart (2013)

 Slides and articles

@@ -169,8 +169,8 @@ Slides and articles
 * `Understanding the Linux Graphics Stack <https://bootlin.com/doc/training/graphics/graphics-slides.pdf>`_ - Bootlin (2022)
 * `DRM KMS overview <https://wiki.st.com/stm32mpu/wiki/DRM_KMS_overview>`_ - STMicroelectronics (2021)
 * `Linux graphic stack <https://studiopixl.com/2017-05-13/linux-graphic-stack-an-overview>`_ - Nathan Gauër (2017)
-* `Atomic mode setting design overview, part 1 <https://lwn.net/Articles/653071/>`_ - Daniel Vetter (2015)
-* `Atomic mode setting design overview, part 2 <https://lwn.net/Articles/653466/>`_ - Daniel Vetter (2015)
+* `Atomic mode setting design overview, part 1 <https://lwn.net/Articles/653071/>`_ - Simona Vetter (2015)
+* `Atomic mode setting design overview, part 2 <https://lwn.net/Articles/653466/>`_ - Simona Vetter (2015)
 * `The DRM/KMS subsystem from a newbie’s point of view <https://bootlin.com/pub/conferences/2014/elce/brezillon-drm-kms/brezillon-drm-kms.pdf>`_ - Boris Brezillon (2014)
 * `A brief introduction to the Linux graphics stack <https://blogs.igalia.com/itoral/2014/07/29/a-brief-introduction-to-the-linux-graphics-stack/>`_ - Iago Toral (2014)
 * `The Linux Graphics Stack <https://blog.mecheye.net/2012/06/the-linux-graphics-stack/>`_ - Jasper St. Pierre (2012)
@@ -37,7 +37,7 @@ Audit each individual driver, make sure it'll work with the generic
 implementation (there's lots of outdated locking leftovers in various
 implementations), and then remove it.

-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers

 Level: Intermediate

@@ -61,7 +61,7 @@ do by directly using the new atomic helper driver callbacks.
 .. [2] https://lwn.net/Articles/653071/
 .. [3] https://lwn.net/Articles/653466/

-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers

 Level: Advanced

@@ -75,7 +75,7 @@ helper should also be moved from drm_plane_helper.c to the atomic helpers, to
 avoid confusion - the other helpers in that file are all deprecated legacy
 helpers.

-Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
+Contact: Ville Syrjälä, Simona Vetter, driver maintainers

 Level: Advanced

@@ -97,7 +97,7 @@ with the current helpers:
 - Then we could go through all the drivers and remove the more-or-less confused
   checks for plane_state->fb and plane_state->crtc.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Advanced

@@ -116,7 +116,7 @@ Somewhat related is the legacy_cursor_update hack, which should be replaced with
 the new atomic_async_check/commit functionality in the helpers in drivers that
 still look at that flag.

-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers

 Level: Advanced

@@ -169,7 +169,7 @@ interfaces to fix these issues:
   ``_helper_funcs`` since they are not part of the core ABI. There's a
   ``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Intermediate

@@ -194,7 +194,7 @@ performance-critical drivers it might also be better to go with a more
 fine-grained per-buffer object and per-context lockings scheme. Currently only
 the ``msm`` and `i915` drivers use ``struct_mutex``.

-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers

 Level: Advanced

@@ -251,7 +251,7 @@ being rewritten without dependencies on the fbdev module. Some of the
 helpers could further benefit from using struct iosys_map instead of
 raw pointers.

-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter

 Level: Advanced

@@ -297,7 +297,7 @@ Various hold-ups:
   version of the varios drm_gem_fb_create functions. Maybe called
   drm_gem_fb_create/_with_dirty/_with_funcs as needed.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Intermediate

@@ -329,7 +329,7 @@ everything after it has done the write-protect/mkwrite trickery:

 Might be good to also have some igt testcases for this.

-Contact: Daniel Vetter, Noralf Tronnes
+Contact: Simona Vetter, Noralf Tronnes

 Level: Advanced

@@ -359,7 +359,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().

 - Once all drivers are converted, remove the load/unload callbacks.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Intermediate

@@ -422,7 +422,7 @@ The task is to use struct iosys_map where it makes sense.
 * TTM might benefit from using struct iosys_map internally.
 * Framebuffer copying and blitting helpers should operate on struct iosys_map.

-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Simona Vetter

 Level: Intermediate

@@ -475,25 +475,22 @@ Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp
 As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in
 drm_panel"), we have a check in the drm_panel core to make sure nobody
 double-calls prepare/enable/disable/unprepare. Eventually that should probably
-be turned into a WARN_ON() or somehow made louder, but right now we actually
-expect it to trigger and so we don't want it to be too loud.
+be turned into a WARN_ON() or somehow made louder.

-Specifically, that warning will trigger for panel-edp and panel-simple at
-shutdown time because those panels hardcode a call to drm_panel_disable()
-and drm_panel_unprepare() at shutdown and remove time that they call regardless
-of panel state. On systems with a properly coded DRM modeset driver that
-calls drm_atomic_helper_shutdown() this is pretty much guaranteed to cause
-the warning to fire.
+At the moment, we expect that we may still encounter the warnings in the
+drm_panel core when using panel-simple and panel-edp. Since those panel
+drivers are used with a lot of different DRM modeset drivers they still
+make an extra effort to disable/unprepare the panel themselves at shutdown
+time. Specifically we could still encounter those warnings if the panel
+driver gets shutdown() _before_ the DRM modeset driver and the DRM modeset
+driver properly calls drm_atomic_helper_shutdown() in its own shutdown()
+callback. Warnings could be avoided in such a case by using something like
+device links to ensure that the panel gets shutdown() after the DRM modeset
+driver.

-Unfortunately we can't safely remove the calls in panel-edp and panel-simple
-until we're sure that all DRM modeset drivers that are used with those panels
-properly call drm_atomic_helper_shutdown(). This TODO item is to validate
-that all DRM modeset drivers used with panel-edp and panel-simple properly
-call drm_atomic_helper_shutdown() and then remove the calls to
-disable/unprepare from those panels. Alternatively, this TODO item could be
-removed by convincing stakeholders that those calls are fine and downgrading
-the error message in drm_panel_disable() / drm_panel_unprepare() to a
-debug-level message.
+Once all DRM modeset drivers are known to shutdown properly, the extra
+calls to disable/unprepare in remove/shutdown in panel-simple and panel-edp
+should be removed and this TODO item marked complete.

 Contact: Douglas Anderson <dianders@chromium.org>
@@ -561,7 +558,7 @@ This is a really varied tasks with lots of little bits and pieces:
   <https://lore.kernel.org/lkml/1446217392-11981-1-git-send-email-alexandru.murtaza@intel.com/>`_
   for some example code that could be reused.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Advanced

@@ -590,7 +587,7 @@ There's a bunch of issues with it:
   this (together with the drm_minor->drm_device move) would allow us to remove
   debugfs_init.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Intermediate

@@ -611,7 +608,7 @@ Both these problems can be solved by switching over to drmm_kzalloc(), and the
 various convenience wrappers provided, e.g. drmm_crtc_alloc_with_planes(),
 drmm_universal_plane_alloc(), ... and so on.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Intermediate

@@ -631,7 +628,7 @@ cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
 this problem for USB devices by fishing out the USB host controller device, as
 long as that supports DMA. Otherwise importing can still needlessly fail.

-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter

 Level: Advanced

@@ -712,7 +709,7 @@ Plan to fix this:
 2. In all, only look at one of the three status bits set by the above helpers.
 3. Remove the other two status bits.

-Contact: Daniel Vetter
+Contact: Simona Vetter

 Level: Intermediate
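The reworked panel-simple/panel-edp TODO entry above suggests device links
as a way to guarantee that a panel's shutdown() runs only after the DRM
modeset driver's. A rough sketch of that idea, assuming the panel driver
can reach the modeset driver's struct device (the function and variable
names are illustrative, and whether a link is appropriate depends on the
bus topology):

#include <linux/device.h>

/* Illustrative only: make the DRM modeset device a consumer of the
 * panel device. device_shutdown() walks devices in reverse dependency
 * order, so the consumer (the DRM device) is shut down first, and the
 * panel's shutdown() then runs after drm_atomic_helper_shutdown() has
 * already disabled the pipeline. */
static int panel_link_to_drm(struct device *drm_dev, struct device *panel_dev)
{
	struct device_link *link;

	link = device_link_add(drm_dev /* consumer */,
			       panel_dev /* supplier */,
			       DL_FLAG_AUTOREMOVE_CONSUMER);
	return link ? 0 : -EINVAL;
}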
@@ -7,6 +7,21 @@ Memory Management
 .. kernel-doc:: drivers/gpu/drm/xe/xe_bo_doc.h
    :doc: Buffer Objects (BO)

+GGTT
+====
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c
+   :doc: Global Graphics Translation Table (GGTT)
+
+GGTT Internal API
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt_types.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c
+   :internal:
+
 Pagetable building
 ==================

MAINTAINERS | 20
@@ -1025,6 +1025,13 @@ S: Supported
 T: git https://gitlab.freedesktop.org/agd5f/linux.git
 F: drivers/gpu/drm/amd/display/

+AMD DISPLAY CORE - DML
+M: Chaitanya Dhere <chaitanya.dhere@amd.com>
+M: Jun Lei <jun.lei@amd.com>
+S: Supported
+F: drivers/gpu/drm/amd/display/dc/dml/
+F: drivers/gpu/drm/amd/display/dc/dml2/
+
 AMD FAM15H PROCESSOR POWER MONITORING DRIVER
 M: Huang Rui <ray.huang@amd.com>
 L: linux-hwmon@vger.kernel.org

@@ -1799,6 +1806,7 @@ L: dri-devel@lists.freedesktop.org
 S: Supported
 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: Documentation/gpu/panfrost.rst
+F: drivers/gpu/drm/ci/xfails/panfrost*
 F: drivers/gpu/drm/panfrost/
 F: include/uapi/drm/panfrost_drm.h

@@ -6724,6 +6732,7 @@ F: drivers/dma-buf/dma-heap.c
 F: drivers/dma-buf/heaps/*
 F: include/linux/dma-heap.h
 F: include/uapi/linux/dma-heap.h
+F: tools/testing/selftests/dmabuf-heaps/

 DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422
 M: Lukasz Luba <lukasz.luba@arm.com>

@@ -7398,10 +7407,10 @@ F: drivers/gpu/drm/udl/

 DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
 M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
-M: Melissa Wen <melissa.srw@gmail.com>
 M: Maíra Canal <mairacanal@riseup.net>
 R: Haneen Mohammed <hamohammed.sa@gmail.com>
-R: Daniel Vetter <daniel@ffwll.ch>
+R: Simona Vetter <simona@ffwll.ch>
+R: Melissa Wen <melissa.srw@gmail.com>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git

@@ -7434,7 +7443,7 @@ F: drivers/gpu/drm/panel/panel-widechips-ws2401.c

 DRM DRIVERS
 M: David Airlie <airlied@gmail.com>
-M: Daniel Vetter <daniel@ffwll.ch>
+M: Simona Vetter <simona@ffwll.ch>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
 B: https://gitlab.freedesktop.org/drm

@@ -7530,7 +7539,6 @@ M: Kyungmin Park <kyungmin.park@samsung.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
-F: Documentation/devicetree/bindings/display/exynos/
 F: Documentation/devicetree/bindings/display/samsung/
 F: drivers/gpu/drm/exynos/
 F: include/uapi/drm/exynos_drm.h

@@ -8877,7 +8885,7 @@ W: https://floatingpoint.billm.au/
 F: arch/x86/math-emu/

 FRAMEBUFFER CORE
-M: Daniel Vetter <daniel@ffwll.ch>
+M: Simona Vetter <simona@ffwll.ch>
 S: Odd Fixes
 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: drivers/video/fbdev/core/

@@ -11044,6 +11052,7 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
 F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
 F: Documentation/gpu/imagination/
+F: drivers/gpu/drm/ci/xfails/powervr*
 F: drivers/gpu/drm/imagination/
 F: include/uapi/drm/pvr_drm.h

@@ -18923,7 +18932,6 @@ F: include/dt-bindings/clock/qcom,*
 QUALCOMM CLOUD AI (QAIC) DRIVER
 M: Jeffrey Hugo <quic_jhugo@quicinc.com>
 R: Carl Vanderlip <quic_carlv@quicinc.com>
-R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
 L: linux-arm-msm@vger.kernel.org
 L: dri-devel@lists.freedesktop.org
 S: Supported
|
@ -73,7 +73,7 @@ static const char *nvram_os_partitions[] = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static void oops_to_nvram(struct kmsg_dumper *dumper,
|
static void oops_to_nvram(struct kmsg_dumper *dumper,
|
||||||
enum kmsg_dump_reason reason);
|
struct kmsg_dump_detail *detail);
|
||||||
|
|
||||||
static struct kmsg_dumper nvram_kmsg_dumper = {
|
static struct kmsg_dumper nvram_kmsg_dumper = {
|
||||||
.dump = oops_to_nvram
|
.dump = oops_to_nvram
|
||||||
@ -643,7 +643,7 @@ void __init nvram_init_oops_partition(int rtas_partition_exists)
|
|||||||
* partition. If that's too much, go back and capture uncompressed text.
|
* partition. If that's too much, go back and capture uncompressed text.
|
||||||
*/
|
*/
|
||||||
static void oops_to_nvram(struct kmsg_dumper *dumper,
|
static void oops_to_nvram(struct kmsg_dumper *dumper,
|
||||||
enum kmsg_dump_reason reason)
|
struct kmsg_dump_detail *detail)
|
||||||
{
|
{
|
||||||
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
|
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
|
||||||
static unsigned int oops_count = 0;
|
static unsigned int oops_count = 0;
|
||||||
@ -655,7 +655,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
|
|||||||
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
|
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
|
||||||
int rc = -1;
|
int rc = -1;
|
||||||
|
|
||||||
switch (reason) {
|
switch (detail->reason) {
|
||||||
case KMSG_DUMP_SHUTDOWN:
|
case KMSG_DUMP_SHUTDOWN:
|
||||||
/* These are almost always orderly shutdowns. */
|
/* These are almost always orderly shutdowns. */
|
||||||
return;
|
return;
|
||||||
@ -671,7 +671,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
|
|||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
|
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
|
||||||
__func__, (int) reason);
|
__func__, (int) detail->reason);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||

@@ -20,13 +20,13 @@
 * message, it just ensures that OPAL completely flushes the console buffer.
 */
 static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
-        enum kmsg_dump_reason reason)
+        struct kmsg_dump_detail *detail)
 {
     /*
     * Outside of a panic context the pollers will continue to run,
     * so we don't need to do any special flushing.
     */
-    if (reason != KMSG_DUMP_PANIC)
+    if (detail->reason != KMSG_DUMP_PANIC)
         return;
 
     opal_flush_console(0);

@@ -8,7 +8,7 @@
 #include <os.h>
 
 static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
-        enum kmsg_dump_reason reason)
+        struct kmsg_dump_detail *detail)
 {
     static struct kmsg_dump_iter iter;
     static DEFINE_SPINLOCK(lock);
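
[editor's note] The three hunks above are the same mechanical conversion: kmsg_dump callbacks now receive a struct kmsg_dump_detail, which wraps the reason code (so the printk core can also pass a description, per the summary above), instead of a bare enum kmsg_dump_reason. A minimal sketch of a dumper written against the new signature follows; the dumper name and line handling are illustrative, not part of this series:

    #include <linux/kmsg_dump.h>

    /* Hypothetical dumper: copies panic messages out line by line. */
    static void example_dump(struct kmsg_dumper *dumper,
                             struct kmsg_dump_detail *detail)
    {
        static char line[256];
        struct kmsg_dump_iter iter;
        size_t len;

        /* The reason code now lives inside the detail argument. */
        if (detail->reason != KMSG_DUMP_PANIC)
            return;

        kmsg_dump_rewind(&iter);
        while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
            ;   /* hand each captured line to the platform log here */
    }

    static struct kmsg_dumper example_dumper = {
        .dump = example_dump,
    };
    /* kmsg_dump_register(&example_dumper) hooks it into the dump path. */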

@@ -8,7 +8,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_auth.h>
@@ -18,8 +18,7 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_print.h>
 
-static DEFINE_SPINLOCK(accel_minor_lock);
-static struct idr accel_minors_idr;
+DEFINE_XARRAY_ALLOC(accel_minors_xa);
 
 static struct dentry *accel_debugfs_root;
 
@@ -117,99 +116,6 @@ void accel_set_device_instance_params(struct device *kdev, int index)
     kdev->type = &accel_sysfs_device_minor;
 }
 
-/**
- * accel_minor_alloc() - Allocates a new accel minor
- *
- * This function access the accel minors idr and allocates from it
- * a new id to represent a new accel minor
- *
- * Return: A new id on success or error code in case idr_alloc failed
- */
-int accel_minor_alloc(void)
-{
-    unsigned long flags;
-    int r;
-
-    spin_lock_irqsave(&accel_minor_lock, flags);
-    r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
-    spin_unlock_irqrestore(&accel_minor_lock, flags);
-
-    return r;
-}
-
-/**
- * accel_minor_remove() - Remove an accel minor
- * @index: The minor id to remove.
- *
- * This function access the accel minors idr and removes from
- * it the member with the id that is passed to this function.
- */
-void accel_minor_remove(int index)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&accel_minor_lock, flags);
-    idr_remove(&accel_minors_idr, index);
-    spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/**
- * accel_minor_replace() - Replace minor pointer in accel minors idr.
- * @minor: Pointer to the new minor.
- * @index: The minor id to replace.
- *
- * This function access the accel minors idr structure and replaces the pointer
- * that is associated with an existing id. Because the minor pointer can be
- * NULL, we need to explicitly pass the index.
- *
- * Return: 0 for success, negative value for error
- */
-void accel_minor_replace(struct drm_minor *minor, int index)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&accel_minor_lock, flags);
-    idr_replace(&accel_minors_idr, minor, index);
-    spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/*
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with accel_minor_release().
- *
- * The object can be only a drm_minor that represents an accel device.
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- */
-static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
-{
-    struct drm_minor *minor;
-    unsigned long flags;
-
-    spin_lock_irqsave(&accel_minor_lock, flags);
-    minor = idr_find(&accel_minors_idr, minor_id);
-    if (minor)
-        drm_dev_get(minor->dev);
-    spin_unlock_irqrestore(&accel_minor_lock, flags);
-
-    if (!minor) {
-        return ERR_PTR(-ENODEV);
-    } else if (drm_dev_is_unplugged(minor->dev)) {
-        drm_dev_put(minor->dev);
-        return ERR_PTR(-ENODEV);
-    }
-
-    return minor;
-}
-
-static void accel_minor_release(struct drm_minor *minor)
-{
-    drm_dev_put(minor->dev);
-}
-
 /**
  * accel_open - open method for ACCEL file
  * @inode: device inode
@@ -227,7 +133,7 @@ int accel_open(struct inode *inode, struct file *filp)
     struct drm_minor *minor;
     int retcode;
 
-    minor = accel_minor_acquire(iminor(inode));
+    minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
     if (IS_ERR(minor))
         return PTR_ERR(minor);
 
@@ -246,7 +152,7 @@ int accel_open(struct inode *inode, struct file *filp)
 
 err_undo:
     atomic_dec(&dev->open_count);
-    accel_minor_release(minor);
+    drm_minor_release(minor);
     return retcode;
 }
 EXPORT_SYMBOL_GPL(accel_open);
@@ -257,7 +163,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
     struct drm_minor *minor;
     int err;
 
-    minor = accel_minor_acquire(iminor(inode));
+    minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
     if (IS_ERR(minor))
         return PTR_ERR(minor);
 
@@ -274,7 +180,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
     err = 0;
 
 out:
-    accel_minor_release(minor);
+    drm_minor_release(minor);
 
     return err;
 }
@@ -290,15 +196,13 @@ void accel_core_exit(void)
     unregister_chrdev(ACCEL_MAJOR, "accel");
     debugfs_remove(accel_debugfs_root);
     accel_sysfs_destroy();
-    idr_destroy(&accel_minors_idr);
+    WARN_ON(!xa_empty(&accel_minors_xa));
 }
 
 int __init accel_core_init(void)
 {
     int ret;
 
-    idr_init(&accel_minors_idr);
-
     ret = accel_sysfs_init();
     if (ret < 0) {
         DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
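
[editor's note] This is the "use XArray for minor ids" work from the summary: the per-minor IDR plus its spinlock are deleted and lookups now go through the shared drm_minor_acquire()/drm_minor_release() helpers. For reference, the XArray idiom that replaces the IDR pattern looks roughly like the sketch below — xa_alloc() does its own locking, which is why accel_minor_lock disappears. Names and the id range are illustrative:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(example_minors_xa);

    /* Allocate a free minor id in [0, 63] and bind it to @entry. */
    static int example_minor_alloc(void *entry)
    {
        u32 id;
        int ret;

        ret = xa_alloc(&example_minors_xa, &id, entry,
                       XA_LIMIT(0, 63), GFP_KERNEL);
        return ret ? ret : id;
    }

    static void example_minor_remove(u32 id)
    {
        xa_erase(&example_minors_xa, id);  /* drops the id -> entry binding */
    }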

@@ -60,6 +60,10 @@ static struct {
     { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
 };
 
+/* Production fw_names from the table above */
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+
 static int ivpu_fw_request(struct ivpu_device *vdev)
 {
     int ret = -ENOENT;

@@ -144,37 +144,38 @@ const struct dma_fence_ops dma_fence_array_ops = {
 EXPORT_SYMBOL(dma_fence_array_ops);
 
 /**
- * dma_fence_array_create - Create a custom fence array
+ * dma_fence_array_alloc - Allocate a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ *
+ * Return dma fence array on success, NULL on failure
+ */
+struct dma_fence_array *dma_fence_array_alloc(int num_fences)
+{
+    struct dma_fence_array *array;
+
+    return kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
+}
+EXPORT_SYMBOL(dma_fence_array_alloc);
+
+/**
+ * dma_fence_array_init - Init a custom fence array
+ * @array: [in] dma fence array to arm
  * @num_fences: [in] number of fences to add in the array
 * @fences: [in] array containing the fences
 * @context: [in] fence context to use
 * @seqno: [in] sequence number to use
 * @signal_on_any: [in] signal on any fence in the array
 *
- * Allocate a dma_fence_array object and initialize the base fence with
- * dma_fence_init().
- * In case of error it returns NULL.
- *
- * The caller should allocate the fences array with num_fences size
- * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and dma_fence_put() is used on each fence on release.
- *
- * If @signal_on_any is true the fence array signals if any fence in the array
- * signals, otherwise it signals when all fences in the array signal.
+ * Implementation of @dma_fence_array_create without allocation. Useful to init
+ * a preallocated dma fence array in the path of reclaim or dma fence signaling.
 */
-struct dma_fence_array *dma_fence_array_create(int num_fences,
-        struct dma_fence **fences,
+void dma_fence_array_init(struct dma_fence_array *array,
+        int num_fences, struct dma_fence **fences,
         u64 context, unsigned seqno,
         bool signal_on_any)
 {
-    struct dma_fence_array *array;
-
     WARN_ON(!num_fences || !fences);
 
-    array = kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
-    if (!array)
-        return NULL;
-
     array->num_fences = num_fences;
 
     spin_lock_init(&array->lock);
@@ -200,6 +201,41 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
     */
     while (num_fences--)
         WARN_ON(dma_fence_is_container(fences[num_fences]));
+}
+EXPORT_SYMBOL(dma_fence_array_init);
+
+/**
+ * dma_fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and dma_fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+        struct dma_fence **fences,
+        u64 context, unsigned seqno,
+        bool signal_on_any)
+{
+    struct dma_fence_array *array;
+
+    array = dma_fence_array_alloc(num_fences);
+    if (!array)
+        return NULL;
+
+    dma_fence_array_init(array, num_fences, fences,
+        context, seqno, signal_on_any);
 
     return array;
 }
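
[editor's note] This is the "split dma fence array creation into alloc and arm" item from the summary. The point of the split, as the new kerneldoc says, is that the GFP_KERNEL allocation can be done up front while dma_fence_array_init() is safe to call later in reclaim or fence-signalling paths. A usage sketch, with placeholder fences/context/seqno values:

    struct dma_fence_array *array;

    /* Step 1: allocate where sleeping is fine. */
    array = dma_fence_array_alloc(num_fences);
    if (!array)
        return -ENOMEM;

    /* Step 2: arm it later, e.g. from the dma-fence signalling path. */
    dma_fence_array_init(array, num_fences, fences,
                         dma_fence_context_alloc(1), 1, false);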

@@ -7,17 +7,15 @@
 */
 
 #include <linux/cdev.h>
-#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/dma-buf.h>
-#include <linux/err.h>
-#include <linux/xarray.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/nospec.h>
-#include <linux/uaccess.h>
-#include <linux/syscalls.h>
 #include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/nospec.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
 #include <uapi/linux/dma-heap.h>
 
 #define DEVNAME "dma_heap"
@@ -28,9 +26,10 @@
 * struct dma_heap - represents a dmabuf heap in the system
 * @name: used for debugging/device-node name
 * @ops: ops struct for this heap
- * @heap_devt heap device node
- * @list list head connecting to list of heaps
- * @heap_cdev heap char device
+ * @priv: private data for this heap
+ * @heap_devt: heap device node
+ * @list: list head connecting to list of heaps
+ * @heap_cdev: heap char device
 *
 * Represents a heap of memory from which buffers can be made.
 */
@@ -193,11 +192,11 @@ static const struct file_operations dma_heap_fops = {
 };
 
 /**
- * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * dma_heap_get_drvdata - get per-heap driver data
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
- * The per-subdriver data for the heap.
+ * The per-heap data for the heap.
 */
 void *dma_heap_get_drvdata(struct dma_heap *heap)
 {
@@ -205,8 +204,8 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
 }
 
 /**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
+ * dma_heap_get_name - get heap name
+ * @heap: DMA-Heap to retrieve the name of
 *
 * Returns:
 * The char* for the heap name.
@@ -216,6 +215,10 @@ const char *dma_heap_get_name(struct dma_heap *heap)
     return heap->name;
 }
 
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
     struct dma_heap *heap, *h, *err_ret;

@@ -107,7 +107,7 @@ config DRM_KMS_HELPER
 
 config DRM_PANIC
     bool "Display a user-friendly message when a kernel panic occurs"
-    depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE)
+    depends on DRM
     select FONT_SUPPORT
     help
       Enable a drm panic handler, which will display a user-friendly message
@@ -149,6 +149,37 @@ config DRM_PANIC_SCREEN
       or by writing to /sys/module/drm/parameters/panic_screen sysfs entry
       Default is "user"
 
+config DRM_PANIC_SCREEN_QR_CODE
+    bool "Add a panic screen with a QR code"
+    depends on DRM_PANIC && RUST
+    help
+      This option adds a QR code generator, and a panic screen with a QR
+      code. The QR code will contain the last lines of kmsg and other debug
+      information. This should be easier for the user to report a kernel
+      panic, with all debug information available.
+      To use this panic screen, also set DRM_PANIC_SCREEN to "qr_code"
+
+config DRM_PANIC_SCREEN_QR_CODE_URL
+    string "Base URL of the QR code in the panic screen"
+    depends on DRM_PANIC_SCREEN_QR_CODE
+    help
+      This option sets the base URL to report the kernel panic. If it's set
+      the QR code will contain the URL and the kmsg compressed with zlib as
+      a URL parameter. If it's empty, the QR code will contain the kmsg as
+      uncompressed text only.
+      There is a demo code in javascript, to decode and uncompress the kmsg
+      data from the URL parameter at https://github.com/kdj0c/panic_report
+
+config DRM_PANIC_SCREEN_QR_VERSION
+    int "Maximum version (size) of the QR code."
+    depends on DRM_PANIC_SCREEN_QR_CODE
+    default 40
+    help
+      This option limits the version (or size) of the QR code. QR code
+      version ranges from Version 1 (21x21) to Version 40 (177x177).
+      Smaller QR code are easier to read, but will contain less debugging
+      data. Default is 40.
+
 config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
     bool "Enable refcount backtrace history in the DP MST helpers"
     depends on STACKTRACE_SUPPORT
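
[editor's note] This is the "display QR code (in rust)" panic feature from the summary. Putting the new options together, a .config fragment that enables the QR panic screen might look like the following (URL and values illustrative):

    CONFIG_RUST=y
    CONFIG_DRM_PANIC=y
    CONFIG_DRM_PANIC_SCREEN="qr_code"
    CONFIG_DRM_PANIC_SCREEN_QR_CODE=y
    CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL="https://example.com/panic"
    CONFIG_DRM_PANIC_SCREEN_QR_VERSION=40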

@@ -89,6 +89,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
     drm_privacy_screen_x86.o
 drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
 drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
 obj-$(CONFIG_DRM) += drm.o
 
 obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o

@@ -39,23 +39,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
     -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
     -I$(FULL_AMD_PATH)/amdkfd
 
-subdir-ccflags-y := -Wextra
-subdir-ccflags-y += -Wunused
-subdir-ccflags-y += -Wmissing-prototypes
-subdir-ccflags-y += -Wmissing-declarations
-subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += -Wold-style-definition
-subdir-ccflags-y += -Wmissing-format-attribute
-# Need this to avoid recursive variable evaluation issues
-cond-flags := $(call cc-option, -Wunused-but-set-variable) \
-    $(call cc-option, -Wunused-const-variable) \
-    $(call cc-option, -Wstringop-truncation) \
-    $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(cond-flags)
-subdir-ccflags-y += -Wno-unused-parameter
-subdir-ccflags-y += -Wno-type-limits
-subdir-ccflags-y += -Wno-sign-compare
-subdir-ccflags-y += -Wno-missing-field-initializers
+# Locally disable W=1 warnings enabled in drm subsystem Makefile
 subdir-ccflags-y += -Wno-override-init
 subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
 

@@ -118,6 +118,8 @@
 
 #define MAX_GPU_INSTANCE 64
 
+#define GFX_SLICE_PERIOD msecs_to_jiffies(250)
+
 struct amdgpu_gpu_instance {
     struct amdgpu_device *adev;
     int mgpu_fan_enabled;
@@ -235,6 +237,7 @@ extern int sched_policy;
 extern bool debug_evictions;
 extern bool no_system_mem_limit;
 extern int halt_if_hws_hang;
+extern uint amdgpu_svm_default_granularity;
 #else
 static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
 static const bool __maybe_unused debug_evictions; /* = false */
@@ -348,7 +351,7 @@ enum amdgpu_kiq_irq {
     AMDGPU_CP_KIQ_IRQ_LAST
 };
 #define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
-#define MAX_KIQ_REG_WAIT (amdgpu_sriov_vf(adev) ? 50000 : 5000) /* in usecs, extend for VF */
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
 #define MAX_KIQ_REG_TRY 1000
 
@@ -823,17 +826,6 @@ struct amdgpu_mqd {
 struct amdgpu_reset_domain;
 struct amdgpu_fru_info;
 
-struct amdgpu_reset_info {
-    /* reset dump register */
-    u32 *reset_dump_reg_list;
-    u32 *reset_dump_reg_value;
-    int num_regs;
-
-#ifdef CONFIG_DEV_COREDUMP
-    struct amdgpu_coredump_info *coredump_info;
-#endif
-};
-
 /*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
@@ -1157,8 +1149,6 @@ struct amdgpu_device {
 
     struct mutex benchmark_mutex;
 
-    struct amdgpu_reset_info reset_info;
-
     bool scpm_enabled;
     uint32_t scpm_status;
 
@@ -1175,6 +1165,11 @@ struct amdgpu_device {
     bool debug_disable_soft_recovery;
     bool debug_use_vram_fw_buf;
     bool debug_enable_ras_aca;
+    bool debug_exp_resets;
+
+    bool enforce_isolation[MAX_XCP];
+    /* Added this mutex for cleaner shader isolation between GFX and compute processes */
+    struct mutex enforce_isolation_mutex;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1484,7 +1479,6 @@ extern const int amdgpu_max_kms_ioctl;
 
 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
 void amdgpu_driver_unload_kms(struct drm_device *dev);
-void amdgpu_driver_lastclose_kms(struct drm_device *dev);
 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
 void amdgpu_driver_postclose_kms(struct drm_device *dev,
         struct drm_file *file_priv);
@@ -1588,13 +1582,6 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
 static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
 #endif
 
-#if defined(CONFIG_DRM_AMD_DC)
-int amdgpu_dm_display_resume(struct amdgpu_device *adev );
-#else
-static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
-#endif
-
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
 

@@ -80,6 +80,9 @@ static void aca_banks_release(struct aca_banks *banks)
 {
     struct aca_bank_node *node, *tmp;
 
+    if (list_empty(&banks->list))
+        return;
+
     list_for_each_entry_safe(node, tmp, &banks->list, node) {
         list_del(&node->node);
         kvfree(node);
@@ -453,13 +456,13 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
 
     switch (type) {
     case ACA_ERROR_TYPE_UE:
-        amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+        amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count);
        break;
     case ACA_ERROR_TYPE_CE:
-        amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+        amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count);
        break;
     case ACA_ERROR_TYPE_DEFERRED:
-        amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count);
+        amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count);
        break;
     default:
        break;
@@ -562,9 +565,13 @@ static void aca_error_fini(struct aca_error *aerr)
     struct aca_bank_error *bank_error, *tmp;
 
     mutex_lock(&aerr->lock);
+    if (list_empty(&aerr->list))
+        goto out_unlock;
+
     list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
         aca_bank_error_remove(aerr, bank_error);
 
+out_unlock:
     mutex_destroy(&aerr->lock);
 }
 
@@ -680,6 +687,9 @@ static void aca_manager_fini(struct aca_handle_manager *mgr)
 {
     struct aca_handle *handle, *tmp;
 
+    if (list_empty(&mgr->list))
+        return;
+
     list_for_each_entry_safe(handle, tmp, &mgr->list, node)
         amdgpu_aca_remove_handle(handle);
 }

@@ -364,15 +364,15 @@ allocate_mem_reserve_bo_failed:
     return r;
 }
 
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
 {
-    struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
+    struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
 
-    amdgpu_bo_reserve(bo, true);
-    amdgpu_bo_kunmap(bo);
-    amdgpu_bo_unpin(bo);
-    amdgpu_bo_unreserve(bo);
-    amdgpu_bo_unref(&(bo));
+    amdgpu_bo_reserve(*bo, true);
+    amdgpu_bo_kunmap(*bo);
+    amdgpu_bo_unpin(*bo);
+    amdgpu_bo_unreserve(*bo);
+    amdgpu_bo_unref(bo);
 }
 
 int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
@@ -783,22 +783,6 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
     return 0;
 }
 
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
-        int hub_inst, int hub_type)
-{
-    if (!hub_type) {
-        if (adev->gfxhub.funcs->query_utcl2_poison_status)
-            return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
-        else
-            return false;
-    } else {
-        if (adev->mmhub.funcs->query_utcl2_poison_status)
-            return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
-        else
-            return false;
-    }
-}
-
 int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
 {
     return kgd2kfd_check_and_lock_kfd();
@@ -887,3 +871,21 @@ free_ring_funcs:
 
     return r;
 }
+
+/* Stop scheduling on KFD */
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+    if (!adev->kfd.init_complete)
+        return 0;
+
+    return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
+}
+
+/* Start scheduling on KFD */
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+    if (!adev->kfd.init_complete)
+        return 0;
+
+    return kgd2kfd_start_sched(adev->kfd.dev, node_id);
+}

@@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
 int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
         void **mem_obj, uint64_t *gpu_addr,
         void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
 int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
         void **mem_obj);
 void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
@@ -264,6 +264,8 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
         uint32_t *payload);
 int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
         u32 inst);
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
 
 /* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
@@ -322,7 +324,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
         void **kptr, uint64_t *size);
 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
 
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);
 
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
         struct dma_fence __rcu **ef);
@@ -345,11 +347,9 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad
         pasid_notify pasid_fn, void *data, uint32_t reset);
 
 bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
 void amdgpu_amdkfd_block_mmu_notifications(void *p);
 int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
-        int hub_inst, int hub_type);
 int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
         uint64_t size, u32 alloc_flag, int8_t xcp_id);
 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
@@ -426,6 +426,8 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
 int kgd2kfd_check_and_lock_kfd(void);
 void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
 #else
 static inline int kgd2kfd_init(void)
 {
@@ -496,5 +498,15 @@ static inline int kgd2kfd_check_and_lock_kfd(void)
 static inline void kgd2kfd_unlock_kfd(void)
 {
 }
+
+static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+    return 0;
+}
+
+static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+    return 0;
+}
 #endif
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
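
[editor's note] The void ** change above lets amdgpu_amdkfd_free_gtt_mem() clear the caller's handle: it casts mem_obj to struct amdgpu_bo ** and hands it to amdgpu_bo_unref(), which NULLs the pointer after dropping the reference. Callers therefore now pass the address of their handle. A sketch with an illustrative variable name:

    void *mqd;    /* handle filled in by amdgpu_amdkfd_alloc_gtt_mem() */

    /* ... */

    amdgpu_amdkfd_free_gtt_mem(adev, &mqd);   /* on return, mqd == NULL */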

@@ -191,4 +191,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
     .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
     .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
     .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+    .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v9_hqd_reset,
 };

@@ -20,7 +20,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */
 #include <linux/module.h>
-#include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
 #include "amdgpu.h"
@@ -300,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
             if (r)
                 goto out;
         } else {
-            drm_sched_start(&ring->sched, false);
+            drm_sched_start(&ring->sched);
         }
     }
 
@@ -418,5 +417,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
     .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
     .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
     .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
-    .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
+    .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+    .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v9_hqd_reset
 };

@@ -541,5 +541,7 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
         kgd_gfx_v9_4_3_set_wave_launch_trap_override,
     .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
     .set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
-    .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
+    .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
+    .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v9_hqd_reset
 };

@@ -1070,6 +1070,20 @@ static void program_trap_handler_settings(struct amdgpu_device *adev,
     unlock_srbm(adev);
 }
 
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+        uint32_t pipe_id, uint32_t queue_id,
+        uint32_t inst)
+{
+    return 0;
+}
+
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+        uint32_t pipe_id, uint32_t queue_id,
+        uint32_t inst, unsigned int utimeout)
+{
+    return 0;
+}
+
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
     .program_sh_mem_settings = kgd_program_sh_mem_settings,
     .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1097,4 +1111,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
     .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
     .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
     .program_trap_handler_settings = program_trap_handler_settings,
+    .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v10_hqd_reset
 };

@@ -56,3 +56,12 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
         uint32_t grace_period,
         uint32_t *reg_offset,
         uint32_t *reg_data);
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+        uint32_t pipe_id,
+        uint32_t queue_id,
+        uint32_t inst);
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+        uint32_t pipe_id,
+        uint32_t queue_id,
+        uint32_t inst,
+        unsigned int utimeout);

@@ -680,5 +680,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
     .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
     .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
     .set_address_watch = kgd_gfx_v10_set_address_watch,
-    .clear_address_watch = kgd_gfx_v10_clear_address_watch
+    .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+    .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v10_hqd_reset
 };

@@ -786,6 +786,20 @@ static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
     return 0;
 }
 
+static uint64_t kgd_gfx_v11_hqd_get_pq_addr(struct amdgpu_device *adev,
+        uint32_t pipe_id, uint32_t queue_id,
+        uint32_t inst)
+{
+    return 0;
+}
+
+static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
+        uint32_t pipe_id, uint32_t queue_id,
+        uint32_t inst, unsigned int utimeout)
+{
+    return 0;
+}
+
 const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
     .program_sh_mem_settings = program_sh_mem_settings_v11,
     .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -808,5 +822,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
     .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
     .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
     .set_address_watch = kgd_gfx_v11_set_address_watch,
-    .clear_address_watch = kgd_gfx_v11_clear_address_watch
+    .clear_address_watch = kgd_gfx_v11_clear_address_watch,
+    .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v11_hqd_reset
 };

@@ -1144,6 +1144,109 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
     kgd_gfx_v9_unlock_srbm(adev, inst);
 }
 
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+        uint32_t pipe_id, uint32_t queue_id,
+        uint32_t inst)
+{
+    uint32_t low, high;
+    uint64_t queue_addr = 0;
+
+    if (!adev->debug_exp_resets &&
+        !adev->gfx.num_gfx_rings)
+        return 0;
+
+    kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+    amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+    if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+        goto unlock_out;
+
+    low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+    high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+    /* only concerned with user queues. */
+    if (!high)
+        goto unlock_out;
+
+    queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+unlock_out:
+    amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+    kgd_gfx_v9_release_queue(adev, inst);
+
+    return queue_addr;
+}
+
+/* assume queue acquired */
+static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst,
+        unsigned int utimeout)
+{
+    unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+    while (true) {
+        uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+
+        if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+            return 0;
+
+        if (time_after(jiffies, end_jiffies))
+            return -ETIME;
+
+        usleep_range(500, 1000);
+    }
+}
+
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+        uint32_t pipe_id, uint32_t queue_id,
+        uint32_t inst, unsigned int utimeout)
+{
+    uint32_t low, high, pipe_reset_data = 0;
+    uint64_t queue_addr = 0;
+
+    kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+    amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+    if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+        goto unlock_out;
+
+    low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+    high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+    /* only concerned with user queues. */
+    if (!high)
+        goto unlock_out;
+
+    queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+    pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n",
+        inst, pipe_id, queue_id);
+
+    /* assume previous dequeue request issued will take affect after reset */
+    WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+    if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+        goto unlock_out;
+
+    pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id);
+
+    pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1);
+    pipe_reset_data = pipe_reset_data << pipe_id;
+
+    WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data);
+    WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0);
+
+    if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+        queue_addr = 0;
+
+unlock_out:
+    pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n",
+        inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!");
+    amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+    kgd_gfx_v9_release_queue(adev, inst);
+
+    return queue_addr;
+}
+
 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
     .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
     .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1172,4 +1275,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
     .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
     .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
     .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+    .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+    .hqd_reset = kgd_gfx_v9_hqd_reset
 };
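
[editor's note] These hqd_get_pq_addr/hqd_reset hooks back the per-queue reset support listed in the amdgpu summary. A note on the address math in the helpers above: the CP_HQD_PQ_BASE/CP_HQD_PQ_BASE_HI registers appear to hold the ring buffer GPU address right-shifted by 8 and split into low/high words, so the code rebuilds the 64-bit value and shifts it back. A worked example with made-up register reads:

    uint32_t low = 0x00345600, high = 0x0000007f;   /* illustrative values */
    uint64_t queue_addr = ((((uint64_t)high << 32) | low) << 8);
    /* queue_addr == 0x7f0034560000 */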

@@ -101,3 +101,12 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
         uint32_t grace_period,
         uint32_t *reg_offset,
         uint32_t *reg_data);
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+        uint32_t pipe_id,
+        uint32_t queue_id,
+        uint32_t inst);
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+        uint32_t pipe_id,
+        uint32_t queue_id,
+        uint32_t inst,
+        unsigned int utimeout);

@@ -25,7 +25,6 @@
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/task.h>
-#include <linux/fdtable.h>
 #include <drm/ttm/ttm_tt.h>
 
 #include <drm/drm_exec.h>
@@ -818,18 +817,13 @@ static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
     if (!mem->dmabuf) {
         struct amdgpu_device *bo_adev;
         struct dma_buf *dmabuf;
-        int r, fd;
 
         bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
-        r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+        dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
             mem->gem_handle,
             mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
-            DRM_RDWR : 0, &fd);
-        if (r)
-            return r;
-        dmabuf = dma_buf_get(fd);
-        close_fd(fd);
-        if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+            DRM_RDWR : 0);
+        if (IS_ERR(dmabuf))
             return PTR_ERR(dmabuf);
         mem->dmabuf = dmabuf;
     }
@@ -1252,7 +1246,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
     return ret;
 }
 
-static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
         struct kfd_mem_attachment *entry,
         struct amdgpu_sync *sync)
 {
@@ -1260,11 +1254,18 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
     struct amdgpu_device *adev = entry->adev;
     struct amdgpu_vm *vm = bo_va->base.vm;
 
+    if (bo_va->queue_refcount) {
+        pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
+        return -EBUSY;
+    }
+
     amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
 
     amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
     amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
+    return 0;
 }
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -2191,7 +2192,10 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
             entry->va, entry->va + bo_size, entry);
 
-        unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+        ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+        if (ret)
+            goto unreserve_out;
+
         entry->is_mapped = false;
 
         mem->mapped_to_gpu_memory--;
@@ -2226,11 +2230,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
 /**
 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
 * @bo: Buffer object to be mapped
+ * @bo_gart: Return bo reference
 *
 * Before return, bo reference count is incremented. To release the reference and unpin/
 * unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
 */
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
 {
     int ret;
 
@@ -2257,7 +2262,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
 
     amdgpu_bo_unreserve(bo);
 
-    bo = amdgpu_bo_ref(bo);
+    *bo_gart = amdgpu_bo_ref(bo);
 
     return 0;
 
@@ -3200,12 +3205,13 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
     return 0;
 }
 
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
 {
+    struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
     struct kfd_mem_attachment *entry;
 
     list_for_each_entry(entry, &mem->attachments, list) {
-        if (entry->is_mapped && entry->adev == adev)
+        if (entry->is_mapped && entry->bo_va->base.vm == vm)
             return true;
     }
     return false;
|
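The refactor above makes unmap_bo_from_gpuvm() fallible: a BO that still backs a user-mode queue now refuses to be unmapped and returns -EBUSY instead of being torn down underneath the queue. A minimal userspace-style sketch of the same guard pattern (types and names hypothetical, not the driver's API):

    #include <errno.h>
    #include <stdio.h>

    struct mapping {
    	int queue_refcount;   /* queues still using this mapping */
    	int is_mapped;
    };

    /* Refuse teardown while consumers still hold a reference. */
    static int unmap_if_unused(struct mapping *m)
    {
    	if (m->queue_refcount)
    		return -EBUSY;   /* caller must retry after queues drain */
    	m->is_mapped = 0;
    	return 0;
    }

    int main(void)
    {
    	struct mapping m = { .queue_refcount = 1, .is_mapped = 1 };

    	if (unmap_if_unused(&m) == -EBUSY)
    		printf("still in use, unmap deferred\n");
    	m.queue_refcount = 0;
    	printf("unmap now: %d\n", unmap_if_unused(&m));
    	return 0;
    }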
@@ -1018,8 +1018,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
 		if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
 			args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
 
-			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-						  sizeof(args));
+			if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+						      index, (uint32_t *)&args, sizeof(args)))
+				return -EINVAL;
 
 			dividers->post_div = args.v3.ucPostDiv;
 			dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1039,8 +1040,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
 		if (strobe_mode)
 			args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
 
-		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-					  sizeof(args));
+		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					      index, (uint32_t *)&args, sizeof(args)))
+			return -EINVAL;
 
 		dividers->post_div = args.v5.ucPostDiv;
 		dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1058,8 +1060,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
 		/* fusion */
 		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */
 
-		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-					  sizeof(args));
+		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					      index, (uint32_t *)&args, sizeof(args)))
+			return -EINVAL;
 
 		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
 		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1070,8 +1073,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
 		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
 		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */
 
-		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-					  sizeof(args));
+		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					      index, (uint32_t *)&args, sizeof(args)))
+			return -EINVAL;
 
 		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
 		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1113,8 +1117,9 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 	if (strobe_mode)
 		args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
 
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-				  sizeof(args));
+	if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+				      index, (uint32_t *)&args, sizeof(args)))
+		return -EINVAL;
 
 	mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
 	mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1211,8 +1216,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
 		args.v2.ucVoltageMode = 0;
 		args.v2.usVoltageLevel = 0;
 
-		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-					  sizeof(args));
+		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					      index, (uint32_t *)&args, sizeof(args)))
+			return -EINVAL;
 
 		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
 		break;
@@ -1221,8 +1227,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
 		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
 		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
 
-		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
-					  sizeof(args));
+		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					      index, (uint32_t *)&args, sizeof(args)))
+			return -EINVAL;
 
 		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
 		break;
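Every hunk above applies the same mechanical fix: the return value of amdgpu_atom_execute_table() was previously discarded, so a failed AtomBIOS table execution let the caller parse stale output arguments. A small compilable sketch of the before/after shape (execute_table() and get_dividers() are stand-ins, not the driver's functions):

    #include <errno.h>
    #include <stdio.h>

    static int execute_table(int fail) { return fail ? -1 : 0; }

    /* After the change: a table failure propagates as -EINVAL
     * instead of letting the caller consume stale output args. */
    static int get_dividers(int fail, int *post_div)
    {
    	if (execute_table(fail))
    		return -EINVAL;
    	*post_div = 4;	/* parse outputs only on success */
    	return 0;
    }

    int main(void)
    {
    	int div;

    	printf("ok path:   %d\n", get_dividers(0, &div));
    	printf("fail path: %d\n", get_dividers(1, &div));
    	return 0;
    }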
@@ -431,6 +431,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
 		goto success;
 	}
 
+	if (amdgpu_read_platform_bios(adev)) {
+		dev_info(adev->dev, "Fetched VBIOS from platform\n");
+		goto success;
+	}
+
 	if (amdgpu_read_bios(adev)) {
 		dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
 		goto success;
@@ -446,11 +451,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
 		goto success;
 	}
 
-	if (amdgpu_read_platform_bios(adev)) {
-		dev_info(adev->dev, "Fetched VBIOS from platform\n");
-		goto success;
-	}
-
 	dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
 	return false;
 
@@ -414,7 +414,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			return -EINVAL;
 		}
 
-		err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+		err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
 		if (err) {
 			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
 			amdgpu_ucode_release(&adev->pm.fw);
@@ -249,11 +249,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
 static struct edid *
 amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.bios_hardcoded_edid) {
-		return kmemdup((unsigned char *)adev->mode_info.bios_hardcoded_edid,
-			       adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
-	}
-	return NULL;
+	return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
 }
 
 static void amdgpu_connector_get_edid(struct drm_connector *connector)
@@ -442,6 +438,9 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
 			continue;
 
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		if (!mode)
+			return;
+
 		drm_mode_probed_add(connector, mode);
 	}
 }
@@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 			if (size < sizeof(struct drm_amdgpu_bo_list_in))
 				goto free_partial_kdata;
 
+			/* Only a single BO list is allowed to simplify handling. */
+			if (p->bo_list)
+				ret = -EINVAL;
+
 			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 			if (ret)
 				goto free_partial_kdata;
@@ -292,6 +296,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 				       num_ibs[i], &p->jobs[i]);
 		if (ret)
 			goto free_all_kdata;
+		p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
 	}
 	p->gang_leader = p->jobs[p->gang_leader_idx];
 
@@ -1106,7 +1111,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 			struct drm_gpu_scheduler *sched = entity->rq->sched;
 			struct amdgpu_ring *ring = to_amdgpu_ring(sched);
 
-			if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+			if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
 				return -EINVAL;
 		}
 	}
@@ -2026,100 +2026,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
 			amdgpu_debugfs_sclk_set, "%llu\n");
 
-static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
-		char __user *buf, size_t size, loff_t *pos)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
-	char reg_offset[12];
-	int i, ret, len = 0;
-
-	if (*pos)
-		return 0;
-
-	memset(reg_offset, 0, 12);
-	ret = down_read_killable(&adev->reset_domain->sem);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < adev->reset_info.num_regs; i++) {
-		sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
-		up_read(&adev->reset_domain->sem);
-		if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
-			return -EFAULT;
-
-		len += strlen(reg_offset);
-		ret = down_read_killable(&adev->reset_domain->sem);
-		if (ret)
-			return ret;
-	}
-
-	up_read(&adev->reset_domain->sem);
-	*pos += len;
-
-	return len;
-}
-
-static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
-		const char __user *buf, size_t size, loff_t *pos)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
-	char reg_offset[11];
-	uint32_t *new = NULL, *tmp = NULL;
-	unsigned int len = 0;
-	int ret, i = 0;
-
-	do {
-		memset(reg_offset, 0, 11);
-		if (copy_from_user(reg_offset, buf + len,
-				   min(10, (size-len)))) {
-			ret = -EFAULT;
-			goto error_free;
-		}
-
-		new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
-		if (!new) {
-			ret = -ENOMEM;
-			goto error_free;
-		}
-		tmp = new;
-		if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
-			ret = -EINVAL;
-			goto error_free;
-		}
-
-		len += ret;
-		i++;
-	} while (len < size);
-
-	new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
-	if (!new) {
-		ret = -ENOMEM;
-		goto error_free;
-	}
-	ret = down_write_killable(&adev->reset_domain->sem);
-	if (ret)
-		goto error_free;
-
-	swap(adev->reset_info.reset_dump_reg_list, tmp);
-	swap(adev->reset_info.reset_dump_reg_value, new);
-	adev->reset_info.num_regs = i;
-	up_write(&adev->reset_domain->sem);
-	ret = size;
-
-error_free:
-	if (tmp != new)
-		kfree(tmp);
-	kfree(new);
-	return ret;
-}
-
-static const struct file_operations amdgpu_reset_dump_register_list = {
-	.owner = THIS_MODULE,
-	.read = amdgpu_reset_dump_register_list_read,
-	.write = amdgpu_reset_dump_register_list_write,
-	.llseek = default_llseek
-};
-
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
 	struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -2204,8 +2110,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 			    &amdgpu_debugfs_vm_info_fops);
 	debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
 			    &amdgpu_benchmark_fops);
-	debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
-			    &amdgpu_reset_dump_register_list);
 
 	adev->debugfs_vbios_blob.data = adev->bios;
 	adev->debugfs_vbios_blob.size = adev->bios_size;
@@ -28,8 +28,8 @@
 #include "atom.h"
 
 #ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
-		     struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+		     bool vram_lost, struct amdgpu_job *job)
 {
 }
 #else
@@ -203,7 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
 	struct amdgpu_coredump_info *coredump = data;
 	struct drm_print_iterator iter;
 	struct amdgpu_vm_fault_info *fault_info;
-	int i, ver;
+	int ver;
 
 	iter.data = buffer;
 	iter.offset = 0;
@@ -236,7 +236,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
 	drm_printf(&p, "\nSOC Memory Information\n");
 	drm_printf(&p, "real vram size: %llu\n", coredump->adev->gmc.real_vram_size);
 	drm_printf(&p, "visible vram size: %llu\n", coredump->adev->gmc.visible_vram_size);
-	drm_printf(&p, "visible vram size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
+	drm_printf(&p, "gtt size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
 
 	/* GDS Config */
 	drm_printf(&p, "\nGDS Config\n");
@@ -315,16 +315,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
 		}
 	}
 
-	if (coredump->reset_vram_lost)
+	if (coredump->skip_vram_check)
+		drm_printf(&p, "VRAM lost check is skipped!\n");
+	else if (coredump->reset_vram_lost)
 		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
-	if (coredump->adev->reset_info.num_regs) {
-		drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
-
-		for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
-			drm_printf(&p, "0x%08x: 0x%08x\n",
-				   coredump->adev->reset_info.reset_dump_reg_list[i],
-				   coredump->adev->reset_info.reset_dump_reg_value[i]);
-	}
 
 	return count - iter.remain;
 }
@@ -334,12 +328,11 @@ static void amdgpu_devcoredump_free(void *data)
 	kfree(data);
 }
 
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
-		     struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+		     bool vram_lost, struct amdgpu_job *job)
 {
-	struct amdgpu_coredump_info *coredump;
 	struct drm_device *dev = adev_to_drm(adev);
-	struct amdgpu_job *job = reset_context->job;
+	struct amdgpu_coredump_info *coredump;
 	struct drm_sched_job *s_job;
 
 	coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
@@ -349,11 +342,12 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
 		return;
 	}
 
+	coredump->skip_vram_check = skip_vram_check;
 	coredump->reset_vram_lost = vram_lost;
 
-	if (reset_context->job && reset_context->job->vm) {
+	if (job && job->vm) {
+		struct amdgpu_vm *vm = job->vm;
 		struct amdgpu_task_info *ti;
-		struct amdgpu_vm *vm = reset_context->job->vm;
 
 		ti = amdgpu_vm_get_task_info_vm(vm);
 		if (ti) {
@@ -26,7 +26,6 @@
 #define __AMDGPU_DEV_COREDUMP_H__
 
 #include "amdgpu.h"
-#include "amdgpu_reset.h"
 
 #ifdef CONFIG_DEV_COREDUMP
 
@@ -36,12 +35,12 @@ struct amdgpu_coredump_info {
 	struct amdgpu_device *adev;
 	struct amdgpu_task_info reset_task_info;
 	struct timespec64 reset_time;
+	bool skip_vram_check;
 	bool reset_vram_lost;
 	struct amdgpu_ring *ring;
 };
 #endif
 
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
-		     struct amdgpu_reset_context *reset_context);
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+		     bool vram_lost, struct amdgpu_job *job);
 
 #endif
@@ -1916,6 +1916,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
  */
 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 {
+	int i;
+
 	if (amdgpu_sched_jobs < 4) {
 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 			 amdgpu_sched_jobs);
@@ -1970,6 +1972,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
+	for (i = 0; i < MAX_XCP; i++)
+		adev->enforce_isolation[i] = !!enforce_isolation;
+
 	return 0;
 }
 
@@ -2471,6 +2476,7 @@ out:
  */
 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ip_block *ip_block;
 	struct pci_dev *parent;
 	int i, r;
 	bool total;
@@ -2608,7 +2614,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	if (!total)
 		return -ENODEV;
 
-	amdgpu_amdkfd_device_probe(adev);
+	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+	if (ip_block->status.valid != false)
+		amdgpu_amdkfd_device_probe(adev);
+
 	adev->cg_flags &= amdgpu_cg_mask;
 	adev->pg_flags &= amdgpu_pg_mask;
 
@@ -3948,6 +3957,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
 		adev->ram_is_direct_mapped = true;
 }
 
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * return if IOMMU remapping bar address
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(adev->dev);
+	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+		       domain->type == IOMMU_DOMAIN_DMA_FQ))
+		return true;
+
+	return false;
+}
+#endif
+
 static const struct attribute *amdgpu_dev_attributes[] = {
 	&dev_attr_pcie_replay_count.attr,
 	NULL
@@ -4055,6 +4085,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->notifier_lock);
 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
 	mutex_init(&adev->benchmark_mutex);
+	mutex_init(&adev->gfx.reset_sem_mutex);
+	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
+	mutex_init(&adev->enforce_isolation_mutex);
+	mutex_init(&adev->gfx.kfd_sch_mutex);
 
 	amdgpu_device_init_apu_flags(adev);
 
@@ -4086,6 +4120,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			  amdgpu_device_delayed_init_work_handler);
 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
 			  amdgpu_device_delay_enable_gfx_off);
+	/*
+	 * Initialize the enforce_isolation work structures for each XCP
+	 * partition. This work handler is responsible for enforcing shader
+	 * isolation on AMD GPUs. It counts the number of emitted fences for
+	 * each GFX and compute ring. If there are any fences, it schedules
+	 * the `enforce_isolation_work` to be run after a delay. If there are
+	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
+	 * runqueue.
+	 */
+	for (i = 0; i < MAX_XCP; i++) {
+		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
+				  amdgpu_gfx_enforce_isolation_handler);
+		adev->gfx.enforce_isolation[i].adev = adev;
+		adev->gfx.enforce_isolation[i].xcp_id = i;
+	}
 
 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
 
@@ -4482,6 +4531,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 {
 	dev_info(adev->dev, "amdgpu: finishing device.\n");
 	flush_delayed_work(&adev->delayed_init_work);
+
+	if (adev->mman.initialized)
+		drain_workqueue(adev->mman.bdev.wq);
 	adev->shutdown = true;
 
 	/* make sure IB test finished before entering exclusive mode
@@ -4502,9 +4554,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	}
 	amdgpu_fence_driver_hw_fini(adev);
 
-	if (adev->mman.initialized)
-		drain_workqueue(adev->mman.bdev.wq);
-
 	if (adev->pm.sysfs_initialized)
 		amdgpu_pm_sysfs_fini(adev);
 	if (adev->ucode_sysfs_en)
@@ -5278,16 +5327,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 {
 	int i, r = 0;
 	struct amdgpu_job *job = NULL;
+	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
 	bool need_full_reset =
 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
 	if (reset_context->reset_req_dev == adev)
 		job = reset_context->job;
 
-	if (amdgpu_sriov_vf(adev)) {
-		/* stop the data exchange thread */
-		amdgpu_virt_fini_data_exchange(adev);
-	}
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_pre_reset(adev);
 
 	amdgpu_fence_driver_isr_toggle(adev, true);
 
@@ -5336,6 +5384,16 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		}
 	}
 
+	if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+		dev_info(tmp_adev->dev, "Dumping IP State\n");
+		/* Trigger ip dump before we reset the asic */
+		for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+			if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+				tmp_adev->ip_blocks[i].version->funcs
+					->dump_ip_state((void *)tmp_adev);
+		dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
+	}
+
 	if (need_full_reset)
 		r = amdgpu_device_ip_suspend(adev);
 	if (need_full_reset)
@@ -5348,47 +5406,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	return r;
 }
 
-static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
-{
-	int i;
-
-	lockdep_assert_held(&adev->reset_domain->sem);
-
-	for (i = 0; i < adev->reset_info.num_regs; i++) {
-		adev->reset_info.reset_dump_reg_value[i] =
-			RREG32(adev->reset_info.reset_dump_reg_list[i]);
-
-		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
-					     adev->reset_info.reset_dump_reg_value[i]);
-	}
-
-	return 0;
-}
-
 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 			 struct amdgpu_reset_context *reset_context)
 {
 	struct amdgpu_device *tmp_adev = NULL;
 	bool need_full_reset, skip_hw_reset, vram_lost = false;
 	int r = 0;
-	uint32_t i;
 
 	/* Try reset handler method first */
 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
 				    reset_list);
 
-	if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
-		amdgpu_reset_reg_dumps(tmp_adev);
-
-		dev_info(tmp_adev->dev, "Dumping IP State\n");
-		/* Trigger ip dump before we reset the asic */
-		for (i = 0; i < tmp_adev->num_ip_blocks; i++)
-			if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
-				tmp_adev->ip_blocks[i].version->funcs
-					->dump_ip_state((void *)tmp_adev);
-		dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
-	}
-
 	reset_context->reset_device_list = device_list_handle;
 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
 	/* If reset handler not implemented, continue; otherwise return */
@@ -5461,7 +5489,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 			vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
 
 			if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
-				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+				amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
 
 			if (vram_lost) {
 				DRM_INFO("VRAM is lost due to GPU reset!\n");
@@ -5513,7 +5541,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 				 * bad_page_threshold value to fix this once
 				 * probing driver again.
 				 */
-				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
+				if (!amdgpu_ras_is_rma(tmp_adev)) {
 					/* must succeed. */
 					amdgpu_ras_resume(tmp_adev);
 				} else {
@@ -5879,7 +5907,7 @@ skip_hw_reset:
 			if (!amdgpu_ring_sched_ready(ring))
 				continue;
 
-			drm_sched_start(&ring->sched, true);
+			drm_sched_start(&ring->sched);
 		}
 
 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -5891,8 +5919,14 @@ skip_hw_reset:
 		tmp_adev->asic_reset_res = 0;
 
 		if (r) {
-			/* bad news, how to tell it to userspace ? */
-			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
+			/* bad news, how to tell it to userspace ?
+			 * for ras error, we should report GPU bad status instead of
+			 * reset failure
+			 */
+			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
+			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
+				dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
+					 atomic_read(&tmp_adev->gpu_reset_counter));
 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 		} else {
 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
@@ -6138,18 +6172,24 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 				      struct amdgpu_device *peer_adev)
 {
 #ifdef CONFIG_HSA_AMD_P2P
-	uint64_t address_mask = peer_adev->dev->dma_mask ?
-		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
-	resource_size_t aper_limit =
-		adev->gmc.aper_base + adev->gmc.aper_size - 1;
 	bool p2p_access =
 		!adev->gmc.xgmi.connected_to_cpu &&
 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
 
-	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
-		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
-		!(adev->gmc.aper_base & address_mask ||
-		  aper_limit & address_mask));
+	bool is_large_bar = adev->gmc.visible_vram_size &&
+		adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
+	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
+
+	if (!p2p_addressable) {
+		uint64_t address_mask = peer_adev->dev->dma_mask ?
+			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
+		resource_size_t aper_limit =
+			adev->gmc.aper_base + adev->gmc.aper_size - 1;
+
+		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
+				    aper_limit & address_mask);
+	}
+	return is_large_bar && p2p_access && p2p_addressable;
 #else
 	return false;
 #endif
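The rewritten peer-access check splits the decision into three predicates: a large BAR (all VRAM CPU-visible), a working PCIe P2P DMA path, and addressability, where an active IOMMU DMA domain now short-circuits the raw aperture-versus-DMA-mask test. A self-contained sketch of that fallback mask arithmetic, with illustrative numbers:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Can 'aperture' be reached by a peer limited to 'dma_bits'?
     * Mirrors the fallback test: any aperture address with bits set
     * above the peer's DMA mask is unreachable without IOMMU remapping. */
    static bool aperture_addressable(uint64_t aper_base, uint64_t aper_size,
    				 unsigned int dma_bits)
    {
    	uint64_t address_mask = ~((1ULL << dma_bits) - 1);
    	uint64_t aper_limit = aper_base + aper_size - 1;

    	return !(aper_base & address_mask) && !(aper_limit & address_mask);
    }

    int main(void)
    {
    	/* 256 MiB aperture placed at 64 GiB: reachable by a 44-bit
    	 * peer, out of reach for a 32-bit one. */
    	uint64_t base = 64ULL << 30, size = 256ULL << 20;

    	printf("44-bit peer: %d\n", aperture_addressable(base, size, 44));
    	printf("32-bit peer: %d\n", aperture_addressable(base, size, 32));
    	return 0;
    }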
@@ -6374,7 +6414,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 		if (!amdgpu_ring_sched_ready(ring))
 			continue;
 
-		drm_sched_start(&ring->sched, true);
+		drm_sched_start(&ring->sched);
 	}
 
 	amdgpu_device_unset_mp1_state(adev);
@@ -131,6 +131,7 @@ enum AMDGPU_DEBUG_MASK {
 	AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
 	AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
 	AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
+	AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
 };
 
 unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -168,6 +169,16 @@ uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu;
 char *amdgpu_virtual_display;
 bool enforce_isolation;
+
+/* Specifies the default granularity for SVM, used in buffer
+ * migration and restoration of backing memory when handling
+ * recoverable page faults.
+ *
+ * The value is given as log(numPages(buffer)); for a 2 MiB
+ * buffer it computes to be 9
+ */
+uint amdgpu_svm_default_granularity = 9;
+
 /*
  * OverDrive(bit 14) disabled by default
  * GFX DCS(bit 19) disabled by default
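The new svm_default_granularity module parameter is a log2 page count, so the default of 9 means 2^9 pages per buffer, which is 2 MiB with 4 KiB pages. A one-liner to make that arithmetic concrete (assuming 4 KiB pages):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int granularity = 9;	/* log2(pages per buffer) */
    	unsigned long bytes = (1UL << granularity) * 4096UL; /* 4 KiB pages */

    	printf("granularity %u -> %lu KiB\n", granularity, bytes / 1024);
    	/* prints: granularity 9 -> 2048 KiB, i.e. 2 MiB */
    	return 0;
    }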
@ -319,6 +330,13 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
|
|||||||
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
|
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
|
||||||
module_param_named(msi, amdgpu_msi, int, 0444);
|
module_param_named(msi, amdgpu_msi, int, 0444);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* DOC: svm_default_granularity (uint)
|
||||||
|
* Used in buffer migration and handling of recoverable page faults
|
||||||
|
*/
|
||||||
|
MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity in log(2^Pages), default 9 = 2^9 = 2 MiB");
|
||||||
|
module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* DOC: lockup_timeout (string)
|
* DOC: lockup_timeout (string)
|
||||||
* Set GPU scheduler timeout value in ms.
|
* Set GPU scheduler timeout value in ms.
|
||||||
@ -2199,6 +2217,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
|
|||||||
pr_info("debug: enable RAS ACA\n");
|
pr_info("debug: enable RAS ACA\n");
|
||||||
adev->debug_enable_ras_aca = true;
|
adev->debug_enable_ras_aca = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
|
||||||
|
pr_info("debug: enable experimental reset features\n");
|
||||||
|
adev->debug_exp_resets = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
|
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
|
||||||
@ -2954,7 +2977,6 @@ static const struct drm_driver amdgpu_kms_driver = {
|
|||||||
DRIVER_SYNCOBJ_TIMELINE,
|
DRIVER_SYNCOBJ_TIMELINE,
|
||||||
.open = amdgpu_driver_open_kms,
|
.open = amdgpu_driver_open_kms,
|
||||||
.postclose = amdgpu_driver_postclose_kms,
|
.postclose = amdgpu_driver_postclose_kms,
|
||||||
.lastclose = amdgpu_driver_lastclose_kms,
|
|
||||||
.ioctls = amdgpu_ioctls_kms,
|
.ioctls = amdgpu_ioctls_kms,
|
||||||
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
|
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
|
||||||
.dumb_create = amdgpu_mode_dumb_create,
|
.dumb_create = amdgpu_mode_dumb_create,
|
||||||
@ -2981,7 +3003,6 @@ const struct drm_driver amdgpu_partition_driver = {
|
|||||||
DRIVER_SYNCOBJ_TIMELINE,
|
DRIVER_SYNCOBJ_TIMELINE,
|
||||||
.open = amdgpu_driver_open_kms,
|
.open = amdgpu_driver_open_kms,
|
||||||
.postclose = amdgpu_driver_postclose_kms,
|
.postclose = amdgpu_driver_postclose_kms,
|
||||||
.lastclose = amdgpu_driver_lastclose_kms,
|
|
||||||
.ioctls = amdgpu_ioctls_kms,
|
.ioctls = amdgpu_ioctls_kms,
|
||||||
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
|
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
|
||||||
.dumb_create = amdgpu_mode_dumb_create,
|
.dumb_create = amdgpu_mode_dumb_create,
|
||||||
|
@ -55,8 +55,6 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
|
|||||||
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
|
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
|
||||||
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
|
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
|
||||||
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
|
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
|
||||||
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
|
|
||||||
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
|
|
||||||
int amdgpu_gart_init(struct amdgpu_device *adev);
|
int amdgpu_gart_init(struct amdgpu_device *adev);
|
||||||
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
|
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
|
||||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
|
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
|
||||||
|
@ -24,10 +24,13 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/firmware.h>
|
#include <linux/firmware.h>
|
||||||
|
#include <linux/pm_runtime.h>
|
||||||
|
|
||||||
#include "amdgpu.h"
|
#include "amdgpu.h"
|
||||||
#include "amdgpu_gfx.h"
|
#include "amdgpu_gfx.h"
|
||||||
#include "amdgpu_rlc.h"
|
#include "amdgpu_rlc.h"
|
||||||
#include "amdgpu_ras.h"
|
#include "amdgpu_ras.h"
|
||||||
|
#include "amdgpu_reset.h"
|
||||||
#include "amdgpu_xcp.h"
|
#include "amdgpu_xcp.h"
|
||||||
#include "amdgpu_xgmi.h"
|
#include "amdgpu_xgmi.h"
|
||||||
|
|
||||||
@ -882,8 +885,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
|
|||||||
int r;
|
int r;
|
||||||
|
|
||||||
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
|
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
|
||||||
if (!amdgpu_persistent_edc_harvesting_supported(adev))
|
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
|
||||||
amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
|
r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
r = amdgpu_ras_block_late_init(adev, ras_block);
|
r = amdgpu_ras_block_late_init(adev, ras_block);
|
||||||
if (r)
|
if (r)
|
||||||
@ -1027,7 +1033,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
|
|||||||
pr_err("critical bug! too many kiq readers\n");
|
pr_err("critical bug! too many kiq readers\n");
|
||||||
goto failed_unlock;
|
goto failed_unlock;
|
||||||
}
|
}
|
||||||
amdgpu_ring_alloc(ring, 32);
|
r = amdgpu_ring_alloc(ring, 32);
|
||||||
|
if (r)
|
||||||
|
goto failed_unlock;
|
||||||
|
|
||||||
amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
|
amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
|
||||||
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
|
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
|
||||||
if (r)
|
if (r)
|
||||||
@ -1093,7 +1102,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
|
|||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&kiq->ring_lock, flags);
|
spin_lock_irqsave(&kiq->ring_lock, flags);
|
||||||
amdgpu_ring_alloc(ring, 32);
|
r = amdgpu_ring_alloc(ring, 32);
|
||||||
|
if (r)
|
||||||
|
goto failed_unlock;
|
||||||
|
|
||||||
amdgpu_ring_emit_wreg(ring, reg, v);
|
amdgpu_ring_emit_wreg(ring, reg, v);
|
||||||
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
|
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
|
||||||
if (r)
|
if (r)
|
||||||
@ -1129,6 +1141,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
|
|||||||
|
|
||||||
failed_undo:
|
failed_undo:
|
||||||
amdgpu_ring_undo(ring);
|
amdgpu_ring_undo(ring);
|
||||||
|
failed_unlock:
|
||||||
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
spin_unlock_irqrestore(&kiq->ring_lock, flags);
|
||||||
failed_kiq_write:
|
failed_kiq_write:
|
||||||
dev_err(adev->dev, "failed to write reg:%x\n", reg);
|
dev_err(adev->dev, "failed to write reg:%x\n", reg);
|
||||||
@ -1381,6 +1394,217 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
|
|||||||
return sysfs_emit(buf, "%s\n", supported_partition);
|
return sysfs_emit(buf, "%s\n", supported_partition);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
|
||||||
|
{
|
||||||
|
struct amdgpu_device *adev = ring->adev;
|
||||||
|
struct drm_gpu_scheduler *sched = &ring->sched;
|
||||||
|
struct drm_sched_entity entity;
|
||||||
|
struct dma_fence *f;
|
||||||
|
struct amdgpu_job *job;
|
||||||
|
struct amdgpu_ib *ib;
|
||||||
|
int i, r;
|
||||||
|
|
||||||
|
/* Initialize the scheduler entity */
|
||||||
|
r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
|
||||||
|
&sched, 1, NULL);
|
||||||
|
if (r) {
|
||||||
|
dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
|
|
||||||
|
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
|
||||||
|
64, 0,
|
||||||
|
&job);
|
||||||
|
if (r)
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
job->enforce_isolation = true;
|
||||||
|
|
||||||
|
ib = &job->ibs[0];
|
||||||
|
for (i = 0; i <= ring->funcs->align_mask; ++i)
|
||||||
|
ib->ptr[i] = ring->funcs->nop;
|
||||||
|
ib->length_dw = ring->funcs->align_mask + 1;
|
||||||
|
|
||||||
|
f = amdgpu_job_submit(job);
|
||||||
|
|
||||||
|
r = dma_fence_wait(f, false);
|
||||||
|
if (r)
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
dma_fence_put(f);
|
||||||
|
|
||||||
|
/* Clean up the scheduler entity */
|
||||||
|
drm_sched_entity_destroy(&entity);
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
err:
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
|
||||||
|
{
|
||||||
|
int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
|
||||||
|
struct amdgpu_ring *ring;
|
||||||
|
int num_xcc_to_clear;
|
||||||
|
int i, r, xcc_id;
|
||||||
|
|
||||||
|
if (adev->gfx.num_xcc_per_xcp)
|
||||||
|
num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
|
||||||
|
else
|
||||||
|
num_xcc_to_clear = 1;
|
||||||
|
|
||||||
|
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
|
||||||
|
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
|
||||||
|
ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
|
||||||
|
if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
|
||||||
|
r = amdgpu_gfx_run_cleaner_shader_job(ring);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
num_xcc_to_clear--;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (num_xcc_to_clear)
|
||||||
|
return -ENOENT;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
|
||||||
|
struct device_attribute *attr,
|
||||||
|
const char *buf,
|
||||||
|
size_t count)
|
||||||
|
{
|
||||||
|
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||||
|
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||||
|
int ret;
|
||||||
|
long value;
|
||||||
|
|
||||||
|
if (amdgpu_in_reset(adev))
|
||||||
|
return -EPERM;
|
||||||
|
if (adev->in_suspend && !adev->in_runpm)
|
||||||
|
return -EPERM;
|
||||||
|
|
||||||
|
ret = kstrtol(buf, 0, &value);
|
||||||
|
|
||||||
|
if (ret)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (value < 0)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (adev->xcp_mgr) {
|
||||||
|
if (value >= adev->xcp_mgr->num_xcps)
|
||||||
|
return -EINVAL;
|
||||||
|
} else {
|
||||||
|
if (value > 1)
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = pm_runtime_get_sync(ddev->dev);
|
||||||
|
if (ret < 0) {
|
||||||
|
pm_runtime_put_autosuspend(ddev->dev);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
|
||||||
|
|
||||||
|
pm_runtime_mark_last_busy(ddev->dev);
|
||||||
|
pm_runtime_put_autosuspend(ddev->dev);
|
||||||
|
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
|
||||||
|
struct device_attribute *attr,
|
||||||
|
char *buf)
|
||||||
|
{
|
||||||
|
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||||
|
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||||
|
int i;
|
||||||
|
ssize_t size = 0;
|
||||||
|
|
||||||
|
if (adev->xcp_mgr) {
|
||||||
|
for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
|
||||||
|
size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
|
||||||
|
if (i < (adev->xcp_mgr->num_xcps - 1))
|
||||||
|
size += sysfs_emit_at(buf, size, " ");
|
||||||
|
}
|
||||||
|
buf[size++] = '\n';
|
||||||
|
} else {
|
||||||
|
size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
|
||||||
|
}
|
||||||
|
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
|
||||||
|
struct device_attribute *attr,
|
||||||
|
const char *buf, size_t count)
|
||||||
|
{
|
||||||
|
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||||
|
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||||
|
long partition_values[MAX_XCP] = {0};
|
||||||
|
int ret, i, num_partitions;
|
||||||
|
const char *input_buf = buf;
|
||||||
|
|
||||||
|
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
|
||||||
|
ret = sscanf(input_buf, "%ld", &partition_values[i]);
|
||||||
|
if (ret <= 0)
|
||||||
|
break;
|
||||||
|
|
||||||
|
/* Move the pointer to the next value in the string */
|
||||||
|
input_buf = strchr(input_buf, ' ');
|
||||||
|
if (input_buf) {
|
||||||
|
input_buf++;
|
||||||
|
} else {
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
num_partitions = i;
|
||||||
|
|
||||||
|
if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (!adev->xcp_mgr && num_partitions != 1)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
for (i = 0; i < num_partitions; i++) {
|
||||||
|
if (partition_values[i] != 0 && partition_values[i] != 1)
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex_lock(&adev->enforce_isolation_mutex);
|
||||||
|
|
||||||
|
for (i = 0; i < num_partitions; i++) {
|
||||||
|
if (adev->enforce_isolation[i] && !partition_values[i]) {
|
||||||
|
/* Going from enabled to disabled */
|
||||||
|
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
|
||||||
|
} else if (!adev->enforce_isolation[i] && partition_values[i]) {
|
||||||
|
/* Going from disabled to enabled */
|
||||||
|
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
|
||||||
|
}
|
||||||
|
adev->enforce_isolation[i] = partition_values[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex_unlock(&adev->enforce_isolation_mutex);
|
||||||
|
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
|
||||||
|
static DEVICE_ATTR(run_cleaner_shader, 0200,
|
||||||
|
NULL, amdgpu_gfx_set_run_cleaner_shader);
|
||||||
|
|
||||||
|
static DEVICE_ATTR(enforce_isolation, 0644,
|
||||||
|
amdgpu_gfx_get_enforce_isolation,
|
||||||
|
amdgpu_gfx_set_enforce_isolation);
|
||||||
|
|
||||||
static DEVICE_ATTR(current_compute_partition, 0644,
|
static DEVICE_ATTR(current_compute_partition, 0644,
|
||||||
amdgpu_gfx_get_current_compute_partition,
|
amdgpu_gfx_get_current_compute_partition,
|
||||||
amdgpu_gfx_set_compute_partition);
|
amdgpu_gfx_set_compute_partition);
|
||||||
@ -1406,3 +1630,229 @@ void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
|
|||||||
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
|
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
|
||||||
device_remove_file(adev->dev, &dev_attr_available_compute_partition);
|
device_remove_file(adev->dev, &dev_attr_available_compute_partition);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
int r;
|
||||||
|
|
||||||
|
if (!amdgpu_sriov_vf(adev)) {
|
||||||
|
r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
|
r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
if (!amdgpu_sriov_vf(adev))
|
||||||
|
device_remove_file(adev->dev, &dev_attr_enforce_isolation);
|
||||||
|
device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
|
||||||
|
}
|
||||||
|
|
||||||
|
int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
|
||||||
|
unsigned int cleaner_shader_size)
|
||||||
|
{
|
||||||
|
if (!adev->gfx.enable_cleaner_shader)
|
||||||
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
|
||||||
|
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
|
||||||
|
&adev->gfx.cleaner_shader_obj,
|
||||||
|
&adev->gfx.cleaner_shader_gpu_addr,
|
||||||
|
(void **)&adev->gfx.cleaner_shader_cpu_ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
if (!adev->gfx.enable_cleaner_shader)
|
||||||
|
return;
|
||||||
|
|
||||||
|
amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
|
||||||
|
&adev->gfx.cleaner_shader_gpu_addr,
|
||||||
|
(void **)&adev->gfx.cleaner_shader_cpu_ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
|
||||||
|
unsigned int cleaner_shader_size,
|
||||||
|
const void *cleaner_shader_ptr)
|
||||||
|
{
|
||||||
|
if (!adev->gfx.enable_cleaner_shader)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
|
||||||
|
memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
|
||||||
|
cleaner_shader_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
|
||||||
|
* @adev: amdgpu_device pointer
|
||||||
|
* @idx: Index of the scheduler to control
|
||||||
|
* @enable: Whether to enable or disable the KFD scheduler
|
||||||
|
*
|
||||||
|
* This function is used to control the KFD (Kernel Fusion Driver) scheduler
|
||||||
|
* from the KGD. It is part of the cleaner shader feature. This function plays
|
||||||
|
* a key role in enforcing process isolation on the GPU.
|
||||||
|
*
|
||||||
|
* The function uses a reference count mechanism (kfd_sch_req_count) to keep
|
||||||
|
* track of the number of requests to enable the KFD scheduler. When a request
|
||||||
|
* to enable the KFD scheduler is made, the reference count is decremented.
|
||||||
|
* When the reference count reaches zero, a delayed work is scheduled to
|
||||||
|
* enforce isolation after a delay of GFX_SLICE_PERIOD.
|
||||||
|
*
|
||||||
|
* When a request to disable the KFD scheduler is made, the function first
|
||||||
|
* checks if the reference count is zero. If it is, it cancels the delayed work
|
||||||
|
* for enforcing isolation and checks if the KFD scheduler is active. If the
|
||||||
|
* KFD scheduler is active, it sends a request to stop the KFD scheduler and
|
||||||
|
* sets the KFD scheduler state to inactive. Then, it increments the reference
|
||||||
|
* count.
|
||||||
|
*
|
||||||
|
* The function is synchronized using the kfd_sch_mutex to ensure that the KFD
|
||||||
|
* scheduler state and reference count are updated atomically.
|
||||||
|
*
|
||||||
|
* Note: If the reference count is already zero when a request to enable the
|
||||||
|
* KFD scheduler is made, it means there's an imbalance bug somewhere. The
|
||||||
|
* function triggers a warning in this case.
|
||||||
|
*/
|
||||||
|
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
|
||||||
|
bool enable)
|
||||||
|
{
|
||||||
|
mutex_lock(&adev->gfx.kfd_sch_mutex);
|
||||||
|
|
||||||
|
if (enable) {
|
||||||
|
/* If the count is already 0, it means there's an imbalance bug somewhere.
|
||||||
|
* Note that the bug may be in a different caller than the one which triggers the
|
||||||
|
* WARN_ON_ONCE.
|
||||||
|
*/
|
||||||
|
if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
|
||||||
|
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
|
||||||
|
goto unlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
adev->gfx.kfd_sch_req_count[idx]--;
|
||||||
|
|
||||||
|
if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
|
||||||
|
adev->gfx.kfd_sch_inactive[idx]) {
|
||||||
|
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
|
||||||
|
GFX_SLICE_PERIOD);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (adev->gfx.kfd_sch_req_count[idx] == 0) {
|
||||||
|
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
|
||||||
|
if (!adev->gfx.kfd_sch_inactive[idx]) {
|
||||||
|
amdgpu_amdkfd_stop_sched(adev, idx);
|
||||||
|
adev->gfx.kfd_sch_inactive[idx] = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
adev->gfx.kfd_sch_req_count[idx]++;
|
||||||
|
}
|
||||||
|
|
||||||
|
unlock:
|
||||||
|
mutex_unlock(&adev->gfx.kfd_sch_mutex);
|
||||||
|
}
|
||||||
|
|
||||||
+/**
+ * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
+ *
+ * @work: work_struct.
+ *
+ * This function is the work handler for enforcing shader isolation on AMD GPUs.
+ * It counts the number of emitted fences for each GFX and compute ring. If there
+ * are any fences, it schedules the `enforce_isolation_work` to be run after a
+ * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion
+ * Driver (KFD) to resume the runqueue. The function is synchronized using the
+ * `enforce_isolation_mutex`.
+ */
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
+{
+	struct amdgpu_isolation_work *isolation_work =
+		container_of(work, struct amdgpu_isolation_work, work.work);
+	struct amdgpu_device *adev = isolation_work->adev;
+	u32 i, idx, fences = 0;
+
+	if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
+		idx = 0;
+	else
+		idx = isolation_work->xcp_id;
+
+	if (idx >= MAX_XCP)
+		return;
+
+	mutex_lock(&adev->enforce_isolation_mutex);
+	for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
+		if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
+			fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+	}
+	for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
+		if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
+			fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+	}
+	if (fences) {
+		schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+				      GFX_SLICE_PERIOD);
+	} else {
+		/* Tell KFD to resume the runqueue */
+		if (adev->kfd.init_complete) {
+			WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
+			WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+			amdgpu_amdkfd_start_sched(adev, idx);
+			adev->gfx.kfd_sch_inactive[idx] = false;
+		}
+	}
+	mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
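The handler's shape — count outstanding work, re-arm yourself if anything is still in flight, act only once fully drained — is a common deferred-quiesce pattern. A hedged, self-contained sketch of that control flow (names are illustrative, and an inline sleep loop stands in for the delayed-work re-arm):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint pending_fences;

static void *retire_fences(void *arg)
{
	(void)arg;
	while (atomic_load(&pending_fences) > 0) {
		usleep(5 * 1000);              /* a job completes */
		atomic_fetch_sub(&pending_fences, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&pending_fences, 3);
	pthread_create(&t, NULL, retire_fences, NULL);

	/* The handler's loop: re-check after each "slice" and only act
	 * once nothing is in flight (the driver re-arms a delayed work
	 * item instead of sleeping inline). */
	while (atomic_load(&pending_fences) != 0)
		usleep(10 * 1000);             /* re-arm for another slice */

	printf("rings drained: resume the KFD runqueue\n");
	pthread_join(t, NULL);
	return 0;
}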
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 idx;
+
+	if (!adev->gfx.enable_cleaner_shader)
+		return;
+
+	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+		idx = 0;
+	else
+		idx = ring->xcp_id;
+
+	if (idx >= MAX_XCP)
+		return;
+
+	mutex_lock(&adev->enforce_isolation_mutex);
+	if (adev->enforce_isolation[idx]) {
+		if (adev->kfd.init_complete)
+			amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+	}
+	mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 idx;
+
+	if (!adev->gfx.enable_cleaner_shader)
+		return;
+
+	if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+		idx = 0;
+	else
+		idx = ring->xcp_id;
+
+	if (idx >= MAX_XCP)
+		return;
+
+	mutex_lock(&adev->enforce_isolation_mutex);
+	if (adev->enforce_isolation[idx]) {
+		if (adev->kfd.init_complete)
+			amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+	}
+	mutex_unlock(&adev->enforce_isolation_mutex);
+}
@@ -34,6 +34,7 @@
 #include "soc15.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_ring_mux.h"
+#include "amdgpu_xcp.h"
 
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE			0x00000000L
@@ -138,6 +139,10 @@ struct kiq_pm4_funcs {
 	void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
 				uint16_t pasid, uint32_t flush_type,
 				bool all_hub);
+	void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
+				   uint32_t queue_type, uint32_t me_id,
+				   uint32_t pipe_id, uint32_t queue_id,
+				   uint32_t xcc_id, uint32_t vmid);
 	/* Packet sizes */
 	int set_resources_size;
 	int map_queues_size;
@@ -345,6 +350,12 @@ struct amdgpu_me {
 	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
 };
 
+struct amdgpu_isolation_work {
+	struct amdgpu_device		*adev;
+	u32				xcp_id;
+	struct delayed_work		work;
+};
+
 struct amdgpu_gfx {
 	struct mutex			gpu_clock_mutex;
 	struct amdgpu_gfx_config	config;
@@ -397,6 +408,7 @@ struct amdgpu_gfx {
 	struct amdgpu_irq_src		eop_irq;
 	struct amdgpu_irq_src		priv_reg_irq;
 	struct amdgpu_irq_src		priv_inst_irq;
+	struct amdgpu_irq_src		bad_op_irq;
 	struct amdgpu_irq_src		cp_ecc_error_irq;
 	struct amdgpu_irq_src		sq_irq;
 	struct amdgpu_irq_src		rlc_gc_fed_irq;
@@ -445,6 +457,21 @@ struct amdgpu_gfx {
 	uint32_t			*ip_dump_core;
 	uint32_t			*ip_dump_compute_queues;
 	uint32_t			*ip_dump_gfx_queues;
+
+	struct mutex			reset_sem_mutex;
+
+	/* cleaner shader */
+	struct amdgpu_bo		*cleaner_shader_obj;
+	unsigned int			cleaner_shader_size;
+	u64				cleaner_shader_gpu_addr;
+	void				*cleaner_shader_cpu_ptr;
+	const void			*cleaner_shader_ptr;
+	bool				enable_cleaner_shader;
+	struct amdgpu_isolation_work	enforce_isolation[MAX_XCP];
+	/* Mutex for synchronizing KFD scheduler operations */
+	struct mutex			kfd_sch_mutex;
+	u64				kfd_sch_req_count[MAX_XCP];
+	bool				kfd_sch_inactive[MAX_XCP];
 };
 
 struct amdgpu_gfx_ras_reg_entry {
@@ -546,6 +573,17 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
 		void *ras_error_status,
 		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
 				int xcc_id));
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+				      unsigned int cleaner_shader_size);
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+				    unsigned int cleaner_shader_size,
+				    const void *cleaner_shader_ptr);
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev);
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
 
 static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
 {
@@ -38,8 +38,6 @@ struct amdgpu_gfxhub_funcs {
 	void (*mode2_save_regs)(struct amdgpu_device *adev);
 	void (*mode2_restore_regs)(struct amdgpu_device *adev);
 	void (*halt)(struct amdgpu_device *adev);
-	bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
-					  int xcc_id);
 };
 
 struct amdgpu_gfxhub {
@@ -786,7 +786,8 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
 		goto failed_kiq;
 
 	might_sleep();
-	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+	       !amdgpu_reset_pending(adev->reset_domain)) {
 
 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r || !idle)
 		goto error;
 
-	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+	if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
 		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
@@ -476,15 +476,19 @@ error:
 
 /*
  * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @adev: amdgpu_device pointer
  * @vm: the VM to check
  * @vmhub: the VMHUB which will be used
  *
  * Returns: True if the VM will use a reserved VMID.
  */
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+			       struct amdgpu_vm *vm, unsigned int vmhub)
 {
 	return vm->reserved_vmid[vmhub] ||
-		(enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+		(adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
+					 vm->root.bo->xcp_id : 0] &&
+		 AMDGPU_IS_GFXHUB(vmhub));
 }
 
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
@@ -600,9 +604,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 		}
 	}
 	/* alloc a default reserved vmid to enforce isolation */
-	if (enforce_isolation)
-		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
+	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+		if (adev->enforce_isolation[i])
+			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+	}
 }
 
 /**
@@ -78,7 +78,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 			       struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+			       struct amdgpu_vm *vm, unsigned int vmhub);
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -49,6 +49,7 @@ struct amdgpu_isp {
 	const struct isp_funcs *funcs;
 	struct mfd_cell *isp_cell;
 	struct resource *isp_res;
+	struct resource *isp_i2c_res;
 	struct isp_platform_data *isp_pdata;
 	unsigned int harvest_config;
 	const struct firmware *fw;
@@ -30,6 +30,60 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
+
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+				    struct amdgpu_job *job)
+{
+	int i;
+
+	dev_info(adev->dev, "Dumping IP State\n");
+	for (i = 0; i < adev->num_ip_blocks; i++)
+		if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+			adev->ip_blocks[i].version->funcs
+				->dump_ip_state((void *)adev);
+	dev_info(adev->dev, "Dumping IP State Completed\n");
+
+	amdgpu_coredump(adev, true, false, job);
+}
+
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+				 struct amdgpu_job *job)
+{
+	struct list_head device_list, *device_list_handle = NULL;
+	struct amdgpu_device *tmp_adev = NULL;
+	struct amdgpu_hive_info *hive = NULL;
+
+	if (!amdgpu_sriov_vf(adev))
+		hive = amdgpu_get_xgmi_hive(adev);
+	if (hive)
+		mutex_lock(&hive->hive_lock);
+	/*
+	 * Reuse the logic in amdgpu_device_gpu_recover() to build list of
+	 * devices for code dump
+	 */
+	INIT_LIST_HEAD(&device_list);
+	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+			list_add_tail(&tmp_adev->reset_list, &device_list);
+		if (!list_is_first(&adev->reset_list, &device_list))
+			list_rotate_to_front(&adev->reset_list, &device_list);
+		device_list_handle = &device_list;
+	} else {
+		list_add_tail(&adev->reset_list, &device_list);
+		device_list_handle = &device_list;
+	}
+
+	/* Do the coredump for each device */
+	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+		amdgpu_job_do_core_dump(tmp_adev, job);
+
+	if (hive) {
+		mutex_unlock(&hive->hive_lock);
+		amdgpu_put_xgmi_hive(hive);
+	}
+}
+
 static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
@@ -48,9 +102,14 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 		return DRM_GPU_SCHED_STAT_ENODEV;
 	}
 
-
 	adev->job_hang = true;
 
+	/*
+	 * Do the coredump immediately after a job timeout to get a very
+	 * close dump/snapshot/representation of GPU's current error status
+	 */
+	amdgpu_job_core_dump(adev, job);
+
 	if (amdgpu_gpu_recovery &&
 	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
 		dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
@@ -72,6 +131,26 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 
 	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
 
+	/* attempt a per ring reset */
+	if (amdgpu_gpu_recovery &&
+	    ring->funcs->reset) {
+		/* stop the scheduler, but don't mess with the
+		 * bad job yet because if ring reset fails
+		 * we'll fall back to full GPU reset.
+		 */
+		drm_sched_wqueue_stop(&ring->sched);
+		r = amdgpu_ring_reset(ring, job->vmid);
+		if (!r) {
+			if (amdgpu_ring_sched_ready(ring))
+				drm_sched_stop(&ring->sched, s_job);
+			atomic_inc(&ring->adev->gpu_reset_counter);
+			amdgpu_fence_driver_force_completion(ring);
+			if (amdgpu_ring_sched_ready(ring))
+				drm_sched_start(&ring->sched);
+			goto exit;
+		}
+	}
+
 	if (amdgpu_device_should_recover_gpu(ring->adev)) {
 		struct amdgpu_reset_context reset_context;
 		memset(&reset_context, 0, sizeof(reset_context));
@@ -81,6 +160,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 		reset_context.src = AMDGPU_RESET_SRC_JOB;
 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 
+		/*
+		 * To avoid an unnecessary extra coredump, as we have already
+		 * got the very close representation of GPU's error status
+		 */
+		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
 		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
 		if (r)
 			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
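The timeout path above now forms an escalation ladder: soft recovery (kill the offending wave, job survives), then a per-ring reset (one queue pays), and only as a last resort a full-GPU reset. A hedged sketch of that control flow in plain C — the recovery functions are placeholders standing in for the driver calls, with canned outcomes for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Placeholders for the three recovery levels used above. */
static bool soft_recover(void)   { return false; } /* e.g. kill the wave */
static bool per_ring_reset(void) { return true; }  /* reset one queue */
static void full_gpu_reset(void) { puts("full GPU reset"); }

static void handle_timeout(void)
{
	if (soft_recover()) {
		puts("soft recovered, job keeps running");
		return;
	}
	/* the job is dead from here on */
	if (per_ring_reset()) {
		puts("per-ring reset, other rings untouched");
		return;
	}
	full_gpu_reset();                /* last resort: everyone pays */
}

int main(void)
{
	handle_timeout();
	return 0;
}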
@@ -76,6 +76,9 @@ struct amdgpu_job {
 	/* job_run_counter >= 1 means a resubmit job */
 	uint32_t		job_run_counter;
 
+	/* enforce isolation */
+	bool			enforce_isolation;
 
 	uint32_t		num_ibs;
 	struct amdgpu_ib	ibs[];
 };
@@ -43,6 +43,7 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_display.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
 #include "amd_pcie.h"
 
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
@@ -778,6 +779,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_READ_MMR_REG: {
+		int ret = 0;
 		unsigned int n, alloc_size;
 		uint32_t *regs;
 		unsigned int se_num = (info->read_mmr_reg.instance >>
@@ -787,24 +789,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 				  AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
 				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
 
+		if (!down_read_trylock(&adev->reset_domain->sem))
+			return -ENOENT;
+
 		/* set full masks if the userspace set all bits
 		 * in the bitfields
 		 */
-		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
 			se_num = 0xffffffff;
-		else if (se_num >= AMDGPU_GFX_MAX_SE)
-			return -EINVAL;
-		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
-			sh_num = 0xffffffff;
-		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
-			return -EINVAL;
+		} else if (se_num >= AMDGPU_GFX_MAX_SE) {
+			ret = -EINVAL;
+			goto out;
+		}
 
-		if (info->read_mmr_reg.count > 128)
-			return -EINVAL;
+		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
+			sh_num = 0xffffffff;
+		} else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (info->read_mmr_reg.count > 128) {
+			ret = -EINVAL;
+			goto out;
+		}
 
 		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
-		if (!regs)
-			return -ENOMEM;
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out;
+		}
 
 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
 		amdgpu_gfx_off_ctrl(adev, false);
@@ -816,13 +831,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 					      info->read_mmr_reg.dword_offset + i);
 				kfree(regs);
 				amdgpu_gfx_off_ctrl(adev, true);
-				return -EFAULT;
+				ret = -EFAULT;
+				goto out;
 			}
 		}
 		amdgpu_gfx_off_ctrl(adev, true);
 		n = copy_to_user(out, regs, min(size, alloc_size));
 		kfree(regs);
-		return n ? -EFAULT : 0;
+		ret = (n ? -EFAULT : 0);
+out:
+		up_read(&adev->reset_domain->sem);
+		return ret;
 	}
 	case AMDGPU_INFO_DEV_INFO: {
 		struct drm_amdgpu_info_device *dev_info;
@@ -1269,23 +1288,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	return 0;
 }
 
-
-/*
- * Outdated mess for old drm with Xorg being in charge (void function now).
- */
-/**
- * amdgpu_driver_lastclose_kms - drm callback for last close
- *
- * @dev: drm dev pointer
- *
- * Switch vga_switcheroo state after last close (all asics).
- */
-void amdgpu_driver_lastclose_kms(struct drm_device *dev)
-{
-	drm_fb_helper_lastclose(dev);
-	vga_switcheroo_process_delayed_switch();
-}
-
 /**
  * amdgpu_driver_open_kms - drm callback for open
  *
@@ -396,7 +396,6 @@ static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum
 static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
 				       struct mca_bank_set *mca_set, struct ras_err_data *err_data)
 {
-	struct ras_err_addr err_addr;
 	struct amdgpu_smuio_mcm_config_info mcm_info;
 	struct mca_bank_node *node, *tmp;
 	struct mca_bank_entry *entry;
@@ -421,27 +420,20 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r
 			continue;
 
 		memset(&mcm_info, 0, sizeof(mcm_info));
-		memset(&err_addr, 0, sizeof(err_addr));
 
 		mcm_info.socket_id = entry->info.socket_id;
 		mcm_info.die_id = entry->info.aid;
 
-		if (blk == AMDGPU_RAS_BLOCK__UMC) {
-			err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
-			err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
-			err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
-		}
-
 		if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
 			amdgpu_ras_error_statistic_ue_count(err_data,
-				&mcm_info, &err_addr, (uint64_t)count);
+				&mcm_info, (uint64_t)count);
 		} else {
 			if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
 				amdgpu_ras_error_statistic_de_count(err_data,
-					&mcm_info, &err_addr, (uint64_t)count);
+					&mcm_info, (uint64_t)count);
 			else
 				amdgpu_ras_error_statistic_ce_count(err_data,
-					&mcm_info, &err_addr, (uint64_t)count);
+					&mcm_info, (uint64_t)count);
 		}
 
 		amdgpu_mca_bank_set_remove_node(mca_set, node);
@@ -501,60 +501,50 @@ int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
 
 int amdgpu_mes_suspend(struct amdgpu_device *adev)
 {
-	struct idr *idp;
-	struct amdgpu_mes_process *process;
-	struct amdgpu_mes_gang *gang;
 	struct mes_suspend_gang_input input;
-	int r, pasid;
+	int r;
+
+	if (!amdgpu_mes_suspend_resume_all_supported(adev))
+		return 0;
+
+	memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
+	input.suspend_all_gangs = 1;
 
 	/*
 	 * Avoid taking any other locks under MES lock to avoid circular
 	 * lock dependencies.
 	 */
 	amdgpu_mes_lock(&adev->mes);
-
-	idp = &adev->mes.pasid_idr;
-
-	idr_for_each_entry(idp, process, pasid) {
-		list_for_each_entry(gang, &process->gang_list, list) {
-			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
-			if (r)
-				DRM_ERROR("failed to suspend pasid %d gangid %d",
-					  pasid, gang->gang_id);
-		}
-	}
-
+	r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
 	amdgpu_mes_unlock(&adev->mes);
-	return 0;
+	if (r)
+		DRM_ERROR("failed to suspend all gangs");
+
+	return r;
 }
 
 int amdgpu_mes_resume(struct amdgpu_device *adev)
 {
-	struct idr *idp;
-	struct amdgpu_mes_process *process;
-	struct amdgpu_mes_gang *gang;
 	struct mes_resume_gang_input input;
-	int r, pasid;
+	int r;
+
+	if (!amdgpu_mes_suspend_resume_all_supported(adev))
+		return 0;
+
+	memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
+	input.resume_all_gangs = 1;
 
 	/*
 	 * Avoid taking any other locks under MES lock to avoid circular
 	 * lock dependencies.
 	 */
	amdgpu_mes_lock(&adev->mes);
-
-	idp = &adev->mes.pasid_idr;
-
-	idr_for_each_entry(idp, process, pasid) {
-		list_for_each_entry(gang, &process->gang_list, list) {
-			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
-			if (r)
-				DRM_ERROR("failed to resume pasid %d gangid %d",
-					  pasid, gang->gang_id);
-		}
-	}
-
+	r = adev->mes.funcs->resume_gang(&adev->mes, &input);
 	amdgpu_mes_unlock(&adev->mes);
-	return 0;
+	if (r)
+		DRM_ERROR("failed to resume all gangs");
+
+	return r;
 }
 
 static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
@@ -793,6 +783,68 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
 	return 0;
 }
 
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
+{
+	unsigned long flags;
+	struct amdgpu_mes_queue *queue;
+	struct amdgpu_mes_gang *gang;
+	struct mes_reset_queue_input queue_input;
+	int r;
+
+	/*
+	 * Avoid taking any other locks under MES lock to avoid circular
+	 * lock dependencies.
+	 */
+	amdgpu_mes_lock(&adev->mes);
+
+	/* remove the mes gang from idr list */
+	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
+
+	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
+	if (!queue) {
+		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+		amdgpu_mes_unlock(&adev->mes);
+		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+
+	DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
+		  queue->doorbell_off);
+
+	gang = queue->gang;
+	queue_input.doorbell_offset = queue->doorbell_off;
+	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
+
+	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+	if (r)
+		DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
+			  queue_id);
+
+	amdgpu_mes_unlock(&adev->mes);
+
+	return 0;
+}
+
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+				   int me_id, int pipe_id, int queue_id, int vmid)
+{
+	struct mes_reset_queue_input queue_input;
+	int r;
+
+	queue_input.queue_type = queue_type;
+	queue_input.use_mmio = true;
+	queue_input.me_id = me_id;
+	queue_input.pipe_id = pipe_id;
+	queue_input.queue_id = queue_id;
+	queue_input.vmid = vmid;
+	r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+	if (r)
+		DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
+			  queue_id);
+	return r;
+}
+
 int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
 				struct amdgpu_ring *ring)
 {
@@ -838,6 +890,33 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
 	return r;
 }
 
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  unsigned int vmid,
+				  bool use_mmio)
+{
+	struct mes_reset_legacy_queue_input queue_input;
+	int r;
+
+	memset(&queue_input, 0, sizeof(queue_input));
+
+	queue_input.queue_type = ring->funcs->type;
+	queue_input.doorbell_offset = ring->doorbell_index;
+	queue_input.me_id = ring->me;
+	queue_input.pipe_id = ring->pipe;
+	queue_input.queue_id = ring->queue;
+	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+	queue_input.wptr_addr = ring->wptr_gpu_addr;
+	queue_input.vmid = vmid;
+	queue_input.use_mmio = use_mmio;
+
+	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+	if (r)
+		DRM_ERROR("failed to reset legacy queue\n");
+
+	return r;
+}
+
 uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
 	struct mes_misc_op_input op_input;
@@ -1533,7 +1612,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
 			   pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
 	}
 
-	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
 	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
 		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
 		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
@@ -1584,6 +1663,19 @@ out:
 	return r;
 }
 
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
+{
+	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+	bool is_supported = false;
+
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+	    mes_rev >= 0x63)
+		is_supported = true;
+
+	return is_supported;
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
@@ -249,6 +249,18 @@ struct mes_remove_queue_input {
 	uint64_t	gang_context_addr;
 };
 
+struct mes_reset_queue_input {
+	uint32_t	doorbell_offset;
+	uint64_t	gang_context_addr;
+	bool		use_mmio;
+	uint32_t	queue_type;
+	uint32_t	me_id;
+	uint32_t	pipe_id;
+	uint32_t	queue_id;
+	uint32_t	xcc_id;
+	uint32_t	vmid;
+};
+
 struct mes_map_legacy_queue_input {
 	uint32_t	queue_type;
 	uint32_t	doorbell_offset;
@@ -280,6 +292,18 @@ struct mes_resume_gang_input {
 	uint64_t	gang_context_addr;
 };
 
+struct mes_reset_legacy_queue_input {
+	uint32_t	queue_type;
+	uint32_t	doorbell_offset;
+	bool		use_mmio;
+	uint32_t	me_id;
+	uint32_t	pipe_id;
+	uint32_t	queue_id;
+	uint64_t	mqd_addr;
+	uint64_t	wptr_addr;
+	uint32_t	vmid;
+};
+
 enum mes_misc_opcode {
 	MES_MISC_OP_WRITE_REG,
 	MES_MISC_OP_READ_REG,
@@ -348,6 +372,12 @@ struct amdgpu_mes_funcs {
 
 	int (*misc_op)(struct amdgpu_mes *mes,
 		       struct mes_misc_op_input *input);
+
+	int (*reset_legacy_queue)(struct amdgpu_mes *mes,
+				  struct mes_reset_legacy_queue_input *input);
+
+	int (*reset_hw_queue)(struct amdgpu_mes *mes,
+			      struct mes_reset_queue_input *input);
 };
 
 #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -375,6 +405,9 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
 			    struct amdgpu_mes_queue_properties *qprops,
 			    int *queue_id);
 int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+				   int me_id, int pipe_id, int queue_id, int vmid);
 
 int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
 				struct amdgpu_ring *ring);
@@ -382,6 +415,10 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
 				  struct amdgpu_ring *ring,
 				  enum amdgpu_unmap_queues_action action,
 				  u64 gpu_addr, u64 seq);
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  unsigned int vmid,
+				  bool use_mmio);
 
 uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
 int amdgpu_mes_wreg(struct amdgpu_device *adev,
@@ -479,4 +516,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
 	memalloc_noreclaim_restore(mes->saved_flags);
 	mutex_unlock(&mes->mutex_hidden);
 }
+
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
 #endif /* __AMDGPU_MES_H__ */
@@ -63,8 +63,6 @@ struct amdgpu_mmhub_funcs {
 			uint64_t page_table_base);
 	void (*update_power_gating)(struct amdgpu_device *adev,
 				bool enable);
-	bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
-					  int hub_inst);
 };
 
 struct amdgpu_mmhub {
@@ -51,6 +51,7 @@ struct amdgpu_encoder;
 struct amdgpu_router;
 struct amdgpu_hpd;
 struct edid;
+struct drm_edid;
 
 #define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
 #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
@@ -326,8 +327,7 @@ struct amdgpu_mode_info {
 	/* FMT dithering */
 	struct drm_property *dither_property;
 	/* hardcoded DFP edid from BIOS */
-	struct edid *bios_hardcoded_edid;
-	int bios_hardcoded_edid_size;
+	const struct drm_edid *bios_hardcoded_edid;
 
 	/* firmware flags */
 	u32 firmware_flags;
@@ -90,6 +90,12 @@ struct amdgpu_bo_va {
 	bool				cleared;
 
 	bool				is_xgmi;
+
+	/*
+	 * protected by vm reservation lock
+	 * if non-zero, cannot unmap from GPU because user queues may still access it
+	 */
+	unsigned int			queue_refcount;
 };
 
 struct amdgpu_bo {
@@ -94,7 +94,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n
 	ref_div_max = min(128 / post_div, ref_div_max);
 
 	/* get matching reference and feedback divider */
-	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*ref_div = clamp(DIV_ROUND_CLOSEST(den, post_div), 1u, ref_div_max);
 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
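The clamp() conversion above is cosmetic: clamp(v, lo, hi) is defined as min(max(v, lo), hi) (the kernel macro additionally type-checks its arguments), so the computed reference divider is unchanged. A quick standalone check of the equivalence, using plain C stand-ins for the kernel macros:

#include <assert.h>
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return min_u(max_u(v, lo), hi);   /* exactly the old expression */
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 200; v++)
		assert(clamp_u(v, 1u, 128u) == min_u(max_u(v, 1u), 128u));
	printf("clamp(v, 1, 128) == min(max(v, 1), 128) for all tested v\n");
	return 0;
}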
|
@ -1223,11 +1223,11 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
|
|||||||
for_each_ras_error(err_node, err_data) {
|
for_each_ras_error(err_node, err_data) {
|
||||||
err_info = &err_node->err_info;
|
err_info = &err_node->err_info;
|
||||||
amdgpu_ras_error_statistic_de_count(&obj->err_data,
|
amdgpu_ras_error_statistic_de_count(&obj->err_data,
|
||||||
&err_info->mcm_info, NULL, err_info->de_count);
|
&err_info->mcm_info, err_info->de_count);
|
||||||
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
|
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
|
||||||
&err_info->mcm_info, NULL, err_info->ce_count);
|
&err_info->mcm_info, err_info->ce_count);
|
||||||
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
|
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
|
||||||
&err_info->mcm_info, NULL, err_info->ue_count);
|
&err_info->mcm_info, err_info->ue_count);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
/* for legacy asic path which doesn't has error source info */
|
/* for legacy asic path which doesn't has error source info */
|
||||||
@ -2153,7 +2153,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
|
|||||||
/* gpu reset is fallback for failed and default cases.
|
/* gpu reset is fallback for failed and default cases.
|
||||||
* For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
|
* For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
|
||||||
*/
|
*/
|
||||||
if (poison_stat && !con->is_rma) {
|
if (poison_stat && !amdgpu_ras_is_rma(adev)) {
|
||||||
event_id = amdgpu_ras_acquire_event_id(adev, type);
|
event_id = amdgpu_ras_acquire_event_id(adev, type);
|
||||||
RAS_EVENT_LOG(adev, event_id,
|
RAS_EVENT_LOG(adev, event_id,
|
||||||
"GPU reset for %s RAS poison consumption is issued!\n",
|
"GPU reset for %s RAS poison consumption is issued!\n",
|
||||||
@ -2881,9 +2881,6 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
|
|||||||
{
|
{
|
||||||
mutex_init(&ecc_log->lock);
|
mutex_init(&ecc_log->lock);
|
||||||
|
|
||||||
/* Set any value as siphash key */
|
|
||||||
memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
|
|
||||||
|
|
||||||
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
|
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
|
||||||
ecc_log->de_queried_count = 0;
|
ecc_log->de_queried_count = 0;
|
||||||
ecc_log->prev_de_queried_count = 0;
|
ecc_log->prev_de_queried_count = 0;
|
||||||
@ -2948,7 +2945,7 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
|
|||||||
|
|
||||||
amdgpu_ras_error_data_fini(&err_data);
|
amdgpu_ras_error_data_fini(&err_data);
|
||||||
|
|
||||||
if (err_cnt && con->is_rma)
|
if (err_cnt && amdgpu_ras_is_rma(adev))
|
||||||
amdgpu_ras_reset_gpu(adev);
|
amdgpu_ras_reset_gpu(adev);
|
||||||
|
|
||||||
amdgpu_ras_schedule_retirement_dwork(con,
|
amdgpu_ras_schedule_retirement_dwork(con,
|
||||||
@ -3049,7 +3046,7 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
|
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
|
||||||
if (reset_flags && !con->is_rma) {
|
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
|
||||||
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
|
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
|
||||||
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
|
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
|
||||||
else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
|
else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
|
||||||
@ -3195,7 +3192,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
|
|||||||
* This calling fails when is_rma is true or
|
* This calling fails when is_rma is true or
|
||||||
* ret != 0.
|
* ret != 0.
|
||||||
*/
|
*/
|
||||||
if (con->is_rma || ret)
|
if (amdgpu_ras_is_rma(adev) || ret)
|
||||||
goto free;
|
goto free;
|
||||||
|
|
||||||
if (con->eeprom_control.ras_num_recs) {
|
if (con->eeprom_control.ras_num_recs) {
|
||||||
@ -3244,7 +3241,7 @@ out:
|
|||||||
* Except error threshold exceeding case, other failure cases in this
|
* Except error threshold exceeding case, other failure cases in this
|
||||||
* function would not fail amdgpu driver init.
|
* function would not fail amdgpu driver init.
|
||||||
*/
|
*/
|
||||||
if (!con->is_rma)
|
if (!amdgpu_ras_is_rma(adev))
|
||||||
ret = 0;
|
ret = 0;
|
||||||
else
|
else
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
@ -4287,7 +4284,7 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
|
|||||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||||
|
|
||||||
/* mode1 is the only selection for RMA status */
|
/* mode1 is the only selection for RMA status */
|
||||||
if (ras->is_rma) {
|
if (amdgpu_ras_is_rma(adev)) {
|
||||||
ras->gpu_reset_flags = 0;
|
ras->gpu_reset_flags = 0;
|
||||||
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
|
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
|
||||||
}
|
}
|
||||||
@ -4611,8 +4608,6 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
|
|||||||
if (!err_node)
|
if (!err_node)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
|
|
||||||
|
|
||||||
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
|
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
|
||||||
|
|
||||||
err_data->err_list_count++;
|
err_data->err_list_count++;
|
||||||
@ -4622,21 +4617,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
|
|||||||
return &err_node->err_info;
|
return &err_node->err_info;
|
||||||
}
|
}
|
||||||
|
|
||||||
void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
|
|
||||||
{
|
|
||||||
/* This function will be retired. */
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
|
|
||||||
{
|
|
||||||
list_del(&mca_err_addr->node);
|
|
||||||
kfree(mca_err_addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
|
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
|
||||||
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
||||||
struct ras_err_addr *err_addr, u64 count)
|
u64 count)
|
||||||
{
|
{
|
||||||
struct ras_err_info *err_info;
|
struct ras_err_info *err_info;
|
||||||
|
|
||||||
@ -4650,9 +4633,6 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
|
|||||||
if (!err_info)
|
if (!err_info)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (err_addr && err_addr->err_status)
|
|
||||||
amdgpu_ras_add_mca_err_addr(err_info, err_addr);
|
|
||||||
|
|
||||||
err_info->ue_count += count;
|
err_info->ue_count += count;
|
||||||
err_data->ue_count += count;
|
err_data->ue_count += count;
|
||||||
|
|
||||||
@ -4661,7 +4641,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
|
|||||||
|
|
||||||
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
|
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
|
||||||
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
||||||
struct ras_err_addr *err_addr, u64 count)
|
u64 count)
|
||||||
{
|
{
|
||||||
struct ras_err_info *err_info;
|
struct ras_err_info *err_info;
|
||||||
|
|
||||||
@ -4683,7 +4663,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
|
|||||||
|
|
||||||
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
|
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
|
||||||
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
||||||
struct ras_err_addr *err_addr, u64 count)
|
u64 count)
|
||||||
{
|
{
|
||||||
struct ras_err_info *err_info;
|
struct ras_err_info *err_info;
|
||||||
|
|
||||||
@ -4697,9 +4677,6 @@ int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
|
|||||||
if (!err_info)
|
if (!err_info)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (err_addr && err_addr->err_status)
|
|
||||||
amdgpu_ras_add_mca_err_addr(err_info, err_addr);
|
|
||||||
|
|
||||||
err_info->de_count += count;
|
err_info->de_count += count;
|
||||||
err_data->de_count += count;
|
err_data->de_count += count;
|
||||||
|
|
||||||
@ -4771,6 +4748,16 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
|
|||||||
dev_info(adev->dev,
|
dev_info(adev->dev,
|
||||||
"socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
|
"socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
|
||||||
socket_id, aid_id, hbm_id, fw_status);
|
socket_id, aid_id, hbm_id, fw_status);
|
||||||
|
|
||||||
|
if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
|
||||||
|
dev_info(adev->dev,
|
||||||
|
"socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
|
||||||
|
socket_id, aid_id, fw_status);
|
||||||
|
|
||||||
|
if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
|
||||||
|
dev_info(adev->dev,
|
||||||
|
"socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
|
||||||
|
socket_id, aid_id, fw_status);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
|
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
|
||||||
@ -4837,3 +4824,13 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
|
|||||||
|
|
||||||
va_end(args);
|
va_end(args);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||||
|
|
||||||
|
if (!con)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return con->is_rma;
|
||||||
|
}
|
||||||
|
@ -28,7 +28,6 @@
|
|||||||
#include <linux/list.h>
|
#include <linux/list.h>
|
||||||
#include <linux/kfifo.h>
|
#include <linux/kfifo.h>
|
||||||
#include <linux/radix-tree.h>
|
#include <linux/radix-tree.h>
|
||||||
#include <linux/siphash.h>
|
|
||||||
#include "ta_ras_if.h"
|
#include "ta_ras_if.h"
|
||||||
#include "amdgpu_ras_eeprom.h"
|
#include "amdgpu_ras_eeprom.h"
|
||||||
#include "amdgpu_smuio.h"
|
#include "amdgpu_smuio.h"
|
||||||
@ -47,6 +46,8 @@ struct amdgpu_iv_entry;
|
|||||||
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
|
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
|
||||||
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
|
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
|
||||||
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
|
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
|
||||||
|
#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
|
||||||
|
#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
|
||||||
|
|
||||||
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
|
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
|
||||||
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
|
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
|
||||||
@ -476,16 +477,15 @@ struct ras_err_pages {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct ras_ecc_err {
|
struct ras_ecc_err {
|
||||||
u64 hash_index;
|
|
||||||
uint64_t status;
|
uint64_t status;
|
||||||
uint64_t ipid;
|
uint64_t ipid;
|
||||||
uint64_t addr;
|
uint64_t addr;
|
||||||
|
uint64_t pa_pfn;
|
||||||
struct ras_err_pages err_pages;
|
struct ras_err_pages err_pages;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ras_ecc_log_info {
|
struct ras_ecc_log_info {
|
||||||
struct mutex lock;
|
struct mutex lock;
|
||||||
siphash_key_t ecc_key;
|
|
||||||
struct radix_tree_root de_page_tree;
|
struct radix_tree_root de_page_tree;
|
||||||
uint64_t de_queried_count;
|
uint64_t de_queried_count;
|
||||||
uint64_t prev_de_queried_count;
|
uint64_t prev_de_queried_count;
|
||||||
@ -572,19 +572,11 @@ struct ras_fs_data {
|
|||||||
char debugfs_name[32];
|
char debugfs_name[32];
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ras_err_addr {
|
|
||||||
struct list_head node;
|
|
||||||
uint64_t err_status;
|
|
||||||
uint64_t err_ipid;
|
|
||||||
uint64_t err_addr;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ras_err_info {
|
struct ras_err_info {
|
||||||
struct amdgpu_smuio_mcm_config_info mcm_info;
|
struct amdgpu_smuio_mcm_config_info mcm_info;
|
||||||
u64 ce_count;
|
u64 ce_count;
|
||||||
u64 ue_count;
|
u64 ue_count;
|
||||||
u64 de_count;
|
u64 de_count;
|
||||||
struct list_head err_addr_list;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ras_err_node {
|
struct ras_err_node {
|
||||||
@ -942,13 +934,13 @@ int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
|
|||||||
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
|
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
|
||||||
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
|
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
|
||||||
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
||||||
struct ras_err_addr *err_addr, u64 count);
|
u64 count);
|
||||||
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
|
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
|
||||||
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
||||||
struct ras_err_addr *err_addr, u64 count);
|
u64 count);
|
||||||
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
|
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
|
||||||
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
struct amdgpu_smuio_mcm_config_info *mcm_info,
|
||||||
struct ras_err_addr *err_addr, u64 count);
|
u64 count);
|
||||||
void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
|
void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
|
||||||
int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
|
int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
|
||||||
const struct aca_info *aca_info, void *data);
|
const struct aca_info *aca_info, void *data);
|
@@ -957,12 +949,6 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
 				  struct aca_handle *handle, char *buf, void *data);
 
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
-				 struct ras_err_addr *err_addr);
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
-				 struct ras_err_addr *mca_err_addr);
-
 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
 
@@ -982,4 +968,5 @@ __printf(3, 4)
 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
 				const char *fmt, ...);
 
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
 #endif
@@ -136,6 +136,12 @@ static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *doma
 	return queue_work(domain->wq, work);
 }
 
+static inline bool amdgpu_reset_pending(struct amdgpu_reset_domain *domain)
+{
+	lockdep_assert_held(&domain->sem);
+	return rwsem_is_contended(&domain->sem);
+}
+
 void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
 
 void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
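The helper added above simply asks whether another thread is already queued on the reset domain's rw_semaphore, so long-running work that holds the lock can bail out early. A hedged sketch of that usage pattern follows; more_work() and do_one_step() are invented placeholders for illustration, not amdgpu functions:

/* Illustrative only: poll amdgpu_reset_pending() while holding the
 * reset domain lock for read, and yield as soon as a reset is waiting.
 */
static int do_long_running_work(struct amdgpu_reset_domain *domain)
{
	down_read(&domain->sem);
	while (more_work()) {
		if (amdgpu_reset_pending(domain)) /* rwsem_is_contended() */
			break;	/* drop the lock so the reset can proceed */
		do_one_step();
	}
	up_read(&domain->sem);
	return 0;
}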
@@ -144,7 +144,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 	/* We pad to match fetch size */
 	count = ring->funcs->align_mask + 1 -
 		(ring->wptr & ring->funcs->align_mask);
-	count %= ring->funcs->align_mask + 1;
+	count &= ring->funcs->align_mask;
+
+	if (count != 0)
 		ring->funcs->insert_nop(ring, count);
 
 	mb();
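The rewritten padding math relies on align_mask + 1 being a power of two: reduction modulo a power of two equals masking with that power minus one, so the modulo can become a cheaper AND and a nop insertion of zero dwords can be skipped entirely. A standalone check of the identity (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t align_mask = 0x3f;	/* align_mask + 1 == 64, a power of two */
	uint32_t x;

	/* x % (mask + 1) == x & mask for every x when mask + 1 is 2^n */
	for (x = 0; x < 100000; x++)
		assert(x % (align_mask + 1) == (x & align_mask));
	return 0;
}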
@@ -235,6 +235,8 @@ struct amdgpu_ring_funcs {
 	void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
 	void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
 	void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
+	int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+	void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
 };
 
 struct amdgpu_ring {
@@ -334,6 +336,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
 #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
 #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
+#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
 
 unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -410,7 +410,7 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
 	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
 
 	WARN_ON(!ring->is_sw_ring);
-	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+	if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
 		return;
 	amdgpu_ring_mux_end_ib(mux, ring);
 }
@@ -22,7 +22,6 @@
  * Authors: Andres Rodriguez <andresx7@gmail.com>
  */
 
-#include <linux/fdtable.h>
 #include <linux/file.h>
 #include <linux/pid.h>
 
@@ -115,6 +115,7 @@ struct amdgpu_sdma {
 	bool has_page_queue;
 	struct ras_common_if *ras_if;
 	struct amdgpu_sdma_ras *ras;
+	uint32_t *ip_dump;
 };
 
 /*
@@ -196,7 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
 	amdgpu_umc_handle_bad_pages(adev, ras_error_status);
 
 	if ((err_data->ue_count || err_data->de_count) &&
-	    (reset || (con && con->is_rma))) {
+	    (reset || amdgpu_ras_is_rma(adev))) {
 		con->gpu_reset_flags |= reset;
 		amdgpu_ras_reset_gpu(adev);
 	}
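Call sites such as the one above stop dereferencing con->is_rma directly and route through the new amdgpu_ras_is_rma() accessor declared earlier in this series, which can also cover the case where no RAS context exists. A sketch of the accessor shape the diff implies (the exact body in the kernel may differ):

bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	/* treat a missing RAS context as "not in RMA state" */
	return con ? con->is_rma : false;
}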
@@ -204,55 +204,6 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
 	return AMDGPU_RAS_SUCCESS;
 }
 
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
-			uint32_t reset, uint32_t timeout_ms)
-{
-	struct ras_err_data err_data;
-	struct ras_common_if head = {
-		.block = AMDGPU_RAS_BLOCK__UMC,
-	};
-	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
-	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	uint32_t timeout = timeout_ms;
-
-	memset(&err_data, 0, sizeof(err_data));
-	amdgpu_ras_error_data_init(&err_data);
-
-	do {
-
-		amdgpu_umc_handle_bad_pages(adev, &err_data);
-
-		if (timeout && !err_data.de_count) {
-			msleep(1);
-			timeout--;
-		}
-
-	} while (timeout && !err_data.de_count);
-
-	if (!timeout)
-		dev_warn(adev->dev, "Can't find bad pages\n");
-
-	if (err_data.de_count)
-		dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
-
-	if (obj) {
-		obj->err_data.ue_count += err_data.ue_count;
-		obj->err_data.ce_count += err_data.ce_count;
-		obj->err_data.de_count += err_data.de_count;
-	}
-
-	amdgpu_ras_error_data_fini(&err_data);
-
-	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-
-	if (reset || (err_data.err_addr_cnt && con && con->is_rma)) {
-		con->gpu_reset_flags |= reset;
-		amdgpu_ras_reset_gpu(adev);
-	}
-
-	return 0;
-}
-
 int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
 		enum amdgpu_ras_block block, uint16_t pasid,
 		pasid_notify pasid_fn, void *data, uint32_t reset)
@@ -472,43 +423,6 @@ int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int amdgpu_umc_uint64_cmp(const void *a, const void *b)
-{
-	uint64_t *addr_a = (uint64_t *)a;
-	uint64_t *addr_b = (uint64_t *)b;
-
-	if (*addr_a > *addr_b)
-		return 1;
-	else if (*addr_a < *addr_b)
-		return -1;
-	else
-		return 0;
-}
-
-/* Use string hash to avoid logging the same bad pages repeatedly */
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
-		uint64_t *pfns, int len, uint64_t *val)
-{
-	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	char buf[MAX_UMC_HASH_STRING_SIZE] = {0};
-	int offset = 0, i = 0;
-	uint64_t hash_val;
-
-	if (!pfns || !len)
-		return -EINVAL;
-
-	sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL);
-
-	for (i = 0; i < len; i++)
-		offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]);
-
-	hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key);
-
-	*val = hash_val;
-
-	return 0;
-}
-
 int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
 		struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
 {
@@ -519,18 +433,10 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
 	ecc_log = &con->umc_ecc_log;
 
 	mutex_lock(&ecc_log->lock);
-	ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err);
-	if (!ret) {
-		struct ras_err_pages *err_pages = &ecc_err->err_pages;
-		int i;
-
-		/* Reserve memory */
-		for (i = 0; i < err_pages->count; i++)
-			amdgpu_ras_reserve_page(adev, err_pages->pfn[i]);
-
-		radix_tree_tag_set(ecc_tree,
-			ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG);
-	}
+	ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
+	if (!ret)
+		radix_tree_tag_set(ecc_tree,
+			ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
 	mutex_unlock(&ecc_log->lock);
 
 	return ret;
@@ -127,13 +127,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
 int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
 			umc_func func, void *data);
 
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
-			uint32_t reset, uint32_t timeout_ms);
-
 int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
 			uint64_t status, uint64_t ipid, uint64_t addr);
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
-			uint64_t *pfns, int len, uint64_t *val);
 int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
 		struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
 
@@ -587,7 +587,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
 		break;
 	}
 
-	r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name);
+	r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
 	if (r) {
 		release_firmware(adev->umsch_mm.fw);
 		adev->umsch_mm.fw = NULL;
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
+	r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
 			fw_name);
@@ -1088,7 +1088,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
 	int r;
 
 	job->vm = NULL;
-	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 
 	if (ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 		return -EINVAL;
 	}
 
-	r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
+	r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
 			fw_name);
@@ -749,7 +749,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
 	int i, r = 0;
 
 	job->vm = NULL;
-	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 
 	for (idx = 0; idx < ib->length_dw;) {
 		uint32_t len = amdgpu_ib_get_value(ib, idx);
@@ -1044,7 +1043,6 @@ out:
 	if (!r) {
 		/* No error, free all destroyed handle slots */
 		tmp = destroyed;
-		amdgpu_ib_free(p->adev, ib, NULL);
 	} else {
 		/* Error during parsing, free all allocated handle slots */
 		tmp = allocated;
@@ -330,6 +330,9 @@ struct amdgpu_vcn {
 	uint16_t inst_mask;
 	uint8_t num_inst_per_aid;
 	bool using_unified_queue;
+
+	/* IP reg dump */
+	uint32_t *ip_dump;
 };
 
 struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -33,6 +33,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
 #include "vi.h"
 #include "soc15.h"
 #include "nv.h"
@@ -849,6 +850,13 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
 	return mode;
 }
 
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
+{
+	/* stop the data exchange thread */
+	amdgpu_virt_fini_data_exchange(adev);
+	amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
+}
+
 void amdgpu_virt_post_reset(struct amdgpu_device *adev)
 {
 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
@@ -376,6 +376,7 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
 		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
 				    uint32_t ucode_id);
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
 void amdgpu_virt_post_reset(struct amdgpu_device *adev);
 bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
 bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
@@ -549,7 +549,7 @@ static int amdgpu_vkms_sw_fini(void *handle)
 
 	adev->mode_info.mode_config_initialized = false;
 
-	kfree(adev->mode_info.bios_hardcoded_edid);
+	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
 	kfree(adev->amdgpu_vkms_output);
 	return 0;
 }
@@ -681,6 +681,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
 		ring->funcs->emit_wreg;
 
+	if (adev->gfx.enable_cleaner_shader &&
+	    ring->funcs->emit_cleaner_shader &&
+	    job->enforce_isolation)
+		ring->funcs->emit_cleaner_shader(ring);
+
 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
 		return 0;
 
@@ -742,6 +747,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		amdgpu_ring_emit_switch_buffer(ring);
 		amdgpu_ring_emit_switch_buffer(ring);
 	}
+
 	amdgpu_ring_ib_end(ring);
 	return 0;
 }
@@ -838,7 +844,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.immediate = immediate;
 
-	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+	r = vm->update_funcs->prepare(&params, NULL);
 	if (r)
 		goto error;
 
@@ -902,10 +908,12 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
 {
 	struct amdgpu_vm *vm = params->vm;
 
-	if (!fence || !*fence)
-		return;
-
 	tlb_cb->vm = vm;
+	if (!fence || !*fence) {
+		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
+		return;
+	}
+
 	if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
 				   amdgpu_vm_tlb_seq_cb)) {
 		dma_fence_put(vm->last_tlb_flush);
@@ -933,7 +941,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
  * @unlocked: unlocked invalidation during MM callback
  * @flush_tlb: trigger tlb invalidation after update completed
  * @allow_override: change MTYPE for local NUMA nodes
- * @resv: fences we need to sync to
+ * @sync: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
@@ -949,16 +957,16 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
  * 0 for success, negative erro code for failure.
  */
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
-			   struct dma_resv *resv, uint64_t start, uint64_t last,
-			   uint64_t flags, uint64_t offset, uint64_t vram_base,
+			   bool immediate, bool unlocked, bool flush_tlb,
+			   bool allow_override, struct amdgpu_sync *sync,
+			   uint64_t start, uint64_t last, uint64_t flags,
+			   uint64_t offset, uint64_t vram_base,
 			   struct ttm_resource *res, dma_addr_t *pages_addr,
 			   struct dma_fence **fence)
 {
 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
 	struct amdgpu_vm_update_params params;
 	struct amdgpu_res_cursor cursor;
-	enum amdgpu_sync_mode sync_mode;
 	int r, idx;
 
 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -991,14 +999,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	params.allow_override = allow_override;
 	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
 
-	/* Implicitly sync to command submissions in the same VM before
-	 * unmapping. Sync to moving fences before mapping.
-	 */
-	if (!(flags & AMDGPU_PTE_VALID))
-		sync_mode = AMDGPU_SYNC_EQ_OWNER;
-	else
-		sync_mode = AMDGPU_SYNC_EXPLICIT;
-
 	amdgpu_vm_eviction_lock(vm);
 	if (vm->evicting) {
 		r = -EBUSY;
@@ -1013,7 +1013,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		dma_fence_put(tmp);
 	}
 
-	r = vm->update_funcs->prepare(&params, resv, sync_mode);
+	r = vm->update_funcs->prepare(&params, sync);
 	if (r)
 		goto error_free;
 
@@ -1155,23 +1155,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 	struct amdgpu_bo_va_mapping *mapping;
+	struct dma_fence **last_update;
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_resource *mem;
-	struct dma_fence **last_update;
+	struct amdgpu_sync sync;
 	bool flush_tlb = clear;
-	bool uncached;
-	struct dma_resv *resv;
 	uint64_t vram_base;
 	uint64_t flags;
+	bool uncached;
 	int r;
 
+	amdgpu_sync_create(&sync);
 	if (clear || !bo) {
 		mem = NULL;
-		resv = vm->root.bo->tbo.base.resv;
+
+		/* Implicitly sync to command submissions in the same VM before
+		 * unmapping.
+		 */
+		r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+				     AMDGPU_SYNC_EQ_OWNER, vm);
+		if (r)
+			goto error_free;
 	} else {
 		struct drm_gem_object *obj = &bo->tbo.base;
 
-		resv = bo->tbo.base.resv;
 		if (obj->import_attach && bo_va->is_xgmi) {
 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
 			struct drm_gem_object *gobj = dma_buf->priv;
@@ -1185,6 +1192,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		if (mem && (mem->mem_type == TTM_PL_TT ||
 			    mem->mem_type == AMDGPU_PL_PREEMPT))
 			pages_addr = bo->tbo.ttm->dma_address;
+
+		/* Implicitly sync to moving fences before mapping anything */
+		r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+				     AMDGPU_SYNC_EXPLICIT, vm);
+		if (r)
+			goto error_free;
 	}
 
 	if (bo) {
@@ -1234,12 +1247,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		trace_amdgpu_vm_bo_update(mapping);
 
 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
-					   !uncached, resv, mapping->start, mapping->last,
-					   update_flags, mapping->offset,
-					   vram_base, mem, pages_addr,
-					   last_update);
+					   !uncached, &sync, mapping->start,
+					   mapping->last, update_flags,
+					   mapping->offset, vram_base, mem,
+					   pages_addr, last_update);
 		if (r)
-			return r;
+			goto error_free;
 	}
 
 	/* If the BO is not in its preferred location add it back to
@@ -1267,7 +1280,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		trace_amdgpu_vm_bo_mapping(mapping);
 	}
 
-	return 0;
+error_free:
+	amdgpu_sync_free(&sync);
+	return r;
 }
 
 /**
@@ -1414,25 +1429,34 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence)
 {
-	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
 	struct amdgpu_bo_va_mapping *mapping;
-	uint64_t init_pte_value = 0;
 	struct dma_fence *f = NULL;
+	struct amdgpu_sync sync;
 	int r;
 
+
+	/*
+	 * Implicitly sync to command submissions in the same VM before
+	 * unmapping.
+	 */
+	amdgpu_sync_create(&sync);
+	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+			     AMDGPU_SYNC_EQ_OWNER, vm);
+	if (r)
+		goto error_free;
+
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
 
 		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
-					   resv, mapping->start, mapping->last,
-					   init_pte_value, 0, 0, NULL, NULL,
-					   &f);
+					   &sync, mapping->start, mapping->last,
+					   0, 0, 0, NULL, NULL, &f);
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
 		if (r) {
 			dma_fence_put(f);
-			return r;
+			goto error_free;
 		}
 	}
 
@@ -1443,7 +1467,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		dma_fence_put(f);
 	}
 
-	return 0;
+error_free:
+	amdgpu_sync_free(&sync);
+	return r;
 }
 
@@ -2218,7 +2244,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
 			       (1 << 30) - 1) >> 30;
 		vm_size = roundup_pow_of_two(
-			min(max(phys_ram_gb * 3, min_vm_size), max_size));
+			clamp(phys_ram_gb * 3, min_vm_size, max_size));
 	}
 
 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
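clamp(val, lo, hi) is defined to behave like min(max(val, lo), hi), so the replacement above is a pure readability change. A small standalone check of that identity (illustrative only, not kernel code):

#include <assert.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define CLAMP(v, lo, hi) MIN(MAX((v), (lo)), (hi))

int main(void)
{
	assert(CLAMP(5, 10, 20) == 10);		/* below the range -> lo */
	assert(CLAMP(15, 10, 20) == 15);	/* inside the range -> unchanged */
	assert(CLAMP(25, 10, 20) == 20);	/* above the range -> hi */
	return 0;
}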
@@ -2421,6 +2447,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		return r;
 
+	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
 	vm->is_compute_context = false;
 
 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2485,6 +2513,7 @@ error_free_root:
 error_free_delayed:
 	dma_fence_put(vm->last_tlb_flush);
 	dma_fence_put(vm->last_unlocked);
+	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
 	amdgpu_vm_fini_entities(vm);
 
 	return r;
@@ -2641,6 +2670,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		}
 	}
 
+	ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
 }
 
 /**
@@ -2754,6 +2784,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 * amdgpu_vm_handle_fault - graceful handling of VM faults.
 * @adev: amdgpu device pointer
 * @pasid: PASID of the VM
+ * @ts: Timestamp of the fault
 * @vmid: VMID, only used for GFX 9.4.3.
 * @node_id: Node_id received in IH cookie. Only applicable for
 *	     GFX 9.4.3.
@@ -2764,7 +2795,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 * shouldn't be reported any more.
 */
 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
-			    u32 vmid, u32 node_id, uint64_t addr,
+			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
			    bool write_fault)
 {
 	bool is_compute_context = false;
@@ -2790,7 +2821,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 	addr /= AMDGPU_GPU_PAGE_SIZE;
 
 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
-	    node_id, addr, write_fault)) {
+	    node_id, addr, ts, write_fault)) {
 		amdgpu_bo_unref(&root);
 		return true;
 	}
@@ -304,8 +304,8 @@ struct amdgpu_vm_update_params {
 
 struct amdgpu_vm_update_funcs {
 	int (*map_table)(struct amdgpu_bo_vm *bo);
-	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
-		       enum amdgpu_sync_mode sync_mode);
+	int (*prepare)(struct amdgpu_vm_update_params *p,
+		       struct amdgpu_sync *sync);
 	int (*update)(struct amdgpu_vm_update_params *p,
 		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
 		      unsigned count, uint32_t incr, uint64_t flags);
@@ -505,9 +505,10 @@ int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
-			   struct dma_resv *resv, uint64_t start, uint64_t last,
-			   uint64_t flags, uint64_t offset, uint64_t vram_base,
+			   bool immediate, bool unlocked, bool flush_tlb,
+			   bool allow_override, struct amdgpu_sync *sync,
+			   uint64_t start, uint64_t last, uint64_t flags,
+			   uint64_t offset, uint64_t vram_base,
 			   struct ttm_resource *res, dma_addr_t *pages_addr,
 			   struct dma_fence **fence);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
@@ -558,7 +559,7 @@ amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
 
 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
-			    u32 vmid, u32 node_id, uint64_t addr,
+			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
			    bool write_fault);
 
 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
@@ -39,20 +39,18 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
 * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
 *
 * @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: sync obj with fences to wait on
 *
 * Returns:
 * Negativ errno, 0 for success.
 */
 static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
-				 struct dma_resv *resv,
-				 enum amdgpu_sync_mode sync_mode)
+				 struct amdgpu_sync *sync)
 {
-	if (!resv)
+	if (!sync)
		return 0;
 
-	return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
+	return amdgpu_sync_wait(sync, true);
 }
 
 /**
@@ -403,7 +403,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	params.vm = vm;
 	params.immediate = immediate;
 
-	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+	r = vm->update_funcs->prepare(&params, NULL);
 	if (r)
 		goto exit;
 
@@ -77,32 +77,24 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: amdgpu_sync object with fences to wait for
 *
 * Returns:
 * Negativ errno, 0 for success.
 */
 static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
-				  struct dma_resv *resv,
-				  enum amdgpu_sync_mode sync_mode)
+				  struct amdgpu_sync *sync)
 {
-	struct amdgpu_sync sync;
	int r;
 
	r = amdgpu_vm_sdma_alloc_job(p, 0);
	if (r)
		return r;
 
-	if (!resv)
+	if (!sync)
		return 0;
 
-	amdgpu_sync_create(&sync);
-	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
-	if (!r)
-		r = amdgpu_sync_push_to_job(&sync, p->job);
-	amdgpu_sync_free(&sync);
-
+	r = amdgpu_sync_push_to_job(sync, p->job);
	if (r) {
		p->num_dw_left = 0;
		amdgpu_job_free(p->job);
@@ -1389,10 +1389,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
 
 	switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
 	case ACA_ERROR_TYPE_UE:
-		amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
+		amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
 		break;
 	case ACA_ERROR_TYPE_CE:
-		amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
+		amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
 		break;
 	default:
 		break;
@@ -213,7 +213,7 @@ struct amd_sriov_msg_pf2vf_info {
 	uint32_t gpu_capacity;
 	/* reserved */
 	uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
-};
+} __packed;
 
 struct amd_sriov_msg_vf2pf_info_header {
 	/* the total structure size in byte */
@@ -273,7 +273,7 @@ struct amd_sriov_msg_vf2pf_info {
 	uint32_t mes_info_size;
 	/* reserved */
 	uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
-};
+} __packed;
 
 /* mailbox message send from guest to host */
 enum amd_sriov_mailbox_request_message {
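Marking the PF2VF/VF2PF message structs __packed pins their layout to the declared byte sequence: these blocks are exchanged between guest and host builds, which may otherwise insert padding differently. A standalone illustration of what padding does to a struct (not amdgpu code):

#include <stdint.h>
#include <stdio.h>

struct unpacked { uint8_t a; uint32_t b; };
struct packed_s { uint8_t a; uint32_t b; } __attribute__((packed));

int main(void)
{
	/* On common ABIs this prints 8 vs 5: the compiler inserted
	 * three bytes of padding into the unpacked variant. */
	printf("unpacked=%zu packed=%zu\n",
	       sizeof(struct unpacked), sizeof(struct packed_s));
	return 0;
}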
@@ -75,6 +75,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
 	uint32_t inst_mask;
 
 	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
 		return;
 
@@ -103,6 +105,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
 	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
 		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
 			ring->xcp_id = xcp_id;
+			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
 			break;
 		}
 	}
@@ -215,7 +215,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
 	dig->bl_dev = bd;
 
 	bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
-	bd->props.power = FB_BLANK_UNBLANK;
+	bd->props.power = BACKLIGHT_POWER_ON;
 	backlight_update_status(bd);
 
 	DRM_INFO("amdgpu atom DIG backlight initialized\n");
@@ -2064,27 +2064,25 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
 		case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
 			fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
 			if (fake_edid_record->ucFakeEDIDLength) {
-				struct edid *edid;
-				int edid_size =
-					max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
-				edid = kmalloc(edid_size, GFP_KERNEL);
-				if (edid) {
-					memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
-					       fake_edid_record->ucFakeEDIDLength);
-
-					if (drm_edid_is_valid(edid)) {
-						adev->mode_info.bios_hardcoded_edid = edid;
-						adev->mode_info.bios_hardcoded_edid_size = edid_size;
-					} else
-						kfree(edid);
-				}
-			}
-			record += fake_edid_record->ucFakeEDIDLength ?
-				  struct_size(fake_edid_record,
-					      ucFakeEDIDString,
-					      fake_edid_record->ucFakeEDIDLength) :
-				  /* empty fake edid record must be 3 bytes long */
-				  sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+				const struct drm_edid *edid;
+				int edid_size;
+
+				if (fake_edid_record->ucFakeEDIDLength == 128)
+					edid_size = fake_edid_record->ucFakeEDIDLength;
+				else
+					edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+				edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+				if (drm_edid_valid(edid))
+					adev->mode_info.bios_hardcoded_edid = edid;
+				else
+					drm_edid_free(edid);
+				record += struct_size(fake_edid_record,
+						      ucFakeEDIDString,
+						      edid_size);
+			} else {
+				/* empty fake edid record must be 3 bytes long */
+				record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+			}
 			break;
 		case LCD_PANEL_RESOLUTION_RECORD_TYPE:
 			panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
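The conversion above moves the hardcoded BIOS EDID onto DRM's struct drm_edid, where drm_edid_alloc() copies the raw blob and drm_edid_free() releases it, replacing the open-coded kmalloc()/memcpy()/kfree() handling. A hedged sketch of that lifecycle (the helper name is invented; the API is per include/drm/drm_edid.h):

#include <drm/drm_edid.h>

static const struct drm_edid *dup_bios_edid(const void *raw, size_t size)
{
	const struct drm_edid *edid = drm_edid_alloc(raw, size);

	if (!drm_edid_valid(edid)) {
		drm_edid_free(edid);	/* both calls tolerate NULL */
		return NULL;
	}
	return edid;
}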
@@ -364,6 +364,7 @@
 * 1 - Stream
 * 2 - Bypass
 */
+#define	EOP_EXEC	(1 << 28) /* For Trailing Fence */
 #define	DATA_SEL(x)	((x) << 29)
 /* 0 - discard
 * 1 - send low 32bit data
@@ -2846,7 +2846,7 @@ static int dce_v10_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	kfree(adev->mode_info.bios_hardcoded_edid);
+	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
 
 	drm_kms_helper_poll_fini(adev_to_drm(adev));
 