drm next and fixes for 6.7-rc1

renesas:
 - atomic conversion
 - DT support
 
 ssd13xx:
 - dt binding fix for ssd132x
 - Initialize ssd130x crtc_state to NULL.
 
 amdgpu:
 - Fix RAS support check
 - RAS fixes
 - MES fixes
 - SMU13 fixes
 - Contiguous memory allocation fix
 - BACO fixes
 - GPU reset fixes
 - Min power limit fixes
 - GFX11 fixes
 - USB4/TB hotplug fixes
 - ARM regression fix
 - GFX9.4.3 fixes
 - KASAN/KCSAN stack size check fixes
 - SR-IOV fixes
 - SMU14 fixes
 - PSP13 fixes
 - Display blend fixes
 - Flexible array size fixes
 
 amdkfd:
 - GPUVM fix
 
 radeon:
 - Flexible array size fixes
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmVJmdsACgkQDHTzWXnE
 hr5hphAAoFdk4ma7TauUNyP3JoUwht+Ohm9NcUHq/9P9kOCwPIIehxMZnwPoTGyw
 VWwXpqpeVsW6zyMgfxWq/+P1S1C5LvZ1HLccbP3xv327fUZ1QnLapHwFxT1SYNpi
 Tw5qhQN/cwNOX0Pc9uBavYmJzf54OhvPxt2CHHPDShsHBOBc0Gd88gKJr7GWUF5M
 Ri6i20Tfsgq8AopWQj9628TT+y/aN3rVIfYYBiNejxejlFtt1HODkKFX3DuBNPOI
 bZOtQm11cbxmX7/2RI92mI20axUb4UMNIQFDYEl3bVlyPyEhhwKPQMSDxhQQodPg
 8zMY9Fbl4Z4VaOFEDbpiRv/0/HeWoLefmpQ5LZbz35RhKLTkwsWXHUELPUEj3uTr
 7+EnLwuvQdtbT9W2J8btO7v1dHOy86ArlnmqNjB2cEnvGaR3DNM4jxPVLn60SyAc
 N7CFWNU4EoJf02XhwAludYa5pQHEaTFL8ss6TSoRWXuHHg1vNdu2SorOvkcGkg28
 q/t28gZDZaOpXfMmf3ec+PHhO6nxXLXJRbiksVP/rpXQQ42cEI72hr6//UYLRuzg
 BbYPZ8uXmDjsSZIqYceZwc3Vr6oEmD6EAzHM9+zwS5h0IZ12jKTmFiDEAhG2DwoG
 8PCaj5UXkxVK+6iHndz8Qwg4+Fu1j5nodKM+vBNem/iSnxC/OVw=
 =TsFN
 -----END PGP SIGNATURE-----

Merge tag 'drm-next-2023-11-07' of git://anongit.freedesktop.org/drm/drm

Pull more drm updates from Dave Airlie:
 "Geert pointed out I missed the renesas reworks in my main pull, so
  this pull contains the renesas next work for atomic conversion and DT
  support.

  It also contains a bunch of amdgpu and some small ssd13xx fixes.

  renesas:
   - atomic conversion
   - DT support

  ssd13xx:
   - dt binding fix for ssd132x
   - Initialize ssd130x crtc_state to NULL.

  amdgpu:
   - Fix RAS support check
   - RAS fixes
   - MES fixes
   - SMU13 fixes
   - Contiguous memory allocation fix
   - BACO fixes
   - GPU reset fixes
   - Min power limit fixes
   - GFX11 fixes
   - USB4/TB hotplug fixes
   - ARM regression fix
   - GFX9.4.3 fixes
   - KASAN/KCSAN stack size check fixes
   - SR-IOV fixes
   - SMU14 fixes
   - PSP13 fixes
   - Display blend fixes
   - Flexible array size fixes

  amdkfd:
   - GPUVM fix

  radeon:
   - Flexible array size fixes"

* tag 'drm-next-2023-11-07' of git://anongit.freedesktop.org/drm/drm: (83 commits)
  drm/amd/display: Enable fast update on blendTF change
  drm/amd/display: Fix blend LUT programming
  drm/amd/display: Program plane color setting correctly
  drm/amdgpu: Query and report boot status
  drm/amdgpu: Add psp v13 function to query boot status
  drm/amd/swsmu: remove fw version check in sw_init.
  drm/amd/swsmu: update smu v14_0_0 driver if and metrics table
  drm/amdgpu: Add C2PMSG_109/126 reg field shift/masks
  drm/amdgpu: Optimize the asic type fix code
  drm/amdgpu: fix GRBM read timeout when do mes_self_test
  drm/amdgpu: check recovery status of xgmi hive in ras_reset_error_count
  drm/amd/pm: only check sriov vf flag once when creating hwmon sysfs
  drm/amdgpu: Attach eviction fence on alloc
  drm/amdkfd: Improve amdgpu_vm_handle_moved
  drm/amd/display: Increase frame warning limit with KASAN or KCSAN in dml2
  drm/amd/display: Avoid NULL dereference of timing generator
  drm/amdkfd: Update cache info for GFX 9.4.3
  drm/amdkfd: Populate cache info for GFX 9.4.3
  drm/amdgpu: don't put MQDs in VRAM on ARM | ARM64
  drm/amdgpu/smu13: drop compute workload workaround
  ...
Linus Torvalds 2023-11-07 17:10:02 -08:00
commit 25b6377007
72 changed files with 1730 additions and 1357 deletions


@@ -0,0 +1,130 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/renesas,shmobile-lcdc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Renesas SH-Mobile LCD Controller (LCDC)

maintainers:
  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
  - Geert Uytterhoeven <geert+renesas@glider.be>

properties:
  compatible:
    enum:
      - renesas,r8a7740-lcdc # R-Mobile A1
      - renesas,sh73a0-lcdc  # SH-Mobile AG5

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    minItems: 1
    maxItems: 5
    description:
      Only the functional clock is mandatory.
      Some of the optional clocks are model-dependent (e.g. "video" (a.k.a.
      "vou" or "dv_clk") is available on R-Mobile A1 only).

  clock-names:
    minItems: 1
    items:
      - const: fck
      - enum: [ media, lclk, hdmi, video ]
      - enum: [ media, lclk, hdmi, video ]
      - enum: [ media, lclk, hdmi, video ]
      - enum: [ media, lclk, hdmi, video ]

  power-domains:
    maxItems: 1

  ports:
    $ref: /schemas/graph.yaml#/properties/ports

    properties:
      port@0:
        $ref: /schemas/graph.yaml#/properties/port
        description: LCD port (R-Mobile A1 and SH-Mobile AG5)
        unevaluatedProperties: false

      port@1:
        $ref: /schemas/graph.yaml#/properties/port
        description: HDMI port (R-Mobile A1 LCDC1 and SH-Mobile AG5)
        unevaluatedProperties: false

      port@2:
        $ref: /schemas/graph.yaml#/properties/port
        description: MIPI-DSI port (SH-Mobile AG5)
        unevaluatedProperties: false

    required:
      - port@0

    unevaluatedProperties: false

required:
  - compatible
  - reg
  - interrupts
  - clocks
  - clock-names
  - power-domains
  - ports

additionalProperties: false

allOf:
  - if:
      properties:
        compatible:
          contains:
            const: renesas,r8a7740-lcdc
    then:
      properties:
        ports:
          properties:
            port@2: false

  - if:
      properties:
        compatible:
          contains:
            const: renesas,sh73a0-lcdc
    then:
      properties:
        ports:
          required:
            - port@1
            - port@2

examples:
  - |
    #include <dt-bindings/clock/r8a7740-clock.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    lcd-controller@fe940000 {
        compatible = "renesas,r8a7740-lcdc";
        reg = <0xfe940000 0x4000>;
        interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&mstp1_clks R8A7740_CLK_LCDC0>,
                 <&cpg_clocks R8A7740_CLK_M3>, <&lcdlclk0_clk>,
                 <&vou_clk>;
        clock-names = "fck", "media", "lclk", "video";
        power-domains = <&pd_a4lc>;

        ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
                reg = <0>;
                lcdc0_rgb: endpoint {
                };
            };
        };
    };


@@ -11,10 +11,10 @@ maintainers:

 properties:
   compatible:
-    - enum:
-        - solomon,ssd1322
-        - solomon,ssd1325
-        - solomon,ssd1327
+    enum:
+      - solomon,ssd1322
+      - solomon,ssd1325
+      - solomon,ssd1327

 required:
   - compatible


@@ -949,6 +949,78 @@ The following tables list existing packed RGB formats.
[table excerpt: this hunk adds a row to the packed RGB formats table for
MEDIA_BUS_FMT_RGB666_2X9_BE (code 0x1025). The format is transferred over a
9-bit bus in two cycles: the first cycle carries r5 r4 r3 r2 r1 r0 g5 g4 g3
on bus bits 8-0, and the second cycle carries g2 g1 g0 b5 b4 b3 b2 b1 b0 on
bus bits 8-0; the higher-order bus bit columns are left empty. The
surrounding context rows are the preceding format's b2/b1/b0 cells and the
following .. _MEDIA-BUS-FMT-BGR666-1X18: / MEDIA_BUS_FMT_BGR666_1X18 entry.]


@@ -7133,7 +7133,7 @@ F: drivers/gpu/host1x/
 F: include/linux/host1x.h
 F: include/uapi/drm/tegra_drm.h
 
-DRM DRIVERS FOR RENESAS
+DRM DRIVERS FOR RENESAS R-CAR
 M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 M: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
 L: dri-devel@lists.freedesktop.org
@@ -7144,7 +7144,16 @@ F: Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
 F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml
 F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
 F: Documentation/devicetree/bindings/display/renesas,du.yaml
-F: drivers/gpu/drm/renesas/
+F: drivers/gpu/drm/renesas/rcar-du/
+
+DRM DRIVERS FOR RENESAS SHMOBILE
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M: Geert Uytterhoeven <geert+renesas@glider.be>
+L: dri-devel@lists.freedesktop.org
+L: linux-renesas-soc@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
+F: drivers/gpu/drm/renesas/shmobile/
 F: include/linux/platform_data/shmob_drm.h
 
 DRM DRIVERS FOR ROCKCHIP


@ -363,9 +363,6 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs; const struct amd_ip_funcs *funcs;
}; };
#define HW_REV(_Major, _Minor, _Rev) \
((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
struct amdgpu_ip_block { struct amdgpu_ip_block {
struct amdgpu_ip_block_status status; struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version; const struct amdgpu_ip_block_version *version;


@ -425,6 +425,32 @@ validate_fail:
return ret; return ret;
} }
static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
uint32_t domain,
struct dma_fence *fence)
{
int ret = amdgpu_bo_reserve(bo, false);
if (ret)
return ret;
ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
if (ret)
goto unreserve_out;
ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (ret)
goto unreserve_out;
dma_resv_add_fence(bo->tbo.base.resv, fence,
DMA_RESV_USAGE_BOOKKEEP);
unreserve_out:
amdgpu_bo_unreserve(bo);
return ret;
}
static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{ {
return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
@ -1784,6 +1810,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
} }
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
} else {
mutex_lock(&avm->process_info->lock);
if (avm->process_info->eviction_fence &&
!dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
&avm->process_info->eviction_fence->base);
mutex_unlock(&avm->process_info->lock);
if (ret)
goto err_validate_bo;
} }
if (offset) if (offset)
@ -1793,6 +1828,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed: allocate_init_user_pages_failed:
err_pin_bo: err_pin_bo:
err_validate_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv); drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow: err_node_allow:
@ -1866,10 +1902,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
/* The eviction fence should be removed by the last unmap.
* TODO: Log an error condition if the bo still has the eviction fence
* attached
*/
amdgpu_amdkfd_remove_eviction_fence(mem->bo, amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence); process_info->eviction_fence);
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
@ -1998,19 +2030,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
if (unlikely(ret)) if (unlikely(ret))
goto out_unreserve; goto out_unreserve;
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
/* Validate BO only once. The eviction fence gets added to BO
* the first time it is mapped. Validate will wait for all
* background evictions to complete.
*/
ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
if (ret) {
pr_debug("Validate failed\n");
goto out_unreserve;
}
}
list_for_each_entry(entry, &mem->attachments, list) { list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm != avm || entry->is_mapped) if (entry->bo_va->base.vm != avm || entry->is_mapped)
continue; continue;
@ -2037,10 +2056,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
mem->mapped_to_gpu_memory); mem->mapped_to_gpu_memory);
} }
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
dma_resv_add_fence(bo->tbo.base.resv,
&avm->process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
ret = unreserve_bo_and_vms(&ctx, false, false); ret = unreserve_bo_and_vms(&ctx, false, false);
goto out; goto out;
@ -2074,7 +2089,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{ {
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdkfd_process_info *process_info = avm->process_info;
unsigned long bo_size = mem->bo->tbo.base.size; unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_mem_attachment *entry; struct kfd_mem_attachment *entry;
struct bo_vm_reservation_context ctx; struct bo_vm_reservation_context ctx;
@ -2115,15 +2129,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
mem->mapped_to_gpu_memory); mem->mapped_to_gpu_memory);
} }
/* If BO is unmapped from all VMs, unfence it. It can be evicted if
* required.
*/
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
!mem->bo->tbo.pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);
unreserve_out: unreserve_out:
unreserve_bo_and_vms(&ctx, false, false); unreserve_bo_and_vms(&ctx, false, false);
out: out:
@ -2351,8 +2356,20 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
amdgpu_sync_create(&(*mem)->sync); amdgpu_sync_create(&(*mem)->sync);
(*mem)->is_imported = true; (*mem)->is_imported = true;
mutex_lock(&avm->process_info->lock);
if (avm->process_info->eviction_fence &&
!dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
&avm->process_info->eviction_fence->base);
mutex_unlock(&avm->process_info->lock);
if (ret)
goto err_remove_mem;
return 0; return 0;
err_remove_mem:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&obj->vma_node, drm_priv);
err_free_mem: err_free_mem:
kfree(*mem); kfree(*mem);
err_put_obj: err_put_obj:


@ -29,6 +29,7 @@
#include "amdgpu.h" #include "amdgpu.h"
#include "atom.h" #include "atom.h"
#include <linux/device.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/acpi.h> #include <linux/acpi.h>
@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
return false; return false;
/* ATRM is for on-platform devices only */
if (dev_is_removable(&adev->pdev->dev))
return false;
while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) { while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) && if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) &&
(pdev->class != PCI_CLASS_DISPLAY_OTHER << 8)) (pdev->class != PCI_CLASS_DISPLAY_OTHER << 8))


@ -1117,6 +1117,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
return r; return r;
} }
/* FIXME: In theory this loop shouldn't be needed any more when
* amdgpu_vm_handle_moved handles all moved BOs that are reserved
* with p->ticket. But removing it caused test regressions, so I'm
* leaving it here for now.
*/
amdgpu_bo_list_for_each_entry(e, p->bo_list) { amdgpu_bo_list_for_each_entry(e, p->bo_list) {
bo_va = e->bo_va; bo_va = e->bo_va;
if (bo_va == NULL) if (bo_va == NULL)
@ -1131,7 +1136,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
return r; return r;
} }
r = amdgpu_vm_handle_moved(adev, vm); r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
if (r) if (r)
return r; return r;


@ -41,6 +41,7 @@
#include <drm/drm_fb_helper.h> #include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h> #include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h> #include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h> #include <linux/vga_switcheroo.h>
#include <linux/efi.h> #include <linux/efi.h>
@ -1073,6 +1074,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev); amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true); ret = amdgpu_atomfirmware_asic_init(adev, true);
/* TODO: check the return val and stop device initialization if boot fails */
amdgpu_psp_query_boot_status(adev);
return ret; return ret;
} else { } else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context); return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@ -2223,7 +2226,6 @@ out:
*/ */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{ {
struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent; struct pci_dev *parent;
int i, r; int i, r;
bool total; bool total;
@ -2294,7 +2296,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
(amdgpu_is_atpx_hybrid() || (amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) && amdgpu_has_atpx_dgpu_power_cntl()) &&
((adev->flags & AMD_IS_APU) == 0) && ((adev->flags & AMD_IS_APU) == 0) &&
!pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) !dev_is_removable(&adev->pdev->dev))
adev->flags |= AMD_IS_PX; adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) { if (!(adev->flags & AMD_IS_APU)) {
@ -3962,13 +3964,23 @@ int amdgpu_device_init(struct amdgpu_device *adev,
} }
} }
} else { } else {
tmp = amdgpu_reset_method; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
/* It should do a default reset when loading or reloading the driver, case IP_VERSION(13, 0, 0):
* regardless of the module parameter reset_method. case IP_VERSION(13, 0, 7):
*/ case IP_VERSION(13, 0, 10):
amdgpu_reset_method = AMD_RESET_METHOD_NONE; r = psp_gpu_reset(adev);
r = amdgpu_asic_reset(adev); break;
amdgpu_reset_method = tmp; default:
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
break;
}
if (r) { if (r) {
dev_err(adev->dev, "asic reset on init failed\n"); dev_err(adev->dev, "asic reset on init failed\n");
goto failed; goto failed;
@ -4132,7 +4144,7 @@ fence_driver_init:
px = amdgpu_device_supports_px(ddev); px = amdgpu_device_supports_px(ddev);
if (px || (!pci_is_thunderbolt_attached(adev->pdev) && if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL))) apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev, vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px); &amdgpu_switcheroo_ops, px);
@ -4282,7 +4294,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
px = amdgpu_device_supports_px(adev_to_drm(adev)); px = amdgpu_device_supports_px(adev_to_drm(adev));
if (px || (!pci_is_thunderbolt_attached(adev->pdev) && if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL))) apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev); vga_switcheroo_unregister_client(adev->pdev);
@ -5566,10 +5578,6 @@ skip_hw_reset:
drm_sched_start(&ring->sched, true); drm_sched_start(&ring->sched, true);
} }
if (adev->enable_mes &&
amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
amdgpu_mes_self_test(tmp_adev);
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); drm_helper_resume_force_mode(adev_to_drm(tmp_adev));


@ -99,6 +99,7 @@
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
#define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
#define mmMM_INDEX 0x0 #define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6 #define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1 #define mmMM_DATA 0x1
@ -239,8 +240,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary) uint8_t *binary)
{ {
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; uint64_t vram_size;
int ret = 0; u32 msg;
int i, ret = 0;
/* It can take up to a second for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
* wait for this to complete. Once the C2PMSG is updated, we can
* continue.
*/
if (dev_is_removable(&adev->pdev->dev)) {
for (i = 0; i < 1000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
msleep(1);
}
}
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
if (vram_size) { if (vram_size) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
@ -2449,6 +2468,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0)) if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
adev->gmc.xgmi.supported = true; adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
/* set NBIO version */ /* set NBIO version */
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 1, 0): case IP_VERSION(6, 1, 0):


@ -409,7 +409,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
if (!r) if (!r)
r = amdgpu_vm_clear_freed(adev, vm, NULL); r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r) if (!r)
r = amdgpu_vm_handle_moved(adev, vm); r = amdgpu_vm_handle_moved(adev, vm, ticket);
if (r && r != -EBUSY) if (r && r != -EBUSY)
DRM_ERROR("Failed to invalidate VM page tables (%d))\n", DRM_ERROR("Failed to invalidate VM page tables (%d))\n",


@ -2041,6 +2041,14 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist); MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
/* differentiate between P10 and P11 asics with the same DID */
{0x67FF, 0xE3, CHIP_POLARIS10},
{0x67FF, 0xE7, CHIP_POLARIS10},
{0x67FF, 0xF3, CHIP_POLARIS10},
{0x67FF, 0xF7, CHIP_POLARIS10},
};
static const struct drm_driver amdgpu_kms_driver; static const struct drm_driver amdgpu_kms_driver;
static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
@ -2083,6 +2091,22 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
} }
} }
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
{
int i;
for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
if (pdev->device == asic_type_quirks[i].device &&
pdev->revision == asic_type_quirks[i].revision) {
flags &= ~AMD_ASIC_MASK;
flags |= asic_type_quirks[i].type;
break;
}
}
return flags;
}
static int amdgpu_pci_probe(struct pci_dev *pdev, static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
@ -2110,15 +2134,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
"See modparam exp_hw_support\n"); "See modparam exp_hw_support\n");
return -ENODEV; return -ENODEV;
} }
/* differentiate between P10 and P11 asics with the same DID */
if (pdev->device == 0x67FF && flags = amdgpu_fix_asic_type(pdev, flags);
(pdev->revision == 0xE3 ||
pdev->revision == 0xE7 ||
pdev->revision == 0xF3 ||
pdev->revision == 0xF7)) {
flags &= ~AMD_ASIC_MASK;
flags |= CHIP_POLARIS10;
}
/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
* however, SME requires an indirect IOMMU mapping because the encryption * however, SME requires an indirect IOMMU mapping because the encryption


@ -385,9 +385,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring = &kiq->ring; struct amdgpu_ring *ring = &kiq->ring;
u32 domain = AMDGPU_GEM_DOMAIN_GTT; u32 domain = AMDGPU_GEM_DOMAIN_GTT;
#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */ /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
domain |= AMDGPU_GEM_DOMAIN_VRAM; domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif
/* create MQD for KIQ */ /* create MQD for KIQ */
if (!adev->enable_mes_kiq && !ring->mqd_obj) { if (!adev->enable_mes_kiq && !ring->mqd_obj) {


@ -557,8 +557,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
mqd_prop.hqd_queue_priority = p->hqd_queue_priority; mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
mqd_prop.hqd_active = false; mqd_prop.hqd_active = false;
if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
mutex_lock(&adev->srbm_mutex);
amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
}
mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop); mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
amdgpu_bo_unreserve(q->mqd_obj); amdgpu_bo_unreserve(q->mqd_obj);
} }
@ -994,9 +1006,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
switch (queue_type) { switch (queue_type) {
case AMDGPU_RING_TYPE_GFX: case AMDGPU_RING_TYPE_GFX:
ring->funcs = adev->gfx.gfx_ring[0].funcs; ring->funcs = adev->gfx.gfx_ring[0].funcs;
ring->me = adev->gfx.gfx_ring[0].me;
ring->pipe = adev->gfx.gfx_ring[0].pipe;
break; break;
case AMDGPU_RING_TYPE_COMPUTE: case AMDGPU_RING_TYPE_COMPUTE:
ring->funcs = adev->gfx.compute_ring[0].funcs; ring->funcs = adev->gfx.compute_ring[0].funcs;
ring->me = adev->gfx.compute_ring[0].me;
ring->pipe = adev->gfx.compute_ring[0].pipe;
break; break;
case AMDGPU_RING_TYPE_SDMA: case AMDGPU_RING_TYPE_SDMA:
ring->funcs = adev->sdma.instance[0].ring.funcs; ring->funcs = adev->sdma.instance[0].ring.funcs;


@ -2120,6 +2120,21 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
return ret; return ret;
} }
int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
{
struct psp_context *psp = &adev->psp;
int ret = 0;
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return 0;
if (psp->funcs &&
psp->funcs->query_boot_status)
ret = psp->funcs->query_boot_status(psp);
return ret;
}
static int psp_hw_start(struct psp_context *psp) static int psp_hw_start(struct psp_context *psp)
{ {
struct amdgpu_device *adev = psp->adev; struct amdgpu_device *adev = psp->adev;


@ -134,6 +134,7 @@ struct psp_funcs {
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp); int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp); int (*fatal_error_recovery_quirk)(struct psp_context *psp);
int (*query_boot_status)(struct psp_context *psp);
}; };
struct ta_funcs { struct ta_funcs {
@ -537,4 +538,6 @@ int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
#endif #endif


@ -1222,6 +1222,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
struct amdgpu_hive_info *hive;
int hive_ras_recovery = 0;
if (!block_obj || !block_obj->hw_ops) { if (!block_obj || !block_obj->hw_ops) {
dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
@ -1229,15 +1231,22 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery)) &&
mca_funcs && mca_funcs->mca_set_debug_mode)
return -EOPNOTSUPP;
if (!amdgpu_ras_is_supported(adev, block) || if (!amdgpu_ras_is_supported(adev, block) ||
!amdgpu_ras_get_mca_debug_mode(adev)) !amdgpu_ras_get_mca_debug_mode(adev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
hive = amdgpu_get_xgmi_hive(adev);
if (hive) {
hive_ras_recovery = atomic_read(&hive->ras_recovery);
amdgpu_put_xgmi_hive(hive);
}
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
hive_ras_recovery) &&
mca_funcs && mca_funcs->mca_set_debug_mode)
return -EOPNOTSUPP;
if (block_obj->hw_ops->reset_ras_error_count) if (block_obj->hw_ops->reset_ras_error_count)
block_obj->hw_ops->reset_ras_error_count(adev); block_obj->hw_ops->reset_ras_error_count(adev);


@ -166,8 +166,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
} }
} }
if (reset) if (reset) {
/* use mode-2 reset for poison consumption */
if (!entry)
con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
amdgpu_ras_reset_gpu(adev); amdgpu_ras_reset_gpu(adev);
}
} }
kfree(err_data->err_addr); kfree(err_data->err_addr);


@ -1373,6 +1373,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
* *
* @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
* @vm: requested vm * @vm: requested vm
* @ticket: optional reservation ticket used to reserve the VM
* *
* Make sure all BOs which are moved are updated in the PTs. * Make sure all BOs which are moved are updated in the PTs.
* *
@ -1382,11 +1383,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
* PTs have to be reserved! * PTs have to be reserved!
*/ */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev, int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm) struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{ {
struct amdgpu_bo_va *bo_va; struct amdgpu_bo_va *bo_va;
struct dma_resv *resv; struct dma_resv *resv;
bool clear; bool clear, unlock;
int r; int r;
spin_lock(&vm->status_lock); spin_lock(&vm->status_lock);
@ -1409,17 +1411,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock); spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */ /* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) if (!adev->debug_vm && dma_resv_trylock(resv)) {
clear = false; clear = false;
unlock = true;
/* The caller is already holding the reservation lock */
} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
clear = false;
unlock = false;
/* Somebody else is using the BO right now */ /* Somebody else is using the BO right now */
else } else {
clear = true; clear = true;
unlock = false;
}
r = amdgpu_vm_bo_update(adev, bo_va, clear); r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r) if (r)
return r; return r;
if (!clear) if (unlock)
dma_resv_unlock(resv); dma_resv_unlock(resv);
spin_lock(&vm->status_lock); spin_lock(&vm->status_lock);
} }


@ -443,7 +443,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm, struct amdgpu_vm *vm,
struct dma_fence **fence); struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev, int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm); struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo); struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,


@ -77,7 +77,16 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
return true; return true;
} }
static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
{
struct drm_buddy_block *block;
u64 size = 0;
list_for_each_entry(block, head, link)
size += amdgpu_vram_mgr_block_size(block);
return size;
}
/** /**
* DOC: mem_info_vram_total * DOC: mem_info_vram_total
@ -516,6 +525,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock); mutex_unlock(&mgr->lock);
vres->base.start = 0; vres->base.start = 0;
size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
vres->base.size);
list_for_each_entry(block, &vres->blocks, link) { list_for_each_entry(block, &vres->blocks, link) {
unsigned long start; unsigned long start;
@ -523,8 +534,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
amdgpu_vram_mgr_block_size(block); amdgpu_vram_mgr_block_size(block);
start >>= PAGE_SHIFT; start >>= PAGE_SHIFT;
if (start > PFN_UP(vres->base.size)) if (start > PFN_UP(size))
start -= PFN_UP(vres->base.size); start -= PFN_UP(size);
else else
start = 0; start = 0;
vres->base.start = max(vres->base.start, start); vres->base.start = max(vres->base.start, start);


@ -3498,6 +3498,8 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid); unsigned int vmid);
static int gfx_v10_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{ {
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
@ -6465,11 +6467,18 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
nv_grbm_select(adev, 0, 0, 0, 0); nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx]) if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else { } else {
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1)
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* restore mqd with the backup copy */ /* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx]) if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */ /* reset the ring */
ring->wptr = 0; ring->wptr = 0;
*ring->wptr_cpu_addr = 0; *ring->wptr_cpu_addr = 0;
@ -6743,7 +6752,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */ /* reset MQD to a clean status */
if (adev->gfx.kiq[0].mqd_backup) if (adev->gfx.kiq[0].mqd_backup)
memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */ /* reset ring buffer */
ring->wptr = 0; ring->wptr = 0;
@ -6766,7 +6775,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.kiq[0].mqd_backup) if (adev->gfx.kiq[0].mqd_backup)
memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
} }
return 0; return 0;
@ -6787,11 +6796,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else { } else {
/* restore MQD to a clean status */ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset ring buffer */ /* reset ring buffer */
ring->wptr = 0; ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
@ -7172,6 +7181,13 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
/* WA added for Vangogh asic fixing the SMU suspend failure
* It needs to set power gating again during gfxoff control
* otherwise the gfxoff disallowing will be failed to set.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
if (!adev->no_hw_access) { if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) { if (amdgpu_async_gfx_ring) {
if (amdgpu_gfx_disable_kgq(adev, 0)) if (amdgpu_gfx_disable_kgq(adev, 0))


@ -155,6 +155,7 @@ static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue
{ {
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
@ -3714,11 +3715,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
soc21_grbm_select(adev, 0, 0, 0, 0); soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx]) if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else { } else {
/* restore mqd with the backup copy */ /* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx]) if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */ /* reset the ring */
ring->wptr = 0; ring->wptr = 0;
*ring->wptr_cpu_addr = 0; *ring->wptr_cpu_addr = 0;
@ -4007,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */ /* reset MQD to a clean status */
if (adev->gfx.kiq[0].mqd_backup) if (adev->gfx.kiq[0].mqd_backup)
memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */ /* reset ring buffer */
ring->wptr = 0; ring->wptr = 0;
@ -4030,7 +4031,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.kiq[0].mqd_backup) if (adev->gfx.kiq[0].mqd_backup)
memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
} }
return 0; return 0;
@ -4051,11 +4052,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex); mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else { } else {
/* restore MQD to a clean status */ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx]) if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset ring buffer */ /* reset ring buffer */
ring->wptr = 0; ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);


@ -28,6 +28,7 @@
#include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h" #include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h> #include <uapi/linux/kfd_ioctl.h>
#include <linux/device.h>
#include <linux/pci.h> #include <linux/pci.h>
#define smnPCIE_CONFIG_CNTL 0x11180044 #define smnPCIE_CONFIG_CNTL 0x11180044
@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev)) if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
def = data = RREG32_PCIE(smnPCIE_LC_CNTL); def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev)) if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;


@ -759,6 +759,83 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
return 0; return 0;
} }
static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
uint32_t inst,
uint32_t boot_error)
{
uint32_t socket_id;
uint32_t aid_id;
uint32_t hbm_id;
uint32_t reg_data;
socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
socket_id, aid_id, reg_data);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
socket_id, aid_id, hbm_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
socket_id, aid_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
socket_id, aid_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
socket_id, aid_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
socket_id, aid_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
socket_id, aid_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
socket_id, aid_id, hbm_id);
if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
socket_id, aid_id, hbm_id);
}
static int psp_v13_0_query_boot_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int inst_mask = adev->aid_mask;
uint32_t reg_data;
uint32_t i;
int ret = 0;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
return 0;
if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007)
return 0;
for_each_inst(i, inst_mask) {
reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
psp_v13_0_boot_error_reporting(adev, i, reg_data);
ret = -EINVAL;
break;
}
}
return ret;
}
static const struct psp_funcs psp_v13_0_funcs = { static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode, .init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@ -781,6 +858,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.update_spirom = psp_v13_0_update_spirom, .update_spirom = psp_v13_0_update_spirom,
.vbflash_stat = psp_v13_0_vbflash_status, .vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.query_boot_status = psp_v13_0_query_boot_status,
}; };
void psp_v13_0_set_psp_funcs(struct psp_context *psp) void psp_v13_0_set_psp_funcs(struct psp_context *psp)


@ -91,8 +91,7 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
{ {
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
} }


@ -1404,6 +1404,66 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
return i; return i;
} }
static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
struct kfd_gpu_cache_info *pcache_info)
{
struct amdgpu_device *adev = kdev->adev;
int i = 0;
/* TCP L1 Cache per CU */
if (adev->gfx.config.gc_tcp_size_per_cu) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = 1;
i++;
}
/* Scalar L1 Instruction Cache per SQC */
if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) {
pcache_info[i].cache_size =
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
i++;
}
/* Scalar L1 Data Cache per SQC */
if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
i++;
}
/* L2 Data Cache per GPU (Total Tex Cache) */
if (adev->gfx.config.gc_tcc_size) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
pcache_info[i].cache_level = 2;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
i++;
}
/* L3 Data Cache per GPU */
if (adev->gmc.mall_size) {
pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
pcache_info[i].cache_level = 3;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
i++;
}
return i;
}
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{ {
int num_of_cache_types = 0; int num_of_cache_types = 0;
@ -1461,10 +1521,14 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
num_of_cache_types = ARRAY_SIZE(vega20_cache_info); num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
break; break;
case IP_VERSION(9, 4, 2): case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
*pcache_info = aldebaran_cache_info; *pcache_info = aldebaran_cache_info;
num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
break; break;
case IP_VERSION(9, 4, 3):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
*pcache_info);
break;
case IP_VERSION(9, 1, 0): case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2): case IP_VERSION(9, 2, 2):
*pcache_info = raven_cache_info; *pcache_info = raven_cache_info;


@ -1602,10 +1602,13 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
unsigned int cu_sibling_map_mask; unsigned int cu_sibling_map_mask;
int first_active_cu; int first_active_cu;
int i, j, k, xcc, start, end; int i, j, k, xcc, start, end;
int num_xcc = NUM_XCC(knode->xcc_mask);
struct kfd_cache_properties *pcache = NULL; struct kfd_cache_properties *pcache = NULL;
enum amdgpu_memory_partition mode;
struct amdgpu_device *adev = knode->adev;
start = ffs(knode->xcc_mask) - 1; start = ffs(knode->xcc_mask) - 1;
end = start + NUM_XCC(knode->xcc_mask); end = start + num_xcc;
cu_sibling_map_mask = cu_info->bitmap[start][0][0]; cu_sibling_map_mask = cu_info->bitmap[start][0][0];
cu_sibling_map_mask &= cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1); ((1 << pcache_info[cache_type].num_cu_shared) - 1);
@ -1624,7 +1627,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->processor_id_low = cu_processor_id pcache->processor_id_low = cu_processor_id
+ (first_active_cu - 1); + (first_active_cu - 1);
pcache->cache_level = pcache_info[cache_type].cache_level; pcache->cache_level = pcache_info[cache_type].cache_level;
pcache->cache_size = pcache_info[cache_type].cache_size;
if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
if (pcache->cache_level == 2)
pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
else if (mode)
pcache->cache_size = pcache_info[cache_type].cache_size / mode;
else
pcache->cache_size = pcache_info[cache_type].cache_size;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA; pcache->cache_type |= HSA_CACHE_TYPE_DATA;


@ -4348,7 +4348,6 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].in_transfer_func || srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper || srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func || srf_updates[i].lut3d_func ||
srf_updates[i].blend_tf ||
srf_updates[i].surface->force_full_update || srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr && (srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||


@ -533,7 +533,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
if (res_ctx->pipe_ctx[i].stream != stream) if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue; continue;
return tg->funcs->get_frame_count(tg); return tg->funcs->get_frame_count(tg);
@ -592,7 +592,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
if (res_ctx->pipe_ctx[i].stream != stream) if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue; continue;
tg->funcs->get_scanoutpos(tg, tg->funcs->get_scanoutpos(tg,


@@ -613,16 +613,19 @@ static void dpp3_program_blnd_pwl(
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
} else {
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green);
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);


@@ -316,7 +316,7 @@ bool hubp3_program_surface_flip_and_addr(
return true;
}
- static void hubp3_program_tiling(
+ void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)


@@ -278,6 +278,11 @@ void hubp3_setup(
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void hubp3_program_tiling(
+ struct dcn20_hubp *hubp2,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format);
void hubp3_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);


@@ -237,16 +237,19 @@ void mpc32_program_post1dlut_pwl(
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
} else {
+ REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
+ REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg);
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green);
+ REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1);
for (i = 0 ; i < num; i++)
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg);


@@ -53,11 +53,146 @@ static void hubp35_init(struct hubp *hubp)
/*do nothing for now for dcn3.5 or later*/
}
void hubp35_program_pixel_format(
struct hubp *hubp,
enum surface_pixel_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
uint32_t green_bar = 1;
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
/* swap for ABGR format */
if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
red_bar = 2;
blue_bar = 3;
}
REG_UPDATE_3(HUBPRET_CONTROL,
CROSSBAR_SRC_Y_G, green_bar,
CROSSBAR_SRC_CB_B, blue_bar,
CROSSBAR_SRC_CR_R, red_bar);
/* Mapping is same as ipp programming (cnvc) */
switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 1);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 3);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 8);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /* we use crossbar already */
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 24);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 65);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 64);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 67);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 12);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 112);
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 113);
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 114);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 118);
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 116,
ALPHA_PLANE_EN, 0);
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 116,
ALPHA_PLANE_EN, 1);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
/* don't see the need of program the xbar in DCN 1.0 */
}
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror,
unsigned int compat_level)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
hubp3_dcc_control_sienna_cichlid(hubp, dcc);
hubp3_program_tiling(hubp2, tiling_info, format);
hubp2_program_size(hubp, format, plane_size, dcc);
hubp2_program_rotation(hubp, rotation, horizontal_mirror);
hubp35_program_pixel_format(hubp, format);
}
struct hubp_funcs dcn35_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
- .hubp_program_surface_config = hubp3_program_surface_config,
+ .hubp_program_surface_config = hubp35_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
.hubp_setup = hubp3_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,


@@ -58,4 +58,18 @@ bool hubp35_construct(
void hubp35_set_fgcg(struct hubp *hubp, bool enable);
+ void hubp35_program_pixel_format(
+ struct hubp *hubp,
+ enum surface_pixel_format format);
+ void hubp35_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level);
#endif /* __DC_HUBP_DCN35_H__ */


@@ -60,8 +60,12 @@ endif
endif
ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ frame_warn_flag := -Wframe-larger-than=3072
+ else
frame_warn_flag := -Wframe-larger-than=2048
endif
+ endif
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)


@@ -242,6 +242,34 @@
//MP0_SMN_C2PMSG_103
#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_109
#define MP0_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_126
#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING__SHIFT 0x0
#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD__SHIFT 0x1
#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING__SHIFT 0x2
#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING__SHIFT 0x3
#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING__SHIFT 0x4
#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING__SHIFT 0x5
#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST__SHIFT 0x6
#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST__SHIFT 0x7
#define MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT 0x8
#define MP0_SMN_C2PMSG_126__AID_ID__SHIFT 0xb
#define MP0_SMN_C2PMSG_126__HBM_ID__SHIFT 0xd
#define MP0_SMN_C2PMSG_126__BOOT_STATUS__SHIFT 0x1f
#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK 0x00000001L
#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK 0x00000002L
#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING_MASK 0x00000004L
#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING_MASK 0x00000008L
#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING_MASK 0x00000010L
#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING_MASK 0x00000020L
#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST_MASK 0x00000040L
#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST_MASK 0x00000080L
#define MP0_SMN_C2PMSG_126__SOCKET_ID_MASK 0x00000700L
#define MP0_SMN_C2PMSG_126__AID_ID_MASK 0x00001800L
#define MP0_SMN_C2PMSG_126__HBM_ID_MASK 0x00002000L
#define MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK 0x80000000L
//MP0_SMN_IH_CREDIT
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
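The new C2PMSG_126 layout packs the individual boot-error flags together with socket/AID/HBM identifiers and an overall boot-status bit into a single 32-bit word. As a rough standalone illustration of how such a word can be decoded with the masks and shifts defined above (decode_boot_status is a hypothetical helper, not the psp v13 query code itself):

#include <stdint.h>
#include <stdio.h>

/* Subset of the mask/shift names defined in the hunk above. */
#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK 0x00000001L
#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK      0x00000002L
#define MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT          0x8
#define MP0_SMN_C2PMSG_126__SOCKET_ID_MASK            0x00000700L
#define MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK          0x80000000L

/* Decode a raw C2PMSG_126 value into its individual fields. */
static void decode_boot_status(uint32_t reg)
{
	printf("BOOT_STATUS bit: %u\n",
	       (unsigned int)!!(reg & MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK));
	printf("socket id:       %u\n",
	       (unsigned int)((reg & MP0_SMN_C2PMSG_126__SOCKET_ID_MASK) >>
			      MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT));
	if (reg & MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK)
		printf("memory training error reported\n");
	if (reg & MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK)
		printf("firmware load error reported\n");
}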


@@ -1080,33 +1080,35 @@ struct gpu_metrics_v3_0 {
uint16_t average_ipu_activity[8];
/* time filtered per-core C0 residency % [0-100]*/
uint16_t average_core_c0_activity[16];
- /* time filtered DRAM read bandwidth [GB/sec] */
+ /* time filtered DRAM read bandwidth [MB/sec] */
uint16_t average_dram_reads;
- /* time filtered DRAM write bandwidth [GB/sec] */
+ /* time filtered DRAM write bandwidth [MB/sec] */
uint16_t average_dram_writes;
/* Driver attached timestamp (in ns) */
uint64_t system_clock_counter;
/* Power/Energy */
- /* average dGPU + APU power on A + A platform */
+ /* time filtered power used for PPT/STAPM [APU+dGPU] [mW] */
uint32_t average_socket_power;
- /* average IPU power [W] */
+ /* time filtered IPU power [mW] */
uint16_t average_ipu_power;
- /* average APU power [W] */
+ /* time filtered APU power [mW] */
uint32_t average_apu_power;
- /* average dGPU power [W] */
+ /* time filtered GFX power [mW] */
+ uint32_t average_gfx_power;
+ /* time filtered dGPU power [mW] */
uint32_t average_dgpu_power;
- /* sum of core power across all cores in the socket [W] */
+ /* time filtered sum of core power across all cores in the socket [mW] */
- uint32_t average_core_power;
+ uint32_t average_all_core_power;
- /* calculated core power [W] */
+ /* calculated core power [mW] */
- uint16_t core_power[16];
+ uint16_t average_core_power[16];
- /* maximum IRM defined STAPM power limit [W] */
+ /* maximum IRM defined STAPM power limit [mW] */
uint16_t stapm_power_limit;
- /* time filtered STAPM power limit [W] */
+ /* time filtered STAPM power limit [mW] */
uint16_t current_stapm_power_limit;
- /* Average clocks */
+ /* time filtered clocks [MHz] */
uint16_t average_gfxclk_frequency;
uint16_t average_socclk_frequency;
uint16_t average_vpeclk_frequency;
@@ -1115,7 +1117,7 @@ struct gpu_metrics_v3_0 {
uint16_t average_vclk_frequency;
/* Current clocks */
- /* target core frequency */
+ /* target core frequency [MHz] */
uint16_t current_coreclk[16];
/* CCLK frequency limit enforced on classic cores [MHz] */
uint16_t current_core_maxfreq;
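With the v3.0 layout the power fields are plain milliwatt counters rather than watt-scaled values, so a consumer divides by 1000 for display. A minimal sketch of that conversion, assuming a stand-in struct with only the fields used here (illustrative, not the driver's sysfs/hwmon code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in with just the fields used below; the real struct is larger. */
struct gpu_metrics_v3_0_power {
	uint32_t average_socket_power;   /* [mW] */
	uint32_t average_all_core_power; /* [mW] */
};

static void print_power_watts(const struct gpu_metrics_v3_0_power *m)
{
	printf("socket:    %u.%03u W\n",
	       (unsigned int)(m->average_socket_power / 1000),
	       (unsigned int)(m->average_socket_power % 1000));
	printf("all cores: %u.%03u W\n",
	       (unsigned int)(m->average_all_core_power / 1000),
	       (unsigned int)(m->average_all_core_power % 1000));
}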


@@ -3288,10 +3288,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
uint32_t tmp;
- /* under multi-vf mode, the hwmon attributes are all not supported */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
/* under pp one vf mode manage of hwmon attributes is not supported */
if (amdgpu_sriov_is_pp_one_vf(adev))
effective_mode &= ~S_IWUSR;
@@ -4162,6 +4158,7 @@ err_out:
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
+ enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
int ret;
@@ -4173,17 +4170,21 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
- adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
- if (IS_ERR(adev->pm.int_hwmon_dev)) {
- ret = PTR_ERR(adev->pm.int_hwmon_dev);
- dev_err(adev->dev,
- "Unable to register hwmon device: %d\n", ret);
- return ret;
+ mode = amdgpu_virt_get_sriov_vf_mode(adev);
+ /* under multi-vf mode, the hwmon attributes are all not supported */
+ if (mode != SRIOV_VF_MODE_MULTI_VF) {
+ adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
+ DRIVER_NAME, adev,
+ hwmon_groups);
+ if (IS_ERR(adev->pm.int_hwmon_dev)) {
+ ret = PTR_ERR(adev->pm.int_hwmon_dev);
+ dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
+ return ret;
+ }
}
- switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+ switch (mode) {
case SRIOV_VF_MODE_ONE_VF:
mask = ATTR_FLAG_ONEVF;
break;


@@ -1022,6 +1022,9 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
*limit /= 100;
}
break;
+ case PP_PWR_LIMIT_MIN:
+ *limit = 0;
+ break;
default:
ret = -EOPNOTSUPP;
break;


@@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record {
typedef struct _ATOM_Tonga_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_VCE_State_Record entries[1];
+ ATOM_Tonga_VCE_State_Record entries[];
} ATOM_Tonga_VCE_State_Table;
typedef struct _ATOM_Tonga_PowerTune_Table {
@@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record {
typedef struct _ATOM_Tonga_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Tonga_Hard_Limit_Record entries[1];
+ ATOM_Tonga_Hard_Limit_Record entries[];
} ATOM_Tonga_Hard_Limit_Table;
typedef struct _ATOM_Tonga_GPIO_Table {
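Replacing the one-element placeholder arrays with C99 flexible array members means sizeof() of the table now covers only the fixed header, and the total size has to be computed from the entry count. A standalone sketch of the sizing pattern, using hypothetical names (the real tables are parsed out of the ATOM BIOS image rather than allocated like this):

#include <stddef.h>
#include <stdlib.h>

typedef unsigned char UCHAR;

typedef struct { UCHAR data[4]; } example_record;

typedef struct {
	UCHAR ucRevId;
	UCHAR ucNumEntries;
	example_record entries[];   /* flexible array member */
} example_table;

static example_table *alloc_example_table(UCHAR n)
{
	/* sizeof(example_table) now only covers the fixed header */
	example_table *t = calloc(1, sizeof(*t) + n * sizeof(t->entries[0]));

	if (t)
		t->ucNumEntries = n;
	return t;
}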


@@ -129,7 +129,7 @@ typedef struct _ATOM_Vega10_State {
typedef struct _ATOM_Vega10_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_State states[]; /* Dynamically allocate entries. */
} ATOM_Vega10_State_Array;
typedef struct _ATOM_Vega10_CLK_Dependency_Record {
@@ -169,37 +169,37 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table {
typedef struct _ATOM_Vega10_MCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_MCLK_Dependency_Table;
typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_SOCCLK_Dependency_Table;
typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_DCEFCLK_Dependency_Table;
typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PIXCLK_Dependency_Table;
typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries.*/
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_DISPCLK_Dependency_Table;
typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PHYCLK_Dependency_Table;
typedef struct _ATOM_Vega10_MM_Dependency_Record {
@@ -213,7 +213,7 @@ typedef struct _ATOM_Vega10_MM_Dependency_Record {
typedef struct _ATOM_Vega10_MM_Dependency_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */
+ ATOM_Vega10_MM_Dependency_Record entries[]; /* Dynamically allocate entries */
} ATOM_Vega10_MM_Dependency_Table;
typedef struct _ATOM_Vega10_PCIE_Record {
@@ -225,7 +225,7 @@ typedef struct _ATOM_Vega10_PCIE_Record {
typedef struct _ATOM_Vega10_PCIE_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
+ ATOM_Vega10_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Vega10_PCIE_Table;
typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
@@ -235,7 +235,7 @@ typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
typedef struct _ATOM_Vega10_Voltage_Lookup_Table {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries */
- ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */
+ ATOM_Vega10_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries */
} ATOM_Vega10_Voltage_Lookup_Table;
typedef struct _ATOM_Vega10_Fan_Table {
@@ -327,7 +327,7 @@ typedef struct _ATOM_Vega10_VCE_State_Record {
typedef struct _ATOM_Vega10_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Vega10_VCE_State_Record entries[1];
+ ATOM_Vega10_VCE_State_Record entries[];
} ATOM_Vega10_VCE_State_Table;
typedef struct _ATOM_Vega10_PowerTune_Table {
@@ -427,7 +427,7 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record {
typedef struct _ATOM_Vega10_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
- ATOM_Vega10_Hard_Limit_Record entries[1];
+ ATOM_Vega10_Hard_Limit_Record entries[];
} ATOM_Vega10_Hard_Limit_Table;
typedef struct _Vega10_PPTable_Generic_SubTable_Header {


@@ -733,7 +733,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -1742,10 +1742,31 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+ static int smu_reset_mp1_state(struct smu_context *smu)
+ {
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ if ((!adev->in_runpm) && (!adev->in_suspend) &&
+ (!amdgpu_in_reset(adev)))
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+ break;
+ default:
+ break;
+ }
+ return ret;
+ }
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1763,7 +1784,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+ return 0;
}
static void smu_late_fini(void *handle)


@@ -419,6 +419,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
+ SMU_BACO_STATE_NONE,
};
struct smu_baco_context {


@@ -150,97 +150,39 @@ typedef struct {
} DpmClocks_t;
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
- uint16_t CorePower[16]; //CAC calculated core power [W] [Q8.8]
- uint16_t CoreTemperature[16]; //TSEN measured core temperature [C] [Q8.8]
- uint16_t GfxTemperature; //TSEN measured GFX temperature [C] [Q8.8]
- uint16_t SocTemperature; //TSEN measured SOC temperature [C] [Q8.8]
- uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [W] [Q8.8]
- uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [W] [Q8.8]
+ uint16_t CorePower[16]; //CAC calculated core power [mW]
+ uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
+ uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
+ uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
+ uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
+ uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
- uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [C] [Q8.8]
- uint16_t AverageGfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
- uint16_t AverageFclkFrequency; //Time filtered target FCLK frequency [MHz]
- uint16_t AverageGfxActivity; //Time filtered GFX busy % [0-100] [Q8.8]
- uint16_t AverageSocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
- uint16_t AverageVclkFrequency; //Time filtered target VCLK frequency [MHz]
- uint16_t AverageVcnActivity; //Time filtered VCN busy % [0-100] [Q8.8]
- uint16_t AverageVpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
- uint16_t AverageIpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
- uint16_t AverageIpuBusy[8]; //Time filtered IPU per-column busy % [0-100] [Q8.8]
- uint16_t AverageDRAMReads; //Time filtered DRAM read bandwidth [GB/sec] [Q8.8]
- uint16_t AverageDRAMWrites; //Time filtered DRAM write bandwidth [GB/sec] [Q8.8]
- uint16_t AverageCoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100] [Q8.8]
- uint16_t IpuPower; //Time filtered IPU power [W] [Q8.8]
- uint32_t ApuPower; //Time filtered APU power [W] [Q24.8]
- uint32_t dGpuPower; //Time filtered dGPU power [W] [Q24.8]
- uint32_t AverageSocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [W] [Q24.8]
- uint32_t AverageCorePower; //Time filtered sum of core power across all cores in the socket [W] [Q24.8]
- uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
- uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+ uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
+ uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
+ uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
+ uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
+ uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
+ uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
+ uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
+ uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
+ uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
+ uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
+ uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
+ uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
+ uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
+ uint16_t IpuPower; //Time filtered IPU power [mW]
+ uint32_t ApuPower; //Time filtered APU power [mW]
+ uint32_t GfxPower; //Time filtered GFX power [mW]
+ uint32_t dGpuPower; //Time filtered dGPU power [mW]
+ uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
+ uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
+ uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
+ uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+ uint32_t spare[16];
} SmuMetrics_t;
typedef struct {
uint16_t GfxclkFrequency; //[MHz]
uint16_t SocclkFrequency; //[MHz]
uint16_t VclkFrequency; //[MHz]
uint16_t DclkFrequency; //[MHz]
uint16_t MemclkFrequency; //[MHz]
uint16_t spare;
uint16_t UvdActivity; //[centi]
uint16_t GfxActivity; //[centi]
uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
uint16_t CoreFrequency[8]; //[MHz]
uint16_t CorePower[8]; //[mW]
uint16_t CoreTemperature[8]; //[centi-Celsius]
uint16_t L3Frequency[2]; //[MHz]
uint16_t L3Temperature[2]; //[centi-Celsius]
uint16_t spare2[24];
uint16_t GfxTemperature; //[centi-Celsius]
uint16_t SocTemperature; //[centi-Celsius]
uint16_t ThrottlerStatus;
uint16_t CurrentSocketPower; //[mW]
uint16_t StapmOpnLimit; //[W]
uint16_t StapmCurrentLimit; //[W]
uint32_t ApuPower; //[mW]
uint32_t dGpuPower; //[mW]
uint16_t VddTdcValue; //[mA]
uint16_t SocTdcValue; //[mA]
uint16_t VddEdcValue; //[mA]
uint16_t SocEdcValue; //[mA]
uint16_t InfrastructureCpuMaxFreq; //[MHz]
uint16_t InfrastructureGfxMaxFreq; //[MHz]
uint16_t SkinTemp;
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
uint16_t FilterAlphaValue; //[m]
uint16_t AverageGfxclkFrequency;
uint16_t AverageFclkFrequency;
uint16_t AverageGfxActivity;
uint16_t AverageSocclkFrequency;
uint16_t AverageVclkFrequency;
uint16_t AverageVcnActivity;
uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads
uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes
uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower
uint16_t AverageCorePower[2]; //Filtered of [sum of CorePower[8] per ccx])
uint16_t AverageCoreC0Residency[16]; //Filtered of [average C0 residency % per core]
uint16_t spare1;
uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
} SmuMetrics_legacy_t;
//ISP tile definitions
typedef enum {
TILE_XTILE = 0, //ONO0


@@ -299,5 +299,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint8_t pcie_gen_cap,
uint8_t pcie_width_cap);
+ int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
#endif
#endif


@@ -234,24 +234,15 @@ static int vangogh_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- if (smu->smc_fw_if_version < 0x3) {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
- } else {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- }
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
- if (smu->smc_fw_version >= 0x043F3E00)
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
- else
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
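Sizing the metrics buffers with max() of the two layouts lets a single allocation serve either firmware interface version instead of branching at table-init time. Roughly, as an illustrative standalone sketch (not the driver code, which uses kzalloc and the real SmuMetrics layouts):

#include <stdlib.h>

/* Illustrative stand-ins for two incompatible metrics layouts. */
struct metrics_legacy { unsigned short fields[96]; };
struct metrics_new    { unsigned short fields[64]; };

#define MAX_SZ(a, b) ((a) > (b) ? (a) : (b))

static void *alloc_shared_metrics_buffer(void)
{
	/* big enough for whichever layout the firmware actually fills in */
	return calloc(1, MAX_SZ(sizeof(struct metrics_legacy),
				sizeof(struct metrics_new)));
}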


@@ -2477,3 +2477,16 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
return 0;
}
+ int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
+ {
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+ WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);
+ ret = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ return ret == 0 ? 0 : -EINVAL;
+ }


@@ -354,12 +354,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
smu_baco->platform_support = true;
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->maco_support = true;
+ }
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
@@ -2530,38 +2530,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
- (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
- ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_COMPUTE_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- true);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
- return ret;
- }
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_CUSTOM);
- } else {
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
smu->power_profile_mode);
- }
if (workload_type < 0)
return -EINVAL;
@@ -2602,14 +2574,20 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu)
static int smu_v13_0_0_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+ if (!ret)
+ adev->gfx.is_poweron = false;
+ return ret;
}
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
@@ -2794,7 +2772,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
break;
default:
/* Ignore others */


@@ -270,7 +270,7 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
- char ucode_prefix[30];
+ char ucode_prefix[15];
char fw_name[30];
/* No need to load P2S tables in IOV mode */


@@ -346,12 +346,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
- powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
smu_baco->platform_support = true;
- if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
- smu_baco->maco_support = true;
+ if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+ && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+ smu_baco->maco_support = true;
+ }
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
@@ -2498,7 +2499,13 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
break;
default:
/* Ignore others */
@@ -2524,14 +2531,20 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
static int smu_v13_0_7_baco_exit(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
- return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
- return smu_v13_0_baco_exit(smu);
+ ret = smu_v13_0_baco_exit(smu);
}
+ if (!ret)
+ adev->gfx.is_poweron = false;
+ return ret;
}
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)


@@ -57,7 +57,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
char fw_name[30];
- char ucode_prefix[30];
+ char ucode_prefix[15];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -229,6 +229,8 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
case IP_VERSION(14, 0, 0):
+ if ((smu->smc_fw_version < 0x5d3a00))
+ dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
default:


@@ -156,15 +156,10 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- if (smu->smc_fw_version > 0x5d3500) {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- } else {
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
- }
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -177,10 +172,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
- if (smu->smc_fw_version > 0x5d3500)
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
- else
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err3_out;
@@ -242,13 +234,13 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_AVERAGE_GFXCLK:
- *value = metrics->AverageGfxclkFrequency;
+ *value = metrics->GfxclkFrequency;
break;
case METRICS_AVERAGE_SOCCLK:
- *value = metrics->AverageSocclkFrequency;
+ *value = metrics->SocclkFrequency;
break;
case METRICS_AVERAGE_VCLK:
- *value = metrics->AverageVclkFrequency;
+ *value = metrics->VclkFrequency;
break;
case METRICS_AVERAGE_DCLK:
*value = 0;
@@ -257,25 +249,25 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = 0;
break;
case METRICS_AVERAGE_FCLK:
- *value = metrics->AverageFclkFrequency;
+ *value = metrics->FclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = metrics->AverageGfxActivity >> 8;
+ *value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
- *value = metrics->AverageVcnActivity >> 8;
+ *value = metrics->VcnActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
case METRICS_CURR_SOCKETPOWER:
- *value = (metrics->AverageSocketPower & 0xff00) +
- ((metrics->AverageSocketPower & 0xff) * 100 >> 8);
+ *value = (metrics->SocketPower / 1000 << 8) +
+ (metrics->SocketPower % 1000 / 10);
break;
case METRICS_TEMPERATURE_EDGE:
- *value = (metrics->GfxTemperature >> 8) *
+ *value = metrics->GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = (metrics->SocTemperature >> 8) *
+ *value = metrics->SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
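The socket-power case above is a units conversion: the metrics table now reports milliwatts, while this sensor path keeps returning integer watts in the high byte and centiwatts in the low byte. A worked example of the same arithmetic as a standalone helper (illustrative only, hypothetical function name): 15500 mW becomes (15 << 8) + 50 = 3890.

#include <stdint.h>

/* e.g. 15500 mW -> (15 << 8) + 50 = 3890, i.e. 15 W plus 50 centiwatts */
static uint32_t mw_to_watts_8_8(uint32_t mw)
{
	return (mw / 1000 << 8) + (mw % 1000 / 10);
}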
@@ -317,107 +309,6 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
static int smu_v14_0_0_legacy_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu, NULL, false);
if (ret)
return ret;
switch (member) {
case METRICS_AVERAGE_GFXCLK:
*value = metrics->GfxclkFrequency;
break;
case METRICS_AVERAGE_SOCCLK:
*value = metrics->SocclkFrequency;
break;
case METRICS_AVERAGE_VCLK:
*value = metrics->VclkFrequency;
break;
case METRICS_AVERAGE_DCLK:
*value = metrics->DclkFrequency;
break;
case METRICS_AVERAGE_UCLK:
*value = metrics->MemclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
*value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_FCLK:
*value = metrics->AverageFclkFrequency;
break;
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->AverageSocketPower << 8) / 1000;
break;
case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_HOTSPOT:
*value = metrics->SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
*value = metrics->ThrottlerStatus;
break;
case METRICS_VOLTAGE_VDDGFX:
*value = metrics->Voltage[0];
break;
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Voltage[1];
break;
case METRICS_SS_APU_SHARE:
/* return the percentage of APU power with respect to APU's power limit.
* percentage is reported, this isn't boost value. Smartshift power
* boost/shift is only when the percentage is more than 100.
*/
if (metrics->StapmOpnLimit > 0)
*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
else
*value = 0;
break;
case METRICS_SS_DGPU_SHARE:
/* return the percentage of dGPU power with respect to dGPU's power limit.
* percentage is reported, this isn't boost value. Smartshift power
* boost/shift is only when the percentage is more than 100.
*/
if ((metrics->dGpuPower > 0) &&
(metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
*value = (metrics->dGpuPower * 100) /
(metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
else
*value = 0;
break;
default:
*value = UINT_MAX;
break;
}
return ret;
}
static int smu_v14_0_0_common_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
if (smu->smc_fw_version > 0x5d3500)
return smu_v14_0_0_get_smu_metrics_data(smu, member, value);
else
return smu_v14_0_0_legacy_get_smu_metrics_data(smu, member, value);
}
static int smu_v14_0_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
@@ -429,69 +320,69 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu,
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXACTIVITY,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_SS_APU_SHARE:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_SS_APU_SHARE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
- ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+ ret = smu_v14_0_0_get_smu_metrics_data(smu,
METRICS_SS_DGPU_SHARE,
(uint32_t *)data);
*size = 4;
@@ -588,7 +479,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
if (ret)
return ret;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -597,32 +488,33 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
sizeof(uint16_t) * 16);
gpu_metrics->temperature_skin = metrics.SkinTemp;
- gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
- gpu_metrics->average_vcn_activity = metrics.AverageVcnActivity;
+ gpu_metrics->average_vcn_activity = metrics.VcnActivity;
memcpy(&gpu_metrics->average_ipu_activity[0],
- &metrics.AverageIpuBusy[0],
+ &metrics.IpuBusy[0],
sizeof(uint16_t) * 8);
memcpy(&gpu_metrics->average_core_c0_activity[0],
- &metrics.AverageCoreC0Residency[0],
+ &metrics.CoreC0Residency[0],
sizeof(uint16_t) * 16);
- gpu_metrics->average_dram_reads = metrics.AverageDRAMReads;
- gpu_metrics->average_dram_writes = metrics.AverageDRAMWrites;
- gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+ gpu_metrics->average_dram_reads = metrics.DRAMReads;
+ gpu_metrics->average_dram_writes = metrics.DRAMWrites;
+ gpu_metrics->average_socket_power = metrics.SocketPower;
gpu_metrics->average_ipu_power = metrics.IpuPower;
gpu_metrics->average_apu_power = metrics.ApuPower;
+ gpu_metrics->average_gfx_power = metrics.GfxPower;
gpu_metrics->average_dgpu_power = metrics.dGpuPower;
- gpu_metrics->average_core_power = metrics.AverageCorePower;
- memcpy(&gpu_metrics->core_power[0],
+ gpu_metrics->average_all_core_power = metrics.AllCorePower;
+ memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 16);
- gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
- gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
- gpu_metrics->average_vpeclk_frequency = metrics.AverageVpeclkFrequency;
- gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
- gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
- gpu_metrics->average_ipuclk_frequency = metrics.AverageIpuclkFrequency;
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@@ -638,68 +530,6 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v3_0);
}
static ssize_t smu_v14_0_0_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v2_1 *gpu_metrics =
(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
SmuMetrics_legacy_t metrics;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu, &metrics, true);
if (ret)
return ret;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
gpu_metrics->temperature_gfx = metrics.GfxTemperature;
gpu_metrics->temperature_soc = metrics.SocTemperature;
memcpy(&gpu_metrics->temperature_core[0],
&metrics.CoreTemperature[0],
sizeof(uint16_t) * 8);
gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
gpu_metrics->average_gfx_activity = metrics.GfxActivity;
gpu_metrics->average_mm_activity = metrics.UvdActivity;
gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
gpu_metrics->average_gfx_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 8);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
sizeof(uint16_t) * 8);
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_1);
}
static ssize_t smu_v14_0_0_common_get_gpu_metrics(struct smu_context *smu,
void **table)
{
if (smu->smc_fw_version > 0x5d3500)
return smu_v14_0_0_get_gpu_metrics(smu, table);
else
return smu_v14_0_0_get_legacy_gpu_metrics(smu, table);
}
static int smu_v14_0_0_mode2_reset(struct smu_context *smu) static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
{ {
int ret; int ret;
@@ -928,7 +758,7 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return -EINVAL; return -EINVAL;
} }
return smu_v14_0_0_common_get_smu_metrics_data(smu, member_type, value); return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
} }
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
@@ -1230,7 +1060,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.read_sensor = smu_v14_0_0_read_sensor, .read_sensor = smu_v14_0_0_read_sensor,
.is_dpm_running = smu_v14_0_0_is_dpm_running, .is_dpm_running = smu_v14_0_0_is_dpm_running,
.set_watermarks_table = smu_v14_0_0_set_watermarks_table, .set_watermarks_table = smu_v14_0_0_set_watermarks_table,
.get_gpu_metrics = smu_v14_0_0_common_get_gpu_metrics, .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics,
.get_enabled_mask = smu_cmn_get_enabled_mask, .get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v14_0_set_driver_table_location, .set_driver_table_location = smu_v14_0_set_driver_table_location,


@@ -1004,6 +1004,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 4): case METRICS_VERSION(2, 4):
structure_size = sizeof(struct gpu_metrics_v2_4); structure_size = sizeof(struct gpu_metrics_v2_4);
break; break;
case METRICS_VERSION(3, 0):
structure_size = sizeof(struct gpu_metrics_v3_0);
break;
default: default:
return; return;
} }
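For context, the hunk above only adds a v3.0 case to the existing revision switch. A minimal, self-contained sketch of that pattern follows; every name is an illustrative placeholder (EXAMPLE_METRICS_VERSION and the example_* structures are assumptions, not the driver's own identifiers):

/*
 * Sketch of revision-keyed gpu_metrics sizing: a (format, content)
 * revision pair selects which metrics layout the caller will fill.
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_METRICS_VERSION(frev, crev)	(((frev) << 16) | (crev))

struct example_metrics_v2_4 { uint16_t fields[64]; };	/* placeholder layouts */
struct example_metrics_v3_0 { uint16_t fields[128]; };

static size_t example_metrics_size(uint8_t frev, uint8_t crev)
{
	switch (EXAMPLE_METRICS_VERSION(frev, crev)) {
	case EXAMPLE_METRICS_VERSION(2, 4):
		return sizeof(struct example_metrics_v2_4);
	case EXAMPLE_METRICS_VERSION(3, 0):
		return sizeof(struct example_metrics_v3_0);
	default:
		return 0;	/* unknown revision: leave the table untouched */
	}
}

The real helper uses the selected size to clear the table and stamp the header revision before the per-field copies seen in the smu_v14_0_0_get_gpu_metrics() hunk earlier.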


@@ -3893,7 +3893,7 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
typedef struct _ATOM_GPIO_PIN_LUT typedef struct _ATOM_GPIO_PIN_LUT
{ {
ATOM_COMMON_TABLE_HEADER sHeader; ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[];
}ATOM_GPIO_PIN_LUT; }ATOM_GPIO_PIN_LUT;
/****************************************************************************/ /****************************************************************************/
@@ -4061,7 +4061,7 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset
UCHAR ucNumberOfSrc; UCHAR ucNumberOfSrc;
USHORT usSrcObjectID[1]; USHORT usSrcObjectID[1];
UCHAR ucNumberOfDst; UCHAR ucNumberOfDst;
USHORT usDstObjectID[1]; USHORT usDstObjectID[];
}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; }ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
@@ -4233,7 +4233,7 @@ typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
ATOM_COMMON_RECORD_HEADER sheader; ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucNumberOfDevice; UCHAR ucNumberOfDevice;
UCHAR ucReserved; UCHAR ucReserved;
ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
}ATOM_CONNECTOR_DEVICE_TAG_RECORD; }ATOM_CONNECTOR_DEVICE_TAG_RECORD;
@@ -4293,7 +4293,7 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
ATOM_COMMON_RECORD_HEADER sheader; ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucFlags; // Future expnadibility UCHAR ucFlags; // Future expnadibility
UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins ATOM_GPIO_PIN_CONTROL_PAIR asGpio[]; // the real gpio pin pair determined by number of pins ucNumberOfPins
}ATOM_OBJECT_GPIO_CNTL_RECORD; }ATOM_OBJECT_GPIO_CNTL_RECORD;
//Definitions for GPIO pin state //Definitions for GPIO pin state
@@ -4444,7 +4444,7 @@ typedef struct _ATOM_BRACKET_LAYOUT_RECORD
UCHAR ucWidth; UCHAR ucWidth;
UCHAR ucConnNum; UCHAR ucConnNum;
UCHAR ucReserved; UCHAR ucReserved;
ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1]; ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[];
}ATOM_BRACKET_LAYOUT_RECORD; }ATOM_BRACKET_LAYOUT_RECORD;
/****************************************************************************/ /****************************************************************************/
@@ -4600,7 +4600,7 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
UCHAR ucVoltageControlAddress; UCHAR ucVoltageControlAddress;
UCHAR ucVoltageControlOffset; UCHAR ucVoltageControlOffset;
ULONG ulReserved; ULONG ulReserved;
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff VOLTAGE_LUT_ENTRY asVolI2cLut[]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3; }ATOM_I2C_VOLTAGE_OBJECT_V3;
// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag // ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
@@ -4625,7 +4625,7 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2]; UCHAR ucReserved[2];
ULONG ulMaxVoltageLevel; ULONG ulMaxVoltageLevel;
LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1]; LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3; }ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
@@ -4753,7 +4753,7 @@ typedef struct _ATOM_POWER_SOURCE_INFO
{ {
ATOM_COMMON_TABLE_HEADER asHeader; ATOM_COMMON_TABLE_HEADER asHeader;
UCHAR asPwrbehave[16]; UCHAR asPwrbehave[16];
ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; ATOM_POWER_SOURCE_OBJECT asPwrObj[];
}ATOM_POWER_SOURCE_INFO; }ATOM_POWER_SOURCE_INFO;
@@ -5440,7 +5440,7 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
typedef struct _ATOM_I2C_DATA_RECORD typedef struct _ATOM_I2C_DATA_RECORD
{ {
UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually UCHAR ucI2CData[]; //I2C data in bytes, should be less than 16 bytes usually
}ATOM_I2C_DATA_RECORD; }ATOM_I2C_DATA_RECORD;
@@ -5451,14 +5451,14 @@ typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
UCHAR ucSSChipID; //SS chip being used UCHAR ucSSChipID; //SS chip being used
UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
UCHAR ucNumOfI2CDataRecords; //number of data block UCHAR ucNumOfI2CDataRecords; //number of data block
ATOM_I2C_DATA_RECORD asI2CData[1]; ATOM_I2C_DATA_RECORD asI2CData[];
}ATOM_I2C_DEVICE_SETUP_INFO; }ATOM_I2C_DEVICE_SETUP_INFO;
//========================================================================================== //==========================================================================================
typedef struct _ATOM_ASIC_MVDD_INFO typedef struct _ATOM_ASIC_MVDD_INFO
{ {
ATOM_COMMON_TABLE_HEADER sHeader; ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[];
}ATOM_ASIC_MVDD_INFO; }ATOM_ASIC_MVDD_INFO;
//========================================================================================== //==========================================================================================
@@ -5520,7 +5520,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
{ {
ATOM_COMMON_TABLE_HEADER sHeader; ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only. ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[]; //this is point only.
}ATOM_ASIC_INTERNAL_SS_INFO_V2; }ATOM_ASIC_INTERNAL_SS_INFO_V2;
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
@@ -5542,7 +5542,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
{ {
ATOM_COMMON_TABLE_HEADER sHeader; ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only. ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[]; //this is pointer only.
}ATOM_ASIC_INTERNAL_SS_INFO_V3; }ATOM_ASIC_INTERNAL_SS_INFO_V3;
@@ -6282,7 +6282,7 @@ typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{ typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
ULONG aulMemData[1]; ULONG aulMemData[];
}ATOM_MEMORY_SETTING_DATA_BLOCK; }ATOM_MEMORY_SETTING_DATA_BLOCK;
@@ -7092,7 +7092,7 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
UCHAR ucDispCaps; UCHAR ucDispCaps;
UCHAR ucReserved[2]; UCHAR ucReserved[2];
ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[]; // for alligment only
}ATOM_DISP_OUT_INFO_V3; }ATOM_DISP_OUT_INFO_V3;
//ucDispCaps //ucDispCaps
@@ -7324,12 +7324,12 @@ typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
USHORT usMaxClockFreq; USHORT usMaxClockFreq;
UCHAR ucEncodeMode; UCHAR ucEncodeMode;
UCHAR ucPhySel; UCHAR ucPhySel;
ULONG ulAnalogSetting[1]; ULONG ulAnalogSetting[];
}CLOCK_CONDITION_SETTING_ENTRY; }CLOCK_CONDITION_SETTING_ENTRY;
typedef struct _CLOCK_CONDITION_SETTING_INFO{ typedef struct _CLOCK_CONDITION_SETTING_INFO{
USHORT usEntrySize; USHORT usEntrySize;
CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1]; CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[];
}CLOCK_CONDITION_SETTING_INFO; }CLOCK_CONDITION_SETTING_INFO;
typedef struct _PHY_CONDITION_REG_VAL{ typedef struct _PHY_CONDITION_REG_VAL{
@@ -7346,27 +7346,27 @@ typedef struct _PHY_CONDITION_REG_VAL_V2{
typedef struct _PHY_CONDITION_REG_INFO{ typedef struct _PHY_CONDITION_REG_INFO{
USHORT usRegIndex; USHORT usRegIndex;
USHORT usSize; USHORT usSize;
PHY_CONDITION_REG_VAL asRegVal[1]; PHY_CONDITION_REG_VAL asRegVal[];
}PHY_CONDITION_REG_INFO; }PHY_CONDITION_REG_INFO;
typedef struct _PHY_CONDITION_REG_INFO_V2{ typedef struct _PHY_CONDITION_REG_INFO_V2{
USHORT usRegIndex; USHORT usRegIndex;
USHORT usSize; USHORT usSize;
PHY_CONDITION_REG_VAL_V2 asRegVal[1]; PHY_CONDITION_REG_VAL_V2 asRegVal[];
}PHY_CONDITION_REG_INFO_V2; }PHY_CONDITION_REG_INFO_V2;
typedef struct _PHY_ANALOG_SETTING_INFO{ typedef struct _PHY_ANALOG_SETTING_INFO{
UCHAR ucEncodeMode; UCHAR ucEncodeMode;
UCHAR ucPhySel; UCHAR ucPhySel;
USHORT usSize; USHORT usSize;
PHY_CONDITION_REG_INFO asAnalogSetting[1]; PHY_CONDITION_REG_INFO asAnalogSetting[];
}PHY_ANALOG_SETTING_INFO; }PHY_ANALOG_SETTING_INFO;
typedef struct _PHY_ANALOG_SETTING_INFO_V2{ typedef struct _PHY_ANALOG_SETTING_INFO_V2{
UCHAR ucEncodeMode; UCHAR ucEncodeMode;
UCHAR ucPhySel; UCHAR ucPhySel;
USHORT usSize; USHORT usSize;
PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1]; PHY_CONDITION_REG_INFO_V2 asAnalogSetting[];
}PHY_ANALOG_SETTING_INFO_V2; }PHY_ANALOG_SETTING_INFO_V2;
typedef struct _GFX_HAVESTING_PARAMETERS { typedef struct _GFX_HAVESTING_PARAMETERS {
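The atombios.h hunks above all apply the same treewide cleanup: a trailing one-element array becomes a C99 flexible array member, so the compiler and fortified memcpy/KASAN checks see the real bound. A minimal sketch of the before/after and of how such a structure is sized is shown below; the lut_* names and the userspace malloc() call are illustrative assumptions, not code from the header:

#include <stdlib.h>

struct lut_old {
	unsigned char count;
	unsigned short entry[1];	/* old style: "1 is only for allocation" */
};

struct lut_new {
	unsigned char count;
	unsigned short entry[];		/* flexible array member */
};

/*
 * sizeof(struct lut_new) now covers only the fixed header, so the element
 * count is added explicitly and bounds checkers can reason about it.
 */
static struct lut_new *lut_alloc(unsigned char count)
{
	struct lut_new *lut = malloc(sizeof(*lut) + count * sizeof(lut->entry[0]));

	if (lut)
		lut->count = count;
	return lut;
}

The conversion does not change member offsets, so parsing of data read from the VBIOS is unaffected; in-kernel callers would typically compute the allocation size with the struct_size() helper instead of the open-coded expression above.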


@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
config DRM_SHMOBILE config DRM_SHMOBILE
tristate "DRM Support for SH Mobile" tristate "DRM Support for SH Mobile"
depends on DRM depends on DRM && PM
depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help help
Choose this option if you have an SH Mobile chipset. Choose this option if you have an SH Mobile chipset.
If M is selected the module will be called shmob-drm. If M is selected the module will be called shmob-drm.


@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
shmob-drm-y := shmob_drm_backlight.o \ shmob-drm-y := shmob_drm_crtc.o \
shmob_drm_crtc.o \
shmob_drm_drv.o \ shmob_drm_drv.o \
shmob_drm_kms.o \ shmob_drm_kms.o \
shmob_drm_plane.o shmob_drm_plane.o


@@ -1,82 +0,0 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_backlight.c -- SH Mobile DRM Backlight
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
#include <linux/backlight.h>
#include "shmob_drm_backlight.h"
#include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h"
static int shmob_drm_backlight_update(struct backlight_device *bdev)
{
struct shmob_drm_connector *scon = bl_get_data(bdev);
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
int brightness = backlight_get_brightness(bdev);
return bdata->set_brightness(brightness);
}
static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
{
struct shmob_drm_connector *scon = bl_get_data(bdev);
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
return bdata->get_brightness();
}
static const struct backlight_ops shmob_drm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = shmob_drm_backlight_update,
.get_brightness = shmob_drm_backlight_get_brightness,
};
void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
{
if (scon->backlight == NULL)
return;
scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
backlight_update_status(scon->backlight);
}
int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
{
struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
struct drm_connector *connector = &scon->connector;
struct drm_device *dev = connector->dev;
struct backlight_device *backlight;
if (!bdata->max_brightness)
return 0;
backlight = backlight_device_register(bdata->name, dev->dev, scon,
&shmob_drm_backlight_ops, NULL);
if (IS_ERR(backlight)) {
dev_err(dev->dev, "unable to register backlight device: %ld\n",
PTR_ERR(backlight));
return PTR_ERR(backlight);
}
backlight->props.max_brightness = bdata->max_brightness;
backlight->props.brightness = bdata->max_brightness;
backlight->props.power = FB_BLANK_POWERDOWN;
backlight_update_status(backlight);
scon->backlight = backlight;
return 0;
}
void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
{
backlight_device_unregister(scon->backlight);
}


@@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm_backlight.h -- SH Mobile DRM Backlight
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
#ifndef __SHMOB_DRM_BACKLIGHT_H__
#define __SHMOB_DRM_BACKLIGHT_H__
struct shmob_drm_connector;
void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
#endif /* __SHMOB_DRM_BACKLIGHT_H__ */


@@ -7,9 +7,18 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com) * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/ */
#include <linux/backlight.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h> #include <drm/drm_fb_dma_helper.h>
@@ -18,85 +27,123 @@
#include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h> #include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h> #include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h> #include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h> #include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h> #include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h" #include <video/videomode.h>
#include "shmob_drm_crtc.h" #include "shmob_drm_crtc.h"
#include "shmob_drm_drv.h" #include "shmob_drm_drv.h"
#include "shmob_drm_kms.h" #include "shmob_drm_kms.h"
#include "shmob_drm_plane.h" #include "shmob_drm_plane.h"
#include "shmob_drm_regs.h" #include "shmob_drm_regs.h"
/*
* TODO: panel support
*/
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* Clock management * Page Flip
*/ */
static int shmob_drm_clk_on(struct shmob_drm_device *sdev) void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
{ {
int ret; struct drm_pending_vblank_event *event;
struct drm_device *dev = scrtc->base.dev;
unsigned long flags;
if (sdev->clock) { spin_lock_irqsave(&dev->event_lock, flags);
ret = clk_prepare_enable(sdev->clock); event = scrtc->event;
if (ret < 0) scrtc->event = NULL;
return ret; if (event) {
drm_crtc_send_vblank_event(&scrtc->base, event);
wake_up(&scrtc->flip_wait);
drm_crtc_vblank_put(&scrtc->base);
} }
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
} }
static void shmob_drm_clk_off(struct shmob_drm_device *sdev) static bool shmob_drm_crtc_page_flip_pending(struct shmob_drm_crtc *scrtc)
{ {
if (sdev->clock) struct drm_device *dev = scrtc->base.dev;
clk_disable_unprepare(sdev->clock); unsigned long flags;
bool pending;
spin_lock_irqsave(&dev->event_lock, flags);
pending = scrtc->event != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
return pending;
}
static void shmob_drm_crtc_wait_page_flip(struct shmob_drm_crtc *scrtc)
{
struct drm_crtc *crtc = &scrtc->base;
struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
if (wait_event_timeout(scrtc->flip_wait,
!shmob_drm_crtc_page_flip_pending(scrtc),
msecs_to_jiffies(50)))
return;
dev_warn(sdev->dev, "page flip timeout\n");
shmob_drm_crtc_finish_page_flip(scrtc);
} }
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* CRTC * CRTC
*/ */
static const struct {
u32 fmt;
u32 ldmt1r;
} shmob_drm_bus_fmts[] = {
{ MEDIA_BUS_FMT_RGB888_3X8, LDMT1R_MIFTYP_RGB8 },
{ MEDIA_BUS_FMT_RGB666_2X9_BE, LDMT1R_MIFTYP_RGB9 },
{ MEDIA_BUS_FMT_RGB888_2X12_BE, LDMT1R_MIFTYP_RGB12A },
{ MEDIA_BUS_FMT_RGB444_1X12, LDMT1R_MIFTYP_RGB12B },
{ MEDIA_BUS_FMT_RGB565_1X16, LDMT1R_MIFTYP_RGB16 },
{ MEDIA_BUS_FMT_RGB666_1X18, LDMT1R_MIFTYP_RGB18 },
{ MEDIA_BUS_FMT_RGB888_1X24, LDMT1R_MIFTYP_RGB24 },
{ MEDIA_BUS_FMT_UYVY8_1X16, LDMT1R_MIFTYP_YCBCR },
};
static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc) static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
{ {
struct drm_crtc *crtc = &scrtc->crtc; struct drm_crtc *crtc = &scrtc->base;
struct shmob_drm_device *sdev = crtc->dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
const struct shmob_drm_interface_data *idata = &sdev->pdata->iface; const struct drm_display_info *info = &sdev->connector->display_info;
const struct drm_display_mode *mode = &crtc->mode; const struct drm_display_mode *mode = &crtc->mode;
unsigned int i;
u32 value; u32 value;
value = sdev->ldmt1r if (!info->num_bus_formats || !info->bus_formats) {
| ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL) dev_warn(sdev->dev, "No bus format reported, using RGB888\n");
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL) value = LDMT1R_MIFTYP_RGB24;
| ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0) } else {
| ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0) for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) {
| ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0) if (shmob_drm_bus_fmts[i].fmt == info->bus_formats[0])
| ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0) break;
| ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0); }
lcdc_write(sdev, LDMT1R, value); if (i < ARRAY_SIZE(shmob_drm_bus_fmts)) {
value = shmob_drm_bus_fmts[i].ldmt1r;
if (idata->interface >= SHMOB_DRM_IFACE_SYS8A && } else {
idata->interface <= SHMOB_DRM_IFACE_SYS24) { dev_warn(sdev->dev,
/* Setup SYS bus. */ "unsupported bus format 0x%x, using RGB888\n",
value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT) info->bus_formats[0]);
| (idata->sys.vsync_active_high ? LDMT2R_RSV : 0) value = LDMT1R_MIFTYP_RGB24;
| (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0) }
| (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
| (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
| (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
lcdc_write(sdev, LDMT2R, value);
value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
| (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
| (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
| (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
lcdc_write(sdev, LDMT3R, value);
} }
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
value |= LDMT1R_DWPOL;
if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
value |= LDMT1R_DIPOL;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
value |= LDMT1R_VPOL;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
value |= LDMT1R_HPOL;
lcdc_write(sdev, LDMT1R, value);
value = ((mode->hdisplay / 8) << 16) /* HDCN */ value = ((mode->hdisplay / 8) << 16) /* HDCN */
| (mode->htotal / 8); /* HTCN */ | (mode->htotal / 8); /* HTCN */
lcdc_write(sdev, LDHCNR, value); lcdc_write(sdev, LDHCNR, value);
@@ -121,7 +168,7 @@ static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start) static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
{ {
struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(scrtc->base.dev);
u32 value; u32 value;
value = lcdc_read(sdev, LDCNT2R); value = lcdc_read(sdev, LDCNT2R);
@@ -145,34 +192,23 @@ static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
} }
} }
/* static inline struct shmob_drm_crtc *to_shmob_crtc(struct drm_crtc *crtc)
* shmob_drm_crtc_start - Configure and start the LCDC
* @scrtc: the SH Mobile CRTC
*
* Configure and start the LCDC device. External devices (clocks, MERAM, panels,
* ...) are not touched by this function.
*/
static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
{ {
struct drm_crtc *crtc = &scrtc->crtc; return container_of(crtc, struct shmob_drm_crtc, base);
struct shmob_drm_device *sdev = crtc->dev->dev_private; }
const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
const struct shmob_drm_format_info *format; static void shmob_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *dev = sdev->ddev; struct drm_atomic_state *state)
struct drm_plane *plane; {
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
unsigned int clk_div = sdev->config.clk_div;
struct device *dev = sdev->dev;
u32 value; u32 value;
int ret; int ret;
if (scrtc->started) ret = pm_runtime_resume_and_get(dev);
return; if (ret)
format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (WARN_ON(format == NULL))
return;
/* Enable clocks before accessing the hardware. */
ret = shmob_drm_clk_on(sdev);
if (ret < 0)
return; return;
/* Reset and enable the LCDC. */ /* Reset and enable the LCDC. */
@@ -188,79 +224,50 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
lcdc_write(sdev, LDPMR, 0); lcdc_write(sdev, LDPMR, 0);
value = sdev->lddckr; value = sdev->lddckr;
if (idata->clk_div) { if (clk_div) {
/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
* denominator. * denominator.
*/ */
lcdc_write(sdev, LDDCKPAT1R, 0); lcdc_write(sdev, LDDCKPAT1R, 0);
lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1); lcdc_write(sdev, LDDCKPAT2R, (1 << (clk_div / 2)) - 1);
if (idata->clk_div == 1) if (clk_div == 1)
value |= LDDCKR_MOSEL; value |= LDDCKR_MOSEL;
else else
value |= idata->clk_div; value |= clk_div;
} }
lcdc_write(sdev, LDDCKR, value); lcdc_write(sdev, LDDCKR, value);
lcdc_write(sdev, LDDCKSTPR, 0); lcdc_write(sdev, LDDCKSTPR, 0);
lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0); lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
/* TODO: Setup SYS panel */
/* Setup geometry, format, frame buffer memory and operation mode. */ /* Setup geometry, format, frame buffer memory and operation mode. */
shmob_drm_crtc_setup_geometry(scrtc); shmob_drm_crtc_setup_geometry(scrtc);
/* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
lcdc_write(sdev, LDMLSR, scrtc->line_size);
lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
if (format->yuv)
lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
lcdc_write(sdev, LDSM1R, 0); lcdc_write(sdev, LDSM1R, 0);
/* Word and long word swap. */
switch (format->fourcc) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV42:
value = LDDDSR_LS | LDDDSR_WS;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
default:
value = LDDDSR_LS;
break;
}
lcdc_write(sdev, LDDDSR, value);
/* Setup planes. */
drm_for_each_legacy_plane(plane, dev) {
if (plane->crtc == crtc)
shmob_drm_plane_setup(plane);
}
/* Enable the display output. */ /* Enable the display output. */
lcdc_write(sdev, LDCNT1R, LDCNT1R_DE); lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
shmob_drm_crtc_start_stop(scrtc, true); shmob_drm_crtc_start_stop(scrtc, true);
scrtc->started = true; /* Turn vertical blank interrupt reporting back on. */
drm_crtc_vblank_on(crtc);
} }
static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc) static void shmob_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{ {
struct drm_crtc *crtc = &scrtc->crtc; struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct shmob_drm_device *sdev = crtc->dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
if (!scrtc->started) /*
return; * Disable vertical blank interrupt reporting. We first need to wait
* for page flip completion before stopping the CRTC as userspace
* expects page flips to eventually complete.
*/
shmob_drm_crtc_wait_page_flip(scrtc);
drm_crtc_vblank_off(crtc);
/* Stop the LCDC. */ /* Stop the LCDC. */
shmob_drm_crtc_start_stop(scrtc, false); shmob_drm_crtc_start_stop(scrtc, false);
@@ -268,145 +275,31 @@ static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
/* Disable the display output. */ /* Disable the display output. */
lcdc_write(sdev, LDCNT1R, 0); lcdc_write(sdev, LDCNT1R, 0);
/* Stop clocks. */ pm_runtime_put(sdev->dev);
shmob_drm_clk_off(sdev);
scrtc->started = false;
} }
void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc) static void shmob_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{ {
shmob_drm_crtc_stop(scrtc); struct drm_pending_vblank_event *event;
} struct drm_device *dev = crtc->dev;
unsigned long flags;
void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc) if (crtc->state->event) {
{ spin_lock_irqsave(&dev->event_lock, flags);
if (scrtc->dpms != DRM_MODE_DPMS_ON) event = crtc->state->event;
return; crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
shmob_drm_crtc_start(scrtc); spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
int x, int y)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
gem = drm_fb_dma_get_gem_obj(fb, 0);
scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (scrtc->format->yuv) {
bpp = scrtc->format->bpp - 8;
gem = drm_fb_dma_get_gem_obj(fb, 1);
scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
} }
} }
static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
{
struct drm_crtc *crtc = &scrtc->crtc;
struct shmob_drm_device *sdev = crtc->dev->dev_private;
shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
if (scrtc->format->yuv)
lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
}
#define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc)
static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
if (scrtc->dpms == mode)
return;
if (mode == DRM_MODE_DPMS_ON)
shmob_drm_crtc_start(scrtc);
else
shmob_drm_crtc_stop(scrtc);
scrtc->dpms = mode;
}
static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
{
shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct shmob_drm_device *sdev = crtc->dev->dev_private;
const struct shmob_drm_format_info *format;
format = shmob_drm_format_info(crtc->primary->fb->format->format);
if (format == NULL) {
dev_dbg(sdev->dev, "mode_set: unsupported format %p4cc\n",
&crtc->primary->fb->format->format);
return -EINVAL;
}
scrtc->format = format;
scrtc->line_size = crtc->primary->fb->pitches[0];
shmob_drm_crtc_compute_base(scrtc, x, y);
return 0;
}
static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
{
shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
return 0;
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = { static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.dpms = shmob_drm_crtc_dpms, .atomic_flush = shmob_drm_crtc_atomic_flush,
.prepare = shmob_drm_crtc_mode_prepare, .atomic_enable = shmob_drm_crtc_atomic_enable,
.commit = shmob_drm_crtc_mode_commit, .atomic_disable = shmob_drm_crtc_atomic_disable,
.mode_set = shmob_drm_crtc_mode_set,
.mode_set_base = shmob_drm_crtc_mode_set_base,
}; };
void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
{
struct drm_pending_vblank_event *event;
struct drm_device *dev = scrtc->crtc.dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = scrtc->event;
scrtc->event = NULL;
if (event) {
drm_crtc_send_vblank_event(&scrtc->crtc, event);
drm_crtc_vblank_put(&scrtc->crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, struct drm_pending_vblank_event *event,
@@ -414,7 +307,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx) struct drm_modeset_acquire_ctx *ctx)
{ {
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct drm_device *dev = scrtc->crtc.dev; struct drm_device *dev = scrtc->base.dev;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags); spin_lock_irqsave(&dev->event_lock, flags);
@@ -424,12 +317,11 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
} }
spin_unlock_irqrestore(&dev->event_lock, flags); spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->primary->fb = fb; drm_atomic_set_fb_for_plane(crtc->primary->state, fb);
shmob_drm_crtc_update_base(scrtc);
if (event) { if (event) {
event->pipe = 0; event->pipe = 0;
drm_crtc_vblank_get(&scrtc->crtc); drm_crtc_vblank_get(&scrtc->base);
spin_lock_irqsave(&dev->event_lock, flags); spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event; scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags); spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -457,7 +349,7 @@ static void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev,
static int shmob_drm_enable_vblank(struct drm_crtc *crtc) static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
{ {
struct shmob_drm_device *sdev = crtc->dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
shmob_drm_crtc_enable_vblank(sdev, true); shmob_drm_crtc_enable_vblank(sdev, true);
@@ -466,88 +358,65 @@ static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
static void shmob_drm_disable_vblank(struct drm_crtc *crtc) static void shmob_drm_disable_vblank(struct drm_crtc *crtc)
{ {
struct shmob_drm_device *sdev = crtc->dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
shmob_drm_crtc_enable_vblank(sdev, false); shmob_drm_crtc_enable_vblank(sdev, false);
} }
static const struct drm_crtc_funcs crtc_funcs = { static const struct drm_crtc_funcs crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup, .destroy = drm_crtc_cleanup,
.set_config = drm_crtc_helper_set_config, .set_config = drm_atomic_helper_set_config,
.page_flip = shmob_drm_crtc_page_flip, .page_flip = shmob_drm_crtc_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = shmob_drm_enable_vblank, .enable_vblank = shmob_drm_enable_vblank,
.disable_vblank = shmob_drm_disable_vblank, .disable_vblank = shmob_drm_disable_vblank,
}; };
static const uint32_t modeset_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
};
static const struct drm_plane_funcs primary_plane_funcs = {
DRM_PLANE_NON_ATOMIC_FUNCS,
};
int shmob_drm_crtc_create(struct shmob_drm_device *sdev) int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
{ {
struct drm_crtc *crtc = &sdev->crtc.crtc; struct drm_crtc *crtc = &sdev->crtc.base;
struct drm_plane *primary; struct drm_plane *primary, *plane;
unsigned int i;
int ret; int ret;
sdev->crtc.dpms = DRM_MODE_DPMS_OFF; init_waitqueue_head(&sdev->crtc.flip_wait);
primary = __drm_universal_plane_alloc(sdev->ddev, sizeof(*primary), 0, primary = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_PRIMARY, 0);
0, &primary_plane_funcs,
modeset_formats,
ARRAY_SIZE(modeset_formats),
NULL, DRM_PLANE_TYPE_PRIMARY,
NULL);
if (IS_ERR(primary)) if (IS_ERR(primary))
return PTR_ERR(primary); return PTR_ERR(primary);
ret = drm_crtc_init_with_planes(sdev->ddev, crtc, primary, NULL, for (i = 1; i < 5; ++i) {
&crtc_funcs, NULL); plane = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_OVERLAY, i);
if (ret < 0) { if (IS_ERR(plane))
drm_plane_cleanup(primary); return PTR_ERR(plane);
kfree(primary);
return ret;
} }
ret = drm_crtc_init_with_planes(&sdev->ddev, crtc, primary, NULL,
&crtc_funcs, NULL);
if (ret < 0)
return ret;
drm_crtc_helper_add(crtc, &crtc_helper_funcs); drm_crtc_helper_add(crtc, &crtc_helper_funcs);
/* Start with vertical blank interrupt reporting disabled. */
drm_crtc_vblank_off(crtc);
return 0; return 0;
} }
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* Encoder * Legacy Encoder
*/ */
#define to_shmob_encoder(e) \
container_of(e, struct shmob_drm_encoder, encoder)
static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
struct shmob_drm_device *sdev = encoder->dev->dev_private;
struct shmob_drm_connector *scon = &sdev->connector;
if (senc->dpms == mode)
return;
shmob_drm_backlight_dpms(scon, mode);
senc->dpms = mode;
}
static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder, static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
{ {
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct shmob_drm_device *sdev = dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(dev);
struct drm_connector *connector = &sdev->connector.connector; struct drm_connector *connector = sdev->connector;
const struct drm_display_mode *panel_mode; const struct drm_display_mode *panel_mode;
if (list_empty(&connector->modes)) { if (list_empty(&connector->modes)) {
@@ -563,60 +432,61 @@ static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
return true; return true;
} }
static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
{
/* No-op, everything is handled in the CRTC code. */
}
static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* No-op, everything is handled in the CRTC code. */
}
static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
{
/* No-op, everything is handled in the CRTC code. */
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = { static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.dpms = shmob_drm_encoder_dpms,
.mode_fixup = shmob_drm_encoder_mode_fixup, .mode_fixup = shmob_drm_encoder_mode_fixup,
.prepare = shmob_drm_encoder_mode_prepare,
.commit = shmob_drm_encoder_mode_commit,
.mode_set = shmob_drm_encoder_mode_set,
}; };
/* -----------------------------------------------------------------------------
* Encoder
*/
int shmob_drm_encoder_create(struct shmob_drm_device *sdev) int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
{ {
struct drm_encoder *encoder = &sdev->encoder.encoder; struct drm_encoder *encoder = &sdev->encoder;
struct drm_bridge *bridge;
int ret; int ret;
sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
encoder->possible_crtcs = 1; encoder->possible_crtcs = 1;
ret = drm_simple_encoder_init(sdev->ddev, encoder, ret = drm_simple_encoder_init(&sdev->ddev, encoder,
DRM_MODE_ENCODER_LVDS); DRM_MODE_ENCODER_DPI);
if (ret < 0) if (ret < 0)
return ret; return ret;
drm_encoder_helper_add(encoder, &encoder_helper_funcs); if (sdev->pdata) {
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
return 0;
}
/* Create a panel bridge */
bridge = devm_drm_of_get_bridge(sdev->dev, sdev->dev->of_node, 0, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
/* Attach the bridge to the encoder */
ret = drm_bridge_attach(encoder, bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(sdev->dev, "failed to attach bridge: %pe\n",
ERR_PTR(ret));
return ret;
}
return 0; return 0;
} }
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* Connector * Legacy Connector
*/ */
#define to_shmob_connector(c) \ static inline struct shmob_drm_connector *to_shmob_connector(struct drm_connector *connector)
container_of(c, struct shmob_drm_connector, connector) {
return container_of(connector, struct shmob_drm_connector, base);
}
static int shmob_drm_connector_get_modes(struct drm_connector *connector) static int shmob_drm_connector_get_modes(struct drm_connector *connector)
{ {
struct shmob_drm_device *sdev = connector->dev->dev_private; struct shmob_drm_connector *scon = to_shmob_connector(connector);
struct drm_display_mode *mode; struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev); mode = drm_mode_create(connector->dev);
@@ -624,23 +494,11 @@ static int shmob_drm_connector_get_modes(struct drm_connector *connector)
return 0; return 0;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
mode->clock = sdev->pdata->panel.mode.clock;
mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
mode->htotal = sdev->pdata->panel.mode.htotal;
mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
mode->vtotal = sdev->pdata->panel.mode.vtotal;
mode->flags = sdev->pdata->panel.mode.flags;
drm_mode_set_name(mode); drm_display_mode_from_videomode(scon->mode, mode);
drm_mode_probed_add(connector, mode); drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = sdev->pdata->panel.width_mm;
connector->display_info.height_mm = sdev->pdata->panel.height_mm;
return 1; return 1;
} }
@@ -659,54 +517,106 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
static void shmob_drm_connector_destroy(struct drm_connector *connector) static void shmob_drm_connector_destroy(struct drm_connector *connector)
{ {
struct shmob_drm_connector *scon = to_shmob_connector(connector);
shmob_drm_backlight_exit(scon);
drm_connector_unregister(connector); drm_connector_unregister(connector);
drm_connector_cleanup(connector); drm_connector_cleanup(connector);
kfree(connector);
} }
static const struct drm_connector_funcs connector_funcs = { static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = shmob_drm_connector_destroy, .destroy = shmob_drm_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
}; };
static struct drm_connector *
shmob_drm_connector_init(struct shmob_drm_device *sdev,
struct drm_encoder *encoder)
{
u32 bus_fmt = sdev->pdata->iface.bus_fmt;
struct shmob_drm_connector *scon;
struct drm_connector *connector;
struct drm_display_info *info;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) {
if (shmob_drm_bus_fmts[i].fmt == bus_fmt)
break;
}
if (i == ARRAY_SIZE(shmob_drm_bus_fmts)) {
dev_err(sdev->dev, "unsupported bus format 0x%x\n", bus_fmt);
return ERR_PTR(-EINVAL);
}
scon = kzalloc(sizeof(*scon), GFP_KERNEL);
if (!scon)
return ERR_PTR(-ENOMEM);
connector = &scon->base;
scon->encoder = encoder;
scon->mode = &sdev->pdata->panel.mode;
info = &connector->display_info;
info->width_mm = sdev->pdata->panel.width_mm;
info->height_mm = sdev->pdata->panel.height_mm;
if (scon->mode->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
if (scon->mode->flags & DISPLAY_FLAGS_DE_LOW)
info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
ret = drm_display_info_set_bus_formats(info, &bus_fmt, 1);
if (ret < 0) {
kfree(scon);
return ERR_PTR(ret);
}
ret = drm_connector_init(&sdev->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_DPI);
if (ret < 0) {
kfree(scon);
return ERR_PTR(ret);
}
drm_connector_helper_add(connector, &connector_helper_funcs);
return connector;
}
/* -----------------------------------------------------------------------------
* Connector
*/
int shmob_drm_connector_create(struct shmob_drm_device *sdev, int shmob_drm_connector_create(struct shmob_drm_device *sdev,
struct drm_encoder *encoder) struct drm_encoder *encoder)
{ {
struct drm_connector *connector = &sdev->connector.connector; struct drm_connector *connector;
int ret; int ret;
sdev->connector.encoder = encoder; if (sdev->pdata)
connector = shmob_drm_connector_init(sdev, encoder);
connector->display_info.width_mm = sdev->pdata->panel.width_mm; else
connector->display_info.height_mm = sdev->pdata->panel.height_mm; connector = drm_bridge_connector_init(&sdev->ddev, encoder);
if (IS_ERR(connector)) {
ret = drm_connector_init(sdev->ddev, connector, &connector_funcs, dev_err(sdev->dev, "failed to created connector: %pe\n",
DRM_MODE_CONNECTOR_LVDS); connector);
if (ret < 0) return PTR_ERR(connector);
return ret; }
drm_connector_helper_add(connector, &connector_helper_funcs);
ret = shmob_drm_backlight_init(&sdev->connector);
if (ret < 0)
goto err_cleanup;
ret = drm_connector_attach_encoder(connector, encoder); ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) if (ret < 0)
goto err_backlight; goto error;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); sdev->connector = connector;
return 0; return 0;
err_backlight: error:
shmob_drm_backlight_exit(&sdev->connector);
err_cleanup:
drm_connector_cleanup(connector); drm_connector_cleanup(connector);
return ret; return ret;
} }


@@ -14,39 +14,30 @@
#include <drm/drm_connector.h> #include <drm/drm_connector.h>
#include <drm/drm_encoder.h> #include <drm/drm_encoder.h>
struct backlight_device; #include <linux/wait.h>
#include <video/videomode.h>
struct drm_pending_vblank_event; struct drm_pending_vblank_event;
struct shmob_drm_device; struct shmob_drm_device;
struct shmob_drm_format_info; struct shmob_drm_format_info;
struct shmob_drm_crtc { struct shmob_drm_crtc {
struct drm_crtc crtc; struct drm_crtc base;
struct drm_pending_vblank_event *event; struct drm_pending_vblank_event *event;
int dpms; wait_queue_head_t flip_wait;
const struct shmob_drm_format_info *format;
unsigned long dma[2];
unsigned int line_size;
bool started;
};
struct shmob_drm_encoder {
struct drm_encoder encoder;
int dpms;
}; };
/* Legacy connector */
struct shmob_drm_connector { struct shmob_drm_connector {
struct drm_connector connector; struct drm_connector base;
struct drm_encoder *encoder; struct drm_encoder *encoder;
const struct videomode *mode;
struct backlight_device *backlight;
}; };
int shmob_drm_crtc_create(struct shmob_drm_device *sdev); int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc); void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
int shmob_drm_encoder_create(struct shmob_drm_device *sdev); int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
int shmob_drm_connector_create(struct shmob_drm_device *sdev, int shmob_drm_connector_create(struct shmob_drm_device *sdev,


@@ -11,13 +11,17 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm.h> #include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h> #include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h> #include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h> #include <drm/drm_module.h>
#include <drm/drm_probe_helper.h> #include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h> #include <drm/drm_vblank.h>
@@ -31,57 +35,23 @@
* Hardware initialization * Hardware initialization
*/ */
static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
{
static const u32 ldmt1r[] = {
[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
[SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
[SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
[SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
[SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
[SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
[SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
[SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
[SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
[SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
[SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
[SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
[SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
[SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
[SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
[SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
[SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
[SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
[SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
};
if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
dev_err(sdev->dev, "invalid interface type %u\n",
sdev->pdata->iface.interface);
return -EINVAL;
}
sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
return 0;
}
static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev, static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
enum shmob_drm_clk_source clksrc) enum shmob_drm_clk_source clksrc)
{ {
struct clk *clk; struct clk *clk;
char *clkname; char *clkname;
switch (clksrc) { switch (clksrc) {
case SHMOB_DRM_CLK_BUS: case SHMOB_DRM_CLK_BUS:
clkname = "bus_clk"; clkname = "fck";
sdev->lddckr = LDDCKR_ICKSEL_BUS; sdev->lddckr = LDDCKR_ICKSEL_BUS;
break; break;
case SHMOB_DRM_CLK_PERIPHERAL: case SHMOB_DRM_CLK_PERIPHERAL:
clkname = "peripheral_clk"; clkname = "media";
sdev->lddckr = LDDCKR_ICKSEL_MIPI; sdev->lddckr = LDDCKR_ICKSEL_MIPI;
break; break;
case SHMOB_DRM_CLK_EXTERNAL: case SHMOB_DRM_CLK_EXTERNAL:
clkname = NULL; clkname = "lclk";
sdev->lddckr = LDDCKR_ICKSEL_HDMI; sdev->lddckr = LDDCKR_ICKSEL_HDMI;
break; break;
default: default:
@@ -105,7 +75,7 @@ static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
static irqreturn_t shmob_drm_irq(int irq, void *arg) static irqreturn_t shmob_drm_irq(int irq, void *arg)
{ {
struct drm_device *dev = arg; struct drm_device *dev = arg;
struct shmob_drm_device *sdev = dev->dev_private; struct shmob_drm_device *sdev = to_shmob_device(dev);
unsigned long flags; unsigned long flags;
u32 status; u32 status;
@@ -119,7 +89,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
spin_unlock_irqrestore(&sdev->irq_lock, flags); spin_unlock_irqrestore(&sdev->irq_lock, flags);
if (status & LDINTR_VES) { if (status & LDINTR_VES) {
drm_handle_vblank(dev, 0); drm_crtc_handle_vblank(&sdev->crtc.base);
shmob_drm_crtc_finish_page_flip(&sdev->crtc); shmob_drm_crtc_finish_page_flip(&sdev->crtc);
} }
@@ -129,7 +99,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops); DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = { static const struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET, .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS, DRM_GEM_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops, .fops = &shmob_drm_fops,
.name = "shmob-drm", .name = "shmob-drm",
@@ -147,26 +117,45 @@ static int shmob_drm_pm_suspend(struct device *dev)
{ {
struct shmob_drm_device *sdev = dev_get_drvdata(dev); struct shmob_drm_device *sdev = dev_get_drvdata(dev);
drm_kms_helper_poll_disable(sdev->ddev); return drm_mode_config_helper_suspend(&sdev->ddev);
shmob_drm_crtc_suspend(&sdev->crtc);
return 0;
} }
static int shmob_drm_pm_resume(struct device *dev) static int shmob_drm_pm_resume(struct device *dev)
{ {
struct shmob_drm_device *sdev = dev_get_drvdata(dev); struct shmob_drm_device *sdev = dev_get_drvdata(dev);
drm_modeset_lock_all(sdev->ddev); return drm_mode_config_helper_resume(&sdev->ddev);
shmob_drm_crtc_resume(&sdev->crtc); }
drm_modeset_unlock_all(sdev->ddev);
static int shmob_drm_pm_runtime_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
if (sdev->clock)
clk_disable_unprepare(sdev->clock);
drm_kms_helper_poll_enable(sdev->ddev);
return 0; return 0;
} }
static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops, static int shmob_drm_pm_runtime_resume(struct device *dev)
shmob_drm_pm_suspend, shmob_drm_pm_resume); {
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
int ret;
if (sdev->clock) {
ret = clk_prepare_enable(sdev->clock);
if (ret < 0)
return ret;
}
return 0;
}
static const struct dev_pm_ops shmob_drm_pm_ops = {
SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
RUNTIME_PM_OPS(shmob_drm_pm_runtime_suspend,
shmob_drm_pm_runtime_resume, NULL)
};
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* Platform driver * Platform driver
@@ -175,37 +164,45 @@ static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
static void shmob_drm_remove(struct platform_device *pdev) static void shmob_drm_remove(struct platform_device *pdev)
{ {
struct shmob_drm_device *sdev = platform_get_drvdata(pdev); struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
struct drm_device *ddev = sdev->ddev; struct drm_device *ddev = &sdev->ddev;
drm_dev_unregister(ddev); drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
drm_kms_helper_poll_fini(ddev); drm_kms_helper_poll_fini(ddev);
free_irq(sdev->irq, ddev);
drm_dev_put(ddev);
} }
static int shmob_drm_probe(struct platform_device *pdev) static int shmob_drm_probe(struct platform_device *pdev)
{ {
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data; struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
const struct shmob_drm_config *config;
struct shmob_drm_device *sdev; struct shmob_drm_device *sdev;
struct drm_device *ddev; struct drm_device *ddev;
unsigned int i;
int ret; int ret;
if (pdata == NULL) { config = of_device_get_match_data(&pdev->dev);
if (!config && !pdata) {
dev_err(&pdev->dev, "no platform data\n"); dev_err(&pdev->dev, "no platform data\n");
return -EINVAL; return -EINVAL;
} }
/* /*
* Allocate and initialize the driver private data, I/O resources and * Allocate and initialize the DRM device, driver private data, I/O
* clocks. * resources and clocks.
*/ */
sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL); sdev = devm_drm_dev_alloc(&pdev->dev, &shmob_drm_driver,
if (sdev == NULL) struct shmob_drm_device, ddev);
return -ENOMEM; if (IS_ERR(sdev))
return PTR_ERR(sdev);
ddev = &sdev->ddev;
sdev->dev = &pdev->dev; sdev->dev = &pdev->dev;
sdev->pdata = pdata; if (config) {
sdev->config = *config;
} else {
sdev->pdata = pdata;
sdev->config.clk_source = pdata->clk_source;
sdev->config.clk_div = pdata->iface.clk_div;
}
spin_lock_init(&sdev->irq_lock); spin_lock_init(&sdev->irq_lock);
platform_set_drvdata(pdev, sdev); platform_set_drvdata(pdev, sdev);
@@ -214,49 +211,32 @@ static int shmob_drm_probe(struct platform_device *pdev)
if (IS_ERR(sdev->mmio)) if (IS_ERR(sdev->mmio))
return PTR_ERR(sdev->mmio); return PTR_ERR(sdev->mmio);
ret = shmob_drm_setup_clocks(sdev, pdata->clk_source); ret = shmob_drm_setup_clocks(sdev, sdev->config.clk_source);
if (ret < 0) if (ret < 0)
return ret; return ret;
ret = shmob_drm_init_interface(sdev); ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0) if (ret)
return ret; return ret;
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&shmob_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
sdev->ddev = ddev;
ddev->dev_private = sdev;
ret = shmob_drm_modeset_init(sdev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize mode setting\n");
goto err_free_drm_dev;
}
for (i = 0; i < 4; ++i) {
ret = shmob_drm_plane_create(sdev, i);
if (ret < 0) {
dev_err(&pdev->dev, "failed to create plane %u\n", i);
goto err_modeset_cleanup;
}
}
ret = drm_vblank_init(ddev, 1); ret = drm_vblank_init(ddev, 1);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize vblank\n"); dev_err(&pdev->dev, "failed to initialize vblank\n");
goto err_modeset_cleanup; return ret;
} }
ret = shmob_drm_modeset_init(sdev);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to initialize mode setting\n");
ret = platform_get_irq(pdev, 0); ret = platform_get_irq(pdev, 0);
if (ret < 0) if (ret < 0)
goto err_modeset_cleanup; goto err_modeset_cleanup;
sdev->irq = ret; sdev->irq = ret;
ret = request_irq(sdev->irq, shmob_drm_irq, 0, ddev->driver->name, ret = devm_request_irq(&pdev->dev, sdev->irq, shmob_drm_irq, 0,
ddev); ddev->driver->name, ddev);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ handler\n"); dev_err(&pdev->dev, "failed to install IRQ handler\n");
goto err_modeset_cleanup; goto err_modeset_cleanup;
@@ -268,28 +248,35 @@ static int shmob_drm_probe(struct platform_device *pdev)
 	 */
 	ret = drm_dev_register(ddev, 0);
 	if (ret < 0)
-		goto err_irq_uninstall;
+		goto err_modeset_cleanup;
 
 	drm_fbdev_generic_setup(ddev, 16);
 
 	return 0;
 
-err_irq_uninstall:
-	free_irq(sdev->irq, ddev);
 err_modeset_cleanup:
 	drm_kms_helper_poll_fini(ddev);
-err_free_drm_dev:
-	drm_dev_put(ddev);
 	return ret;
 }
 
+static const struct shmob_drm_config shmob_arm_config = {
+	.clk_source = SHMOB_DRM_CLK_BUS,
+	.clk_div = 5,
+};
+
+static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
+	{ .compatible = "renesas,r8a7740-lcdc", .data = &shmob_arm_config, },
+	{ .compatible = "renesas,sh73a0-lcdc", .data = &shmob_arm_config, },
+	{ /* sentinel */ }
+};
+
 static struct platform_driver shmob_drm_platform_driver = {
 	.probe		= shmob_drm_probe,
 	.remove_new	= shmob_drm_remove,
 	.driver		= {
 		.name	= "shmob-drm",
-		.pm	= pm_sleep_ptr(&shmob_drm_pm_ops),
+		.of_match_table = of_match_ptr(shmob_drm_of_table),
+		.pm	= &shmob_drm_pm_ops,
 	},
 };
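
The probe path above now prefers per-SoC match data from the OF match table and falls back to legacy platform data only when no match data is present. Reduced to a standalone sketch, the selection looks roughly like this (struct names and values below are illustrative, not taken from the driver):

#include <stdio.h>

struct board_config {
	int clk_source;
	unsigned int clk_div;
};

/* Stand-in for of_device_get_match_data(): non-NULL only on a DT system. */
static const struct board_config *get_match_data(int booted_with_dt)
{
	static const struct board_config dt_config = {
		.clk_source = 0,
		.clk_div = 5,
	};

	return booted_with_dt ? &dt_config : NULL;
}

int main(void)
{
	const struct board_config *config = get_match_data(0);
	const struct board_config legacy_pdata = { .clk_source = 1, .clk_div = 7 };
	struct board_config effective;

	if (config)
		effective = *config;		/* DT: copy the per-SoC match data */
	else
		effective = legacy_pdata;	/* legacy board files: copy from pdata */

	printf("clk_source=%d clk_div=%u\n", effective.clk_source, effective.clk_div);
	return 0;
}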


@@ -20,23 +20,33 @@ struct clk;
 struct device;
 struct drm_device;
 
+struct shmob_drm_config {
+	enum shmob_drm_clk_source clk_source;
+	unsigned int clk_div;
+};
+
 struct shmob_drm_device {
 	struct device *dev;
 	const struct shmob_drm_platform_data *pdata;
+	struct shmob_drm_config config;
 
 	void __iomem *mmio;
 	struct clk *clock;
 	u32 lddckr;
-	u32 ldmt1r;
 
 	unsigned int irq;
 	spinlock_t irq_lock;		/* Protects hardware LDINTR register */
 
-	struct drm_device *ddev;
+	struct drm_device ddev;
 
 	struct shmob_drm_crtc crtc;
-	struct shmob_drm_encoder encoder;
-	struct shmob_drm_connector connector;
+	struct drm_encoder encoder;
+	struct drm_connector *connector;
 };
 
+static inline struct shmob_drm_device *to_shmob_device(struct drm_device *dev)
+{
+	return container_of(dev, struct shmob_drm_device, ddev);
+}
+
 #endif /* __SHMOB_DRM_DRV_H__ */
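
The to_shmob_device() helper added above is the usual upcast for an embedded drm_device: because the DRM device is now a member of shmob_drm_device rather than a pointer, container_of() can recover the wrapper from any drm_device pointer handed back by the core. A standalone sketch of the same pattern (names are illustrative, not from the driver):

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for drm_device / shmob_drm_device. */
struct core_dev {
	int id;
};

struct wrapper_dev {
	struct core_dev core;	/* embedded by value, not a pointer */
	int private_state;
};

/* Same arithmetic the kernel's container_of() performs. */
#define sample_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapper_dev *to_wrapper_dev(struct core_dev *core)
{
	return sample_container_of(core, struct wrapper_dev, core);
}

int main(void)
{
	struct wrapper_dev wdev = { .core = { .id = 1 }, .private_state = 42 };

	/* Upcasting the embedded member recovers the enclosing struct. */
	assert(to_wrapper_dev(&wdev.core) == &wdev);
	return 0;
}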


@@ -7,6 +7,7 @@
  *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
@@ -17,6 +18,7 @@
 #include "shmob_drm_crtc.h"
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
 #include "shmob_drm_regs.h"
 
 /* -----------------------------------------------------------------------------
@@ -27,53 +29,73 @@ static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
 	{
 		.fourcc = DRM_FORMAT_RGB565,
 		.bpp = 16,
-		.yuv = false,
 		.lddfr = LDDFR_PKF_RGB16,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16,
 	}, {
 		.fourcc = DRM_FORMAT_RGB888,
 		.bpp = 24,
-		.yuv = false,
 		.lddfr = LDDFR_PKF_RGB24,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_SWPB | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24,
 	}, {
 		.fourcc = DRM_FORMAT_ARGB8888,
 		.bpp = 32,
-		.yuv = false,
 		.lddfr = LDDFR_PKF_ARGB32,
+		.ldddsr = LDDDSR_LS,
+		.ldbbsifr = LDBBSIFR_AL_PK | LDBBSIFR_SWPL | LDBBSIFR_RY |
+			    LDBBSIFR_RPKF_ARGB32,
 	}, {
 		.fourcc = DRM_FORMAT_XRGB8888,
 		.bpp = 32,
-		.yuv = false,
 		.lddfr = LDDFR_PKF_ARGB32,
+		.ldddsr = LDDDSR_LS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_RY |
+			    LDBBSIFR_RPKF_ARGB32,
 	}, {
 		.fourcc = DRM_FORMAT_NV12,
 		.bpp = 12,
-		.yuv = true,
 		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_SWPB | LDBBSIFR_CHRR_420,
 	}, {
 		.fourcc = DRM_FORMAT_NV21,
 		.bpp = 12,
-		.yuv = true,
 		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_CHRR_420,
 	}, {
 		.fourcc = DRM_FORMAT_NV16,
 		.bpp = 16,
-		.yuv = true,
 		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_SWPB | LDBBSIFR_CHRR_422,
 	}, {
 		.fourcc = DRM_FORMAT_NV61,
 		.bpp = 16,
-		.yuv = true,
 		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_CHRR_422,
 	}, {
 		.fourcc = DRM_FORMAT_NV24,
 		.bpp = 24,
-		.yuv = true,
 		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_SWPB | LDBBSIFR_CHRR_444,
 	}, {
 		.fourcc = DRM_FORMAT_NV42,
 		.bpp = 24,
-		.yuv = true,
 		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.ldddsr = LDDDSR_LS | LDDDSR_WS,
+		.ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+			    LDBBSIFR_CHRR_444,
 	},
 };
@@ -112,7 +134,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (format->yuv) {
+	if (shmob_drm_format_is_yuv(format)) {
 		unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
 
 		if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
@@ -127,29 +149,40 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
 static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
 	.fb_create = shmob_drm_fb_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
 {
+	struct drm_device *dev = &sdev->ddev;
 	int ret;
 
-	ret = drmm_mode_config_init(sdev->ddev);
+	ret = drmm_mode_config_init(dev);
 	if (ret)
 		return ret;
 
-	shmob_drm_crtc_create(sdev);
-	shmob_drm_encoder_create(sdev);
-	shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+	ret = shmob_drm_crtc_create(sdev);
+	if (ret < 0)
+		return ret;
 
-	drm_kms_helper_poll_init(sdev->ddev);
+	ret = shmob_drm_encoder_create(sdev);
+	if (ret < 0)
+		return ret;
 
-	sdev->ddev->mode_config.min_width = 0;
-	sdev->ddev->mode_config.min_height = 0;
-	sdev->ddev->mode_config.max_width = 4095;
-	sdev->ddev->mode_config.max_height = 4095;
-	sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+	ret = shmob_drm_connector_create(sdev, &sdev->encoder);
+	if (ret < 0)
+		return ret;
 
-	drm_helper_disable_unused_functions(sdev->ddev);
+	drm_mode_config_reset(dev);
+
+	drm_kms_helper_poll_init(dev);
+
+	sdev->ddev.mode_config.min_width = 0;
+	sdev->ddev.mode_config.min_height = 0;
+	sdev->ddev.mode_config.max_width = 4095;
+	sdev->ddev.mode_config.max_height = 4095;
+	sdev->ddev.mode_config.funcs = &shmob_drm_mode_config_funcs;
 
 	return 0;
 }


@@ -17,11 +17,14 @@ struct shmob_drm_device;
 
 struct shmob_drm_format_info {
 	u32 fourcc;
-	unsigned int bpp;
-	bool yuv;
-	u32 lddfr;
+	u32 lddfr;	/* LCD Data Format Register */
+	u16 ldbbsifr;	/* CHn Source Image Format Register low bits */
+	u8 ldddsr;	/* LCDC Input Image Data Swap Register low bits */
+	u8 bpp;
 };
 
+#define shmob_drm_format_is_yuv(format)	((format)->lddfr & LDDFR_CC)
+
 const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
 int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
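
The shmob_drm_format_is_yuv() macro above replaces the old per-format bool by testing the LDDFR_CC bit that is already part of each entry's lddfr value. A tiny sketch of that kind of flag test, with an invented bit value standing in for LDDFR_CC:

#include <stdint.h>
#include <stdio.h>

#define SAMPLE_FLAG_CC	(1u << 4)	/* illustrative stand-in for LDDFR_CC */

struct sample_format_info {
	uint32_t ctrl;			/* register value; CC bit set for YUV formats */
};

#define sample_format_is_yuv(format)	((format)->ctrl & SAMPLE_FLAG_CC)

int main(void)
{
	struct sample_format_info rgb = { .ctrl = 0 };
	struct sample_format_info yuv = { .ctrl = SAMPLE_FLAG_CC };

	printf("rgb is yuv: %d\n", sample_format_is_yuv(&rgb) ? 1 : 0);
	printf("yuv is yuv: %d\n", sample_format_is_yuv(&yuv) ? 1 : 0);
	return 0;
}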


@@ -7,11 +7,14 @@
  *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_dma_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
@@ -19,102 +22,84 @@
 #include "shmob_drm_regs.h"
 
 struct shmob_drm_plane {
-	struct drm_plane plane;
+	struct drm_plane base;
 	unsigned int index;
-	unsigned int alpha;
-
-	const struct shmob_drm_format_info *format;
-	unsigned long dma[2];
-
-	unsigned int src_x;
-	unsigned int src_y;
-	unsigned int crtc_x;
-	unsigned int crtc_y;
-	unsigned int crtc_w;
-	unsigned int crtc_h;
 };
 
-#define to_shmob_plane(p)	container_of(p, struct shmob_drm_plane, plane)
+struct shmob_drm_plane_state {
+	struct drm_plane_state base;
+
+	const struct shmob_drm_format_info *format;
+	u32 dma[2];
+};
 
-static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
-					 struct drm_framebuffer *fb,
-					 int x, int y)
+static inline struct shmob_drm_plane *to_shmob_plane(struct drm_plane *plane)
 {
+	return container_of(plane, struct shmob_drm_plane, base);
+}
+
+static inline struct shmob_drm_plane_state *to_shmob_plane_state(struct drm_plane_state *state)
+{
+	return container_of(state, struct shmob_drm_plane_state, base);
+}
+
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane_state *sstate)
+{
+	struct drm_framebuffer *fb = sstate->base.fb;
+	unsigned int x = sstate->base.src_x >> 16;
+	unsigned int y = sstate->base.src_y >> 16;
 	struct drm_gem_dma_object *gem;
 	unsigned int bpp;
 
-	bpp = splane->format->yuv ? 8 : splane->format->bpp;
+	bpp = shmob_drm_format_is_yuv(sstate->format) ? 8 : sstate->format->bpp;
 	gem = drm_fb_dma_get_gem_obj(fb, 0);
-	splane->dma[0] = gem->dma_addr + fb->offsets[0]
+	sstate->dma[0] = gem->dma_addr + fb->offsets[0]
 		       + y * fb->pitches[0] + x * bpp / 8;
 
-	if (splane->format->yuv) {
-		bpp = splane->format->bpp - 8;
+	if (shmob_drm_format_is_yuv(sstate->format)) {
+		bpp = sstate->format->bpp - 8;
 		gem = drm_fb_dma_get_gem_obj(fb, 1);
-		splane->dma[1] = gem->dma_addr + fb->offsets[1]
+		sstate->dma[1] = gem->dma_addr + fb->offsets[1]
 			       + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
 			       + x * (bpp == 16 ? 2 : 1);
 	}
 }
 
-static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
-				    struct drm_framebuffer *fb)
+static void shmob_drm_primary_plane_setup(struct shmob_drm_plane *splane,
+					  struct drm_plane_state *state)
 {
-	struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+	struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state);
+	struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev);
+	struct drm_framebuffer *fb = state->fb;
+
+	/* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+	lcdc_write(sdev, LDDFR, sstate->format->lddfr | LDDFR_CF1);
+	lcdc_write(sdev, LDMLSR, fb->pitches[0]);
+
+	/* Word and long word swap. */
+	lcdc_write(sdev, LDDDSR, sstate->format->ldddsr);
+
+	lcdc_write_mirror(sdev, LDSA1R, sstate->dma[0]);
+	if (shmob_drm_format_is_yuv(sstate->format))
+		lcdc_write_mirror(sdev, LDSA2R, sstate->dma[1]);
+
+	lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
+static void shmob_drm_overlay_plane_setup(struct shmob_drm_plane *splane,
+					  struct drm_plane_state *state)
+{
+	struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state);
+	struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev);
+	struct drm_framebuffer *fb = state->fb;
 	u32 format;
 
 	/* TODO: Support ROP3 mode */
-	format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
-
-	switch (splane->format->fourcc) {
-	case DRM_FORMAT_RGB565:
-	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV61:
-	case DRM_FORMAT_NV42:
-		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
-		break;
-	case DRM_FORMAT_RGB888:
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV24:
-		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
-		break;
-	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_XRGB8888:
-	default:
-		format |= LDBBSIFR_SWPL;
-		break;
-	}
-
-	switch (splane->format->fourcc) {
-	case DRM_FORMAT_RGB565:
-		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
-		break;
-	case DRM_FORMAT_RGB888:
-		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
-		break;
-	case DRM_FORMAT_ARGB8888:
-		format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
-		break;
-	case DRM_FORMAT_XRGB8888:
-		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
-		break;
-	case DRM_FORMAT_NV12:
-	case DRM_FORMAT_NV21:
-		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
-		break;
-	case DRM_FORMAT_NV16:
-	case DRM_FORMAT_NV61:
-		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
-		break;
-	case DRM_FORMAT_NV24:
-	case DRM_FORMAT_NV42:
-		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
-		break;
-	}
+	format = LDBBSIFR_EN | ((state->alpha >> 8) << LDBBSIFR_LAY_SHIFT) |
+		 sstate->format->ldbbsifr;
 
 #define plane_reg_dump(sdev, splane, reg) \
-	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+	dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
 		splane->index, #reg, \
 		lcdc_read(sdev, reg(splane->index)), \
 		lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
@@ -127,29 +112,27 @@ static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
 	plane_reg_dump(sdev, splane, LDBnBSACR);
 
 	lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
-	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+	dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
 		"LDBCR", lcdc_read(sdev, LDBCR));
 
 	lcdc_write(sdev, LDBnBSIFR(splane->index), format);
 
 	lcdc_write(sdev, LDBnBSSZR(splane->index),
-		   (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
-		   (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+		   (state->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+		   (state->crtc_w << LDBBSSZR_BHSS_SHIFT));
 	lcdc_write(sdev, LDBnBLOCR(splane->index),
-		   (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
-		   (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+		   (state->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+		   (state->crtc_x << LDBBLOCR_CHLC_SHIFT));
 	lcdc_write(sdev, LDBnBSMWR(splane->index),
 		   fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
 
-	shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
-
-	lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
-	if (splane->format->yuv)
-		lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+	lcdc_write(sdev, LDBnBSAYR(splane->index), sstate->dma[0]);
+	if (shmob_drm_format_is_yuv(sstate->format))
+		lcdc_write(sdev, LDBnBSACR(splane->index), sstate->dma[1]);
 
 	lcdc_write(sdev, LDBCR,
 		   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
-	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+	dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
 		"LDBCR", lcdc_read(sdev, LDBCR));
 
 	plane_reg_dump(sdev, splane, LDBnBSIFR);
@@ -160,75 +143,143 @@ static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
 	plane_reg_dump(sdev, splane, LDBnBSACR);
 }
 
-void shmob_drm_plane_setup(struct drm_plane *plane)
+static int shmob_drm_plane_atomic_check(struct drm_plane *plane,
+					struct drm_atomic_state *state)
 {
+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+	struct shmob_drm_plane_state *sstate = to_shmob_plane_state(new_plane_state);
+	struct drm_crtc_state *crtc_state;
+	bool is_primary = plane->type == DRM_PLANE_TYPE_PRIMARY;
+	int ret;
+
+	if (!new_plane_state->crtc) {
+		/*
+		 * The visible field is not reset by the DRM core but only
+		 * updated by drm_atomic_helper_check_plane_state(), set it
+		 * manually.
+		 */
+		new_plane_state->visible = false;
+		sstate->format = NULL;
+		return 0;
+	}
+
+	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+						  DRM_PLANE_NO_SCALING,
+						  DRM_PLANE_NO_SCALING,
+						  !is_primary, true);
+	if (ret < 0)
+		return ret;
+
+	if (!new_plane_state->visible) {
+		sstate->format = NULL;
+		return 0;
+	}
+
+	sstate->format = shmob_drm_format_info(new_plane_state->fb->format->format);
+	if (!sstate->format) {
+		dev_dbg(plane->dev->dev,
+			"plane_atomic_check: unsupported format %p4cc\n",
+			&new_plane_state->fb->format->format);
+		return -EINVAL;
+	}
+
+	shmob_drm_plane_compute_base(sstate);
+
+	return 0;
+}
+
+static void shmob_drm_plane_atomic_update(struct drm_plane *plane,
+					  struct drm_atomic_state *state)
+{
+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
 	struct shmob_drm_plane *splane = to_shmob_plane(plane);
 
-	if (plane->fb == NULL)
+	if (!new_plane_state->visible)
 		return;
 
-	__shmob_drm_plane_setup(splane, plane->fb);
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		shmob_drm_primary_plane_setup(splane, new_plane_state);
+	else
+		shmob_drm_overlay_plane_setup(splane, new_plane_state);
 }
 
-static int
-shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-		       unsigned int crtc_w, unsigned int crtc_h,
-		       uint32_t src_x, uint32_t src_y,
-		       uint32_t src_w, uint32_t src_h,
-		       struct drm_modeset_acquire_ctx *ctx)
+static void shmob_drm_plane_atomic_disable(struct drm_plane *plane,
+					   struct drm_atomic_state *state)
 {
+	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+	struct shmob_drm_device *sdev = to_shmob_device(plane->dev);
 	struct shmob_drm_plane *splane = to_shmob_plane(plane);
-	struct shmob_drm_device *sdev = plane->dev->dev_private;
-	const struct shmob_drm_format_info *format;
 
-	format = shmob_drm_format_info(fb->format->format);
-	if (format == NULL) {
-		dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
-			fb->format->format);
-		return -EINVAL;
-	}
+	if (!old_state->crtc)
+		return;
 
-	if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
-		dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
-		return -EINVAL;
-	}
+	if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+		return;
 
-	splane->format = format;
-
-	splane->src_x = src_x >> 16;
-	splane->src_y = src_y >> 16;
-	splane->crtc_x = crtc_x;
-	splane->crtc_y = crtc_y;
-	splane->crtc_w = crtc_w;
-	splane->crtc_h = crtc_h;
-
-	__shmob_drm_plane_setup(splane, fb);
-	return 0;
-}
-
-static int shmob_drm_plane_disable(struct drm_plane *plane,
-				   struct drm_modeset_acquire_ctx *ctx)
-{
-	struct shmob_drm_plane *splane = to_shmob_plane(plane);
-	struct shmob_drm_device *sdev = plane->dev->dev_private;
-
-	splane->format = NULL;
-
+	lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
 	lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
-	return 0;
+	lcdc_write(sdev, LDBCR,
+		   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
 }
 
-static void shmob_drm_plane_destroy(struct drm_plane *plane)
+static struct drm_plane_state *
+shmob_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
 {
-	drm_plane_force_disable(plane);
-	drm_plane_cleanup(plane);
+	struct shmob_drm_plane_state *state;
+	struct shmob_drm_plane_state *copy;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	state = to_shmob_plane_state(plane->state);
+	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+	if (copy == NULL)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+
+	return &copy->base;
 }
 
+static void shmob_drm_plane_atomic_destroy_state(struct drm_plane *plane,
+						 struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(to_shmob_plane_state(state));
+}
+
+static void shmob_drm_plane_reset(struct drm_plane *plane)
+{
+	struct shmob_drm_plane_state *state;
+
+	if (plane->state) {
+		shmob_drm_plane_atomic_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		return;
+
+	__drm_atomic_helper_plane_reset(plane, &state->base);
+}
+
+static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = {
+	.atomic_check = shmob_drm_plane_atomic_check,
+	.atomic_update = shmob_drm_plane_atomic_update,
+	.atomic_disable = shmob_drm_plane_atomic_disable,
+};
+
 static const struct drm_plane_funcs shmob_drm_plane_funcs = {
-	.update_plane = shmob_drm_plane_update,
-	.disable_plane = shmob_drm_plane_disable,
-	.destroy = shmob_drm_plane_destroy,
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = shmob_drm_plane_reset,
+	.atomic_duplicate_state = shmob_drm_plane_atomic_duplicate_state,
+	.atomic_destroy_state = shmob_drm_plane_atomic_destroy_state,
 };
 
 static const uint32_t formats[] = {
@@ -244,22 +295,23 @@ static const uint32_t formats[] = {
 	DRM_FORMAT_NV42,
 };
 
-int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
+					 enum drm_plane_type type,
+					 unsigned int index)
 {
 	struct shmob_drm_plane *splane;
-	int ret;
 
-	splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL);
-	if (splane == NULL)
-		return -ENOMEM;
+	splane = drmm_universal_plane_alloc(&sdev->ddev,
+					    struct shmob_drm_plane, base, 1,
+					    &shmob_drm_plane_funcs, formats,
+					    ARRAY_SIZE(formats), NULL, type,
+					    NULL);
+	if (IS_ERR(splane))
+		return ERR_CAST(splane);
 
 	splane->index = index;
-	splane->alpha = 255;
 
-	ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1,
-				       &shmob_drm_plane_funcs,
-				       formats, ARRAY_SIZE(formats), NULL,
-				       DRM_PLANE_TYPE_OVERLAY, NULL);
+	drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs);
 
-	return ret;
+	return &splane->base;
 }
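
The reset/duplicate/destroy trio above is the usual way of carrying driver-private data (here the resolved format and the two DMA addresses) in a subclassed atomic plane state. Stripped of the DRM specifics, the life cycle looks roughly like this sketch (types and helpers are illustrative, not the DRM API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for drm_plane_state and a driver subclass of it. */
struct base_state {
	int placeholder;
};

struct driver_state {
	struct base_state base;	/* embedded base state */
	const char *format;	/* driver-private, recomputed during check */
	unsigned long dma[2];
};

/* reset: drop any previous state and install a zeroed one. */
static struct driver_state *state_reset(struct driver_state *old)
{
	free(old);
	return calloc(1, sizeof(struct driver_state));
}

/* duplicate: copy the whole subclass so the private fields carry over. */
static struct driver_state *state_duplicate(const struct driver_state *cur)
{
	struct driver_state *copy = malloc(sizeof(*copy));

	if (copy)
		memcpy(copy, cur, sizeof(*copy));
	return copy;
}

int main(void)
{
	struct driver_state *state = state_reset(NULL);
	struct driver_state *copy;

	if (!state)
		return 1;

	state->format = "XR24";
	state->dma[0] = 0x48000000UL;

	copy = state_duplicate(state);
	if (copy)
		printf("copy: format=%s dma0=%#lx\n", copy->format, copy->dma[0]);

	free(copy);
	free(state);
	return 0;
}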


@@ -13,7 +13,8 @@
 struct drm_plane;
 struct shmob_drm_device;
 
-int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
-void shmob_drm_plane_setup(struct drm_plane *plane);
+struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
+					 enum drm_plane_type type,
+					 unsigned int index);
 
 #endif /* __SHMOB_DRM_PLANE_H__ */


@@ -910,7 +910,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
 	struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
 	struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
 	struct drm_crtc *crtc = plane_state->crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *crtc_state = NULL;
 	const struct drm_format_info *fi;
 	unsigned int pitch;
 	int ret;
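
The ssd132x hunk above only adds an explicit NULL initialization for crtc_state; the pointer is assigned only on some paths later in the function, so starting from NULL gives later checks a well-defined value instead of stack garbage. A reduced illustration of the hazard being avoided (not the ssd130x logic itself):

#include <stdio.h>

struct crtc_state_like {
	int enabled;
};

static int check_plane(int has_crtc, struct crtc_state_like *attached)
{
	struct crtc_state_like *crtc_state = NULL;	/* explicit init: safe default */

	if (has_crtc)
		crtc_state = attached;			/* only assigned on this path */

	/* Later code can test the pointer instead of reading an uninitialized value. */
	if (crtc_state && crtc_state->enabled)
		return 1;

	return 0;
}

int main(void)
{
	struct crtc_state_like on = { .enabled = 1 };

	printf("%d %d\n", check_plane(1, &on), check_plane(0, &on));
	return 0;
}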


@@ -68,4 +68,9 @@ enum amd_asic_type {
 
 extern const char *amdgpu_asic_name[];
 
+struct amdgpu_asic_type_quirk {
+	unsigned short device;	/* PCI device ID */
+	u8 revision;		/* revision ID */
+	unsigned short type;	/* real ASIC type */
+};
+
 #endif /*__AMD_ASIC_TYPE_H__ */
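
The new amdgpu_asic_type_quirk structure gives amdgpu a small table for boards whose PCI device/revision IDs should map to a different ASIC type than the one the generic probe logic would pick. A hedged sketch of how such a table is typically scanned (the entries, IDs and the lookup function below are hypothetical, not the amdgpu code):

#include <stddef.h>
#include <stdio.h>

typedef unsigned char u8;

struct asic_type_quirk {
	unsigned short device;		/* PCI device ID */
	u8 revision;			/* revision ID */
	unsigned short type;		/* ASIC type to use instead */
};

/* Hypothetical example entries; the real IDs live in the amdgpu probe code. */
static const struct asic_type_quirk quirks[] = {
	{ .device = 0x1234, .revision = 0xc0, .type = 42 },
	{ .device = 0x5678, .revision = 0x01, .type = 43 },
};

static unsigned short fixup_asic_type(unsigned short device, u8 revision,
				      unsigned short reported_type)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		if (quirks[i].device == device && quirks[i].revision == revision)
			return quirks[i].type;	/* a matching quirk entry wins */
	}

	return reported_type;			/* no quirk: trust the reported type */
}

int main(void)
{
	printf("%u\n", fixup_asic_type(0x1234, 0xc0, 7));
	printf("%u\n", fixup_asic_type(0x9999, 0x00, 7));
	return 0;
}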


@@ -10,7 +10,7 @@
 #ifndef __SHMOB_DRM_H__
 #define __SHMOB_DRM_H__
 
-#include <drm/drm_mode.h>
+#include <video/videomode.h>
 
 enum shmob_drm_clk_source {
 	SHMOB_DRM_CLK_BUS,
@@ -18,72 +18,21 @@ enum shmob_drm_clk_source {
 	SHMOB_DRM_CLK_EXTERNAL,
 };
 
-enum shmob_drm_interface {
-	SHMOB_DRM_IFACE_RGB8,		/* 24bpp, 8:8:8 */
-	SHMOB_DRM_IFACE_RGB9,		/* 18bpp, 9:9 */
-	SHMOB_DRM_IFACE_RGB12A,		/* 24bpp, 12:12 */
-	SHMOB_DRM_IFACE_RGB12B,		/* 12bpp */
-	SHMOB_DRM_IFACE_RGB16,		/* 16bpp */
-	SHMOB_DRM_IFACE_RGB18,		/* 18bpp */
-	SHMOB_DRM_IFACE_RGB24,		/* 24bpp */
-	SHMOB_DRM_IFACE_YUV422,		/* 16bpp */
-	SHMOB_DRM_IFACE_SYS8A,		/* 24bpp, 8:8:8 */
-	SHMOB_DRM_IFACE_SYS8B,		/* 18bpp, 8:8:2 */
-	SHMOB_DRM_IFACE_SYS8C,		/* 18bpp, 2:8:8 */
-	SHMOB_DRM_IFACE_SYS8D,		/* 16bpp, 8:8 */
-	SHMOB_DRM_IFACE_SYS9,		/* 18bpp, 9:9 */
-	SHMOB_DRM_IFACE_SYS12,		/* 24bpp, 12:12 */
-	SHMOB_DRM_IFACE_SYS16A,		/* 16bpp */
-	SHMOB_DRM_IFACE_SYS16B,		/* 18bpp, 16:2 */
-	SHMOB_DRM_IFACE_SYS16C,		/* 18bpp, 2:16 */
-	SHMOB_DRM_IFACE_SYS18,		/* 18bpp */
-	SHMOB_DRM_IFACE_SYS24,		/* 24bpp */
-};
-
-struct shmob_drm_backlight_data {
-	const char *name;
-	int max_brightness;
-	int (*get_brightness)(void);
-	int (*set_brightness)(int brightness);
-};
-
 struct shmob_drm_panel_data {
 	unsigned int width_mm;		/* Panel width in mm */
 	unsigned int height_mm;		/* Panel height in mm */
-	struct drm_mode_modeinfo mode;
+	struct videomode mode;
 };
 
-struct shmob_drm_sys_interface_data {
-	unsigned int read_latch:6;
-	unsigned int read_setup:8;
-	unsigned int read_cycle:8;
-	unsigned int read_strobe:8;
-	unsigned int write_setup:8;
-	unsigned int write_cycle:8;
-	unsigned int write_strobe:8;
-	unsigned int cs_setup:3;
-	unsigned int vsync_active_high:1;
-	unsigned int vsync_dir_input:1;
-};
-
-#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
-#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
-#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
-#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
-#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
-
 struct shmob_drm_interface_data {
-	enum shmob_drm_interface interface;
-	struct shmob_drm_sys_interface_data sys;
+	unsigned int bus_fmt;		/* MEDIA_BUS_FMT_* */
 	unsigned int clk_div;
-	unsigned int flags;
 };
 
 struct shmob_drm_platform_data {
 	enum shmob_drm_clk_source clk_source;
 	struct shmob_drm_interface_data iface;
 	struct shmob_drm_panel_data panel;
-	struct shmob_drm_backlight_data backlight;
 };
 
 #endif /* __SHMOB_DRM_H__ */
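
Panel timings in the platform data now use struct videomode from <video/videomode.h> rather than a DRM UAPI mode, so board files describe the raw pixel clock and porch lengths. A self-contained sketch with a simplified stand-in for that structure (the field values are invented for illustration):

#include <stdio.h>

/* Simplified stand-in for struct videomode: raw timings, not a DRM mode. */
struct sample_videomode {
	unsigned long pixelclock;	/* Hz */
	unsigned int hactive, hfront_porch, hback_porch, hsync_len;
	unsigned int vactive, vfront_porch, vback_porch, vsync_len;
};

/* Invented 800x480 panel timings, the kind a board file would provide. */
static const struct sample_videomode panel_mode = {
	.pixelclock	= 33260000,
	.hactive	= 800,
	.hfront_porch	= 40,
	.hback_porch	= 88,
	.hsync_len	= 128,
	.vactive	= 480,
	.vfront_porch	= 13,
	.vback_porch	= 32,
	.vsync_len	= 3,
};

int main(void)
{
	unsigned int htotal = panel_mode.hactive + panel_mode.hfront_porch +
			      panel_mode.hback_porch + panel_mode.hsync_len;
	unsigned int vtotal = panel_mode.vactive + panel_mode.vfront_porch +
			      panel_mode.vback_porch + panel_mode.vsync_len;

	/* Refresh rate falls out of the raw timings: clock / (htotal * vtotal). */
	printf("refresh ~= %lu Hz\n",
	       panel_mode.pixelclock / ((unsigned long)htotal * vtotal));
	return 0;
}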


@@ -34,7 +34,7 @@
 
 #define MEDIA_BUS_FMT_FIXED			0x0001
 
-/* RGB - next is	0x1025 */
+/* RGB - next is	0x1026 */
 #define MEDIA_BUS_FMT_RGB444_1X12		0x1016
 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE	0x1001
 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE	0x1002
@@ -46,6 +46,7 @@
 #define MEDIA_BUS_FMT_RGB565_2X8_BE		0x1007
 #define MEDIA_BUS_FMT_RGB565_2X8_LE		0x1008
 #define MEDIA_BUS_FMT_RGB666_1X18		0x1009
+#define MEDIA_BUS_FMT_RGB666_2X9_BE		0x1025
 #define MEDIA_BUS_FMT_BGR666_1X18		0x1023
 #define MEDIA_BUS_FMT_RBG888_1X24		0x100e
 #define MEDIA_BUS_FMT_RGB666_1X24_CPADHI	0x1015