
Merge tag 'drm-msm-next-2022-09-22' of https://gitlab.freedesktop.org/drm/msm into drm-next

msm-next for v6.1

DPU:
- simplified VBIF configuration
- cleaned up CTL interfaces to accept indices rather than flush masks

DSI:
- removed unused msm_display_dsc_config struct
- switched regulator calls to the new bulk API
- switched to use PANEL_BRIDGE for directly attached panels

DSI PHY:
- converted drivers to use parent_hws instead of parent_names

DP:
- cleaned up pixel_rate handling

HDMI PHY:
- turned hdmi-phy-8996 into OF clk provider

core:
- misc dt-bindings fixes
- choose eDP as primary display if it's available
- support getting interconnects from either the mdss or the mdp5/dpu
  device nodes

gpu+gem:
- Shrinker + LRU re-work:
  - adds a shared GEM LRU+shrinker helper and moves msm over to that
  - reduces lock contention between retire and submit by avoiding the
    need to acquire the obj lock in the retire path (instead using the
    resv to check the obj's busyness in the shrinker)
  - fixes reclaim vs submit issues
- GEM fault injection for triggering userspace error paths
- Map/unmap optimization
- Improved robustness for a6xx GPU recovery

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsrfrr9v1oR9S4oYfOs9jm=jbKQiwPBTrCRHrjYerJJFA@mail.gmail.com
Dave Airlie 2022-09-28 11:35:25 +10:00
commit 95d8c67187
73 changed files with 1847 additions and 2074 deletions

View File

@ -24,6 +24,7 @@ properties:
- qcom,sm8350-dp
reg:
minItems: 4
items:
- description: ahb register block
- description: aux register block
@ -70,14 +71,28 @@ properties:
operating-points-v2:
maxItems: 1
opp-table: true
power-domains:
maxItems: 1
aux-bus:
$ref: /schemas/display/dp-aux-bus.yaml#
data-lanes:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 4
items:
maximum: 3
"#sound-dai-cells":
const: 0
vdda-0p9-supply: true
vdda-1p2-supply: true
vdda-0p9-supply:
deprecated: true
vdda-1p2-supply:
deprecated: true
ports:
$ref: /schemas/graph.yaml#/properties/ports
@ -98,10 +113,33 @@ required:
- clock-names
- phys
- phy-names
- "#sound-dai-cells"
- power-domains
- ports
allOf:
# AUX BUS does not exist on DP controllers
# Audio output is likewise present only on DP output
# The p1 region is present on DP, but not on eDP
- if:
properties:
compatible:
contains:
enum:
- qcom,sc7280-edp
- qcom,sc8180x-edp
then:
properties:
"#sound-dai-cells": false
reg:
maxItems: 4
else:
properties:
aux-bus: false
reg:
minItems: 5
required:
- "#sound-dai-cells"
additionalProperties: false
examples:
@ -140,9 +178,6 @@ examples:
power-domains = <&rpmhpd SC7180_CX>;
vdda-0p9-supply = <&vdda_usb_ss_dp_core>;
vdda-1p2-supply = <&vdda_usb_ss_dp_1p2>;
ports {
#address-cells = <1>;
#size-cells = <0>;

View File

@ -62,6 +62,7 @@ patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
description: Node containing the properties of DPU.
additionalProperties: false
properties:
compatible:
@ -105,6 +106,9 @@ patternProperties:
maxItems: 1
operating-points-v2: true
opp-table:
type: object
ports:
$ref: /schemas/graph.yaml#/properties/ports
description: |

View File

@ -74,6 +74,7 @@ patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
description: Node containing the properties of DPU.
additionalProperties: false
properties:
compatible:
@ -113,6 +114,8 @@ patternProperties:
maxItems: 1
operating-points-v2: true
opp-table:
type: object
ports:
$ref: /schemas/graph.yaml#/properties/ports

View File

@ -73,6 +73,7 @@ patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
description: Node containing the properties of DPU.
additionalProperties: false
properties:
compatible:
@ -114,6 +115,8 @@ patternProperties:
maxItems: 1
operating-points-v2: true
opp-table:
type: object
ports:
$ref: /schemas/graph.yaml#/properties/ports

View File

@ -72,6 +72,7 @@ patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
description: Node containing the properties of DPU.
additionalProperties: false
properties:
compatible:
@ -112,6 +113,8 @@ patternProperties:
maxItems: 1
operating-points-v2: true
opp-table:
type: object
ports:
$ref: /schemas/graph.yaml#/properties/ports

View File

@ -65,6 +65,7 @@ patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
description: Node containing the properties of DPU.
additionalProperties: false
properties:
compatible:
@ -102,6 +103,9 @@ patternProperties:
maxItems: 1
operating-points-v2: true
opp-table:
type: object
ports:
$ref: /schemas/graph.yaml#/properties/ports
description: |

View File

@ -20,35 +20,24 @@ description: |
properties:
compatible:
items:
- enum:
- qcom,adreno-gmu-630.2
- pattern: '^qcom,adreno-gmu-6[0-9][0-9]\.[0-9]$'
- const: qcom,adreno-gmu
reg:
items:
- description: Core GMU registers
- description: GMU PDC registers
- description: GMU PDC sequence registers
minItems: 3
maxItems: 4
reg-names:
items:
- const: gmu
- const: gmu_pdc
- const: gmu_pdc_seq
minItems: 3
maxItems: 4
clocks:
items:
- description: GMU clock
- description: GPU CX clock
- description: GPU AXI clock
- description: GPU MEMNOC clock
minItems: 4
maxItems: 7
clock-names:
items:
- const: gmu
- const: cxo
- const: axi
- const: memnoc
minItems: 4
maxItems: 7
interrupts:
items:
@ -76,6 +65,9 @@ properties:
operating-points-v2: true
opp-table:
type: object
required:
- compatible
- reg
@ -91,6 +83,140 @@ required:
additionalProperties: false
allOf:
- if:
properties:
compatible:
contains:
enum:
- qcom,adreno-gmu-618.0
- qcom,adreno-gmu-630.2
then:
properties:
reg:
items:
- description: Core GMU registers
- description: GMU PDC registers
- description: GMU PDC sequence registers
reg-names:
items:
- const: gmu
- const: gmu_pdc
- const: gmu_pdc_seq
clocks:
items:
- description: GMU clock
- description: GPU CX clock
- description: GPU AXI clock
- description: GPU MEMNOC clock
clock-names:
items:
- const: gmu
- const: cxo
- const: axi
- const: memnoc
- if:
properties:
compatible:
contains:
enum:
- qcom,adreno-gmu-635.0
then:
properties:
reg:
items:
- description: Core GMU registers
- description: Resource controller registers
- description: GMU PDC registers
reg-names:
items:
- const: gmu
- const: rscc
- const: gmu_pdc
clocks:
items:
- description: GMU clock
- description: GPU CX clock
- description: GPU AXI clock
- description: GPU MEMNOC clock
- description: GPU AHB clock
- description: GPU HUB CX clock
- description: GPU SMMU vote clock
clock-names:
items:
- const: gmu
- const: cxo
- const: axi
- const: memnoc
- const: ahb
- const: hub
- const: smmu_vote
- if:
properties:
compatible:
contains:
enum:
- qcom,adreno-gmu-640.1
then:
properties:
reg:
items:
- description: Core GMU registers
- description: GMU PDC registers
- description: GMU PDC sequence registers
reg-names:
items:
- const: gmu
- const: gmu_pdc
- const: gmu_pdc_seq
- if:
properties:
compatible:
contains:
enum:
- qcom,adreno-gmu-650.2
then:
properties:
reg:
items:
- description: Core GMU registers
- description: Resource controller registers
- description: GMU PDC registers
- description: GMU PDC sequence registers
reg-names:
items:
- const: gmu
- const: rscc
- const: gmu_pdc
- const: gmu_pdc_seq
- if:
properties:
compatible:
contains:
enum:
- qcom,adreno-gmu-640.1
- qcom,adreno-gmu-650.2
then:
properties:
clocks:
items:
- description: GPU AHB clock
- description: GMU clock
- description: GPU CX clock
- description: GPU AXI clock
- description: GPU MEMNOC clock
clock-names:
items:
- const: ahb
- const: gmu
- const: cxo
- const: axi
- const: memnoc
examples:
- |
#include <dt-bindings/clock/qcom,gpucc-sdm845.h>

View File

@ -58,7 +58,8 @@ properties:
- const: ocmem
iommus:
maxItems: 1
minItems: 1
maxItems: 64
sram:
$ref: /schemas/types.yaml#/definitions/phandle-array

View File

@ -36,7 +36,7 @@ properties:
maxItems: 1
iommus:
maxItems: 1
maxItems: 4
ports:
$ref: /schemas/graph.yaml#/properties/ports

View File

@ -28,12 +28,15 @@ properties:
- const: hdmi_phy
clocks:
maxItems: 2
minItems: 2
maxItems: 3
clock-names:
minItems: 2
items:
- const: iface
- const: ref
- const: xo
power-domains:
maxItems: 1
@ -44,6 +47,9 @@ properties:
vddio-supply:
description: phandle to VDD I/O supply regulator
'#clock-cells':
const: 0
'#phy-cells':
const: 0
@ -75,9 +81,12 @@ examples:
"hdmi_phy";
clocks = <&mmcc 116>,
<&gcc 214>;
<&gcc 214>,
<&xo_board>;
clock-names = "iface",
"ref";
"ref",
"xo";
#clock-cells = <0>;
#phy-cells = <0>;
vddio-supply = <&vreg_l12a_1p8>;

View File

@ -165,6 +165,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
obj->resv = &obj->_resv;
drm_vma_node_reset(&obj->vma_node);
INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@ -951,6 +952,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@ -1274,3 +1276,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
/**
* drm_gem_lru_init - initialize a LRU
*
* @lru: The LRU to initialize
* @lock: The lock protecting the LRU
*/
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
lru->lock = lock;
lru->count = 0;
INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);
static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
obj->lru->count -= obj->size >> PAGE_SHIFT;
WARN_ON(obj->lru->count < 0);
list_del(&obj->lru_node);
obj->lru = NULL;
}
/**
* drm_gem_lru_remove - remove object from whatever LRU it is in
*
* If the object is currently in any LRU, remove it.
*
* @obj: The GEM object to remove from current LRU
*/
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
struct drm_gem_lru *lru = obj->lru;
if (!lru)
return;
mutex_lock(lru->lock);
drm_gem_lru_remove_locked(obj);
mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);
static void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
lockdep_assert_held_once(lru->lock);
if (obj->lru)
drm_gem_lru_remove_locked(obj);
lru->count += obj->size >> PAGE_SHIFT;
list_add_tail(&obj->lru_node, &lru->list);
obj->lru = lru;
}
/**
* drm_gem_lru_move_tail - move the object to the tail of the LRU
*
* If the object is already in this LRU it will be moved to the
* tail. Otherwise it will be removed from whichever other LRU
* it is in (if any) and moved into this LRU.
*
* @lru: The LRU to move the object into.
* @obj: The GEM object to move into this LRU
*/
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
mutex_lock(lru->lock);
drm_gem_lru_move_tail_locked(lru, obj);
mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);
/**
* drm_gem_lru_scan - helper to implement shrinker.scan_objects
*
* If the shrink callback succeeds, it is expected that the driver
* move the object out of this LRU.
*
* If the LRU possibly contains active buffers, it is the responsibility
* of the shrink callback to check for this (ie. dma_resv_test_signaled())
* or if necessary block until the buffer becomes idle.
*
* @lru: The LRU to scan
* @nr_to_scan: The number of pages to try to reclaim
* @shrink: Callback to try to shrink/reclaim the object.
*/
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
bool (*shrink)(struct drm_gem_object *obj))
{
struct drm_gem_lru still_in_lru;
struct drm_gem_object *obj;
unsigned freed = 0;
drm_gem_lru_init(&still_in_lru, lru->lock);
mutex_lock(lru->lock);
while (freed < nr_to_scan) {
obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
if (!obj)
break;
drm_gem_lru_move_tail_locked(&still_in_lru, obj);
/*
* If it's in the process of being freed, gem_object->free()
* may be blocked on lock waiting to remove it. So just
* skip it.
*/
if (!kref_get_unless_zero(&obj->refcount))
continue;
/*
* Now that we own a reference, we can drop the lock for the
* rest of the loop body, to reduce contention with other
* code paths that need the LRU lock
*/
mutex_unlock(lru->lock);
/*
* Note that this still needs to be trylock, since we can
* hit shrinker in response to trying to get backing pages
* for this obj (ie. while its lock is already held)
*/
if (!dma_resv_trylock(obj->resv))
goto tail;
if (shrink(obj)) {
freed += obj->size >> PAGE_SHIFT;
/*
* If we succeeded in releasing the object's backing
* pages, we expect the driver to have moved the object
* out of this LRU
*/
WARN_ON(obj->lru == &still_in_lru);
WARN_ON(obj->lru == lru);
}
dma_resv_unlock(obj->resv);
tail:
drm_gem_object_put(obj);
mutex_lock(lru->lock);
}
/*
* Move objects we've skipped over out of the temporary still_in_lru
* back into this LRU
*/
list_for_each_entry (obj, &still_in_lru.list, lru_node)
obj->lru = lru;
list_splice_tail(&still_in_lru.list, &lru->list);
lru->count += still_in_lru.count;
mutex_unlock(lru->lock);
return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
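
For orientation, here is a minimal sketch (not from this patch) of how a driver shrinker could sit on top of the helpers above. Every mydrv_* name is hypothetical, and the DMA_RESV_USAGE_BOOKKEEP check is just one plausible way to honor the "check for active buffers" rule from the drm_gem_lru_scan() kerneldoc:

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>
#include <linux/shrinker.h>

struct mydrv_device {
	struct mutex lru_lock;		/* protects the LRU below */
	struct drm_gem_lru purgeable;	/* objects whose pages may be dropped */
	struct shrinker shrinker;
};

static void mydrv_lru_setup(struct mydrv_device *mydev)
{
	mutex_init(&mydev->lru_lock);
	drm_gem_lru_init(&mydev->purgeable, &mydev->lru_lock);
}

/*
 * Called by drm_gem_lru_scan() with obj->resv already trylocked.  On
 * success the object must leave the LRU (a real driver would also free
 * the backing pages here).
 */
static bool mydrv_gem_purge(struct drm_gem_object *obj)
{
	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
		return false;		/* still busy, skip it */

	drm_gem_lru_remove(obj);
	return true;
}

/* Wired up as the driver's shrinker.scan_objects callback. */
static unsigned long
mydrv_shrinker_scan(struct shrinker *sh, struct shrink_control *sc)
{
	struct mydrv_device *mydev =
		container_of(sh, struct mydrv_device, shrinker);

	return drm_gem_lru_scan(&mydev->purgeable, sc->nr_to_scan,
				mydrv_gem_purge);
}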

View File

@ -68,7 +68,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);

View File

@ -62,7 +62,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);

View File

@ -1413,6 +1413,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001

View File

@ -873,9 +873,47 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
(val & 1), 100, 1000);
}
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
if (!a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
0xf) == 0xf);
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
return;
}
/* Halt the gx side of GBIF */
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
/* Halt all AXI requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
/* The GBIF halt needs to be explicitly cleared */
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
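
(This is the same helper the hunk below removes from its old spot after a6xx_gmu_isidle(): it moves up so that a6xx_gmu_force_off() can call it, and gains the extra GX-side halt through REG_A6XX_RBBM_GBIF_HALT ahead of the client and arbiter halts.)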
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
/* Flush all the queues */
a6xx_hfi_stop(gmu);
@ -887,6 +925,15 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
a6xx_bus_clear_pending_transactions(adreno_gpu);
/* Reset GPU core blocks */
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
udelay(100);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@ -1014,36 +1061,6 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
if (!a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
0xf) == 0xf);
gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
return;
}
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
/* Halt all AXI requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
/* The GBIF halt needs to be explicitly cleared */
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
@ -1069,7 +1086,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
ret = a6xx_gmu_notify_slumber(gmu);
if (ret) {
a6xx_gmu_force_off(gmu);
return;
}
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,

View File

@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/reset.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@ -146,7 +147,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
/*
@ -987,6 +988,10 @@ static int hw_init(struct msm_gpu *gpu)
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
/* Clear GBIF halt in case GX domain was not collapsed */
if (a6xx_has_gbif(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
/*
@ -1261,7 +1266,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int i;
int i, active_submits;
adreno_dump_info(gpu);
@ -1272,14 +1277,46 @@ static void a6xx_recover(struct msm_gpu *gpu)
if (hang_debug)
a6xx_dump(gpu);
/* Halt SQE first */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
/*
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
/* active_submits won't change until we make a submission */
mutex_lock(&gpu->active_lock);
active_submits = gpu->active_submits;
/*
* Temporarily clear active_submits count to silence a WARN() in the
* runtime suspend cb
*/
gpu->active_submits = 0;
/* Drop the rpm refcount from active submits */
if (active_submits)
pm_runtime_put(&gpu->pdev->dev);
/* And the final one from recover worker */
pm_runtime_put_sync(&gpu->pdev->dev);
/* Call into gpucc driver to poll for cx gdsc collapse */
reset_control_reset(gpu->cx_collapse);
pm_runtime_use_autosuspend(&gpu->pdev->dev);
if (active_submits)
pm_runtime_get(&gpu->pdev->dev);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->active_submits = active_submits;
mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
}
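
For reference, the reset_control_reset() call above pokes a reset line the driver must have looked up at probe time; a one-line sketch of that lookup (the "cx_collapse" name is an assumption based on this series, and the optional variant keeps platforms without a gpucc reset probing):

	/* sketch: optional lookup, reset name assumed */
	gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
								     "cx_collapse");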

View File

@ -412,7 +412,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
@ -420,6 +419,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
memset(fetch_active, 0, sizeof(fetch_active));
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum dpu_sspp sspp_idx;
state = plane->state;
if (!state)
continue;
@ -430,14 +431,14 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
pstate = to_dpu_plane_state(state);
fb = state->fb;
dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
set_bit(dpu_plane_pipe(plane), fetch_active);
sspp_idx = dpu_plane_pipe(plane);
set_bit(sspp_idx, fetch_active);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
dpu_plane_pipe(plane) - SSPP_VIG0,
sspp_idx - SSPP_VIG0,
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
@ -447,13 +448,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
stage_idx = zpos_cnt[pstate->stage]++;
stage_cfg->stage[pstate->stage][stage_idx] =
dpu_plane_pipe(plane);
sspp_idx;
stage_cfg->multirect_index[pstate->stage][stage_idx] =
pstate->multirect_index;
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, pstate, stage_idx,
dpu_plane_pipe(plane) - SSPP_VIG0,
sspp_idx - SSPP_VIG0,
format->base.pixel_format,
fb ? fb->modifier : 0);
@ -462,7 +463,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
pstate, format);
mixer[lm_idx].flush_mask |= flush_mask;
mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
sspp_idx);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@ -496,7 +498,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
@ -513,17 +514,14 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
/* stage config flush mask */
ctl->ops.update_pending_flush_mixer(ctl,
mixer[i].hw_lm->idx);
/* stage config flush mask */
ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
ctl->idx - CTL_0,
mixer[i].flush_mask);
ctl->idx - CTL_0);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
@ -767,16 +765,9 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
dspp->ops.setup_pcc(dspp, &cfg);
}
mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
mixer[i].hw_dspp->idx);
/* stage config flush mask */
ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
mixer[i].hw_lm->idx - DSPP_0,
ctl->idx - CTL_0,
mixer[i].flush_mask);
ctl->ops.update_pending_flush_dspp(ctl,
mixer[i].hw_dspp->idx);
}
}
@ -1235,17 +1226,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
for (i = 1; i < SSPP_MAX; i++) {
if (pipe_staged[i]) {
if (pipe_staged[i])
dpu_plane_clear_multirect(pipe_staged[i]);
if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
DPU_ERROR(
"r1 only virt plane:%d not supported\n",
pipe_staged[i]->plane->base.id);
rc = -EINVAL;
goto end;
}
}
}
z_pos = -1;

View File

@ -97,7 +97,6 @@ struct dpu_crtc_mixer {
struct dpu_hw_ctl *lm_ctl;
struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
u32 flush_mask;
};
/**

View File

@ -162,7 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
* @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
* @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@ -208,7 +208,7 @@ struct dpu_encoder_virt {
bool wide_bus_en;
/* DSC configuration */
struct msm_display_dsc_config *dsc;
struct drm_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@ -1791,12 +1791,12 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
static u32
dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
u32 enc_ip_width)
{
int ssm_delay, total_pixels, soft_slice_per_enc;
soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
soft_slice_per_enc = enc_ip_width / dsc->slice_width;
/*
* minimum number of initial line pixels is a sum of:
@ -1808,16 +1808,16 @@ dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
* 5. 6 additional pixels as the output of the rate buffer is
* 48 bits wide
*/
ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
if (soft_slice_per_enc > 1)
total_pixels += (ssm_delay * 3);
return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
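
As a worked example of the calculation above (all panel values hypothetical): with bits_per_component = 8 the ssm_delay is 84; taking initial_xmit_delay = 512 and a single soft slice gives total_pixels = 84 * 3 + 512 + 47 = 811, and with slice_width = 540 the encoder needs DIV_ROUND_UP(811, 540) = 2 initial lines.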
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
struct dpu_hw_pingpong *hw_pp,
struct msm_display_dsc_config *dsc,
struct drm_dsc_config *dsc,
u32 common_mode,
u32 initial_lines)
{
@ -1835,7 +1835,7 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
}
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
struct msm_display_dsc_config *dsc)
struct drm_dsc_config *dsc)
{
/* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
@ -1858,14 +1858,15 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
pic_width = dsc->drm->pic_width;
dsc_common_mode = 0;
pic_width = dsc->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
this_frame_slices = pic_width / dsc->drm->slice_width;
intf_ip_w = this_frame_slices * dsc->drm->slice_width;
this_frame_slices = pic_width / dsc->slice_width;
intf_ip_w = this_frame_slices * dsc->slice_width;
/*
* dsc merge case: when using 2 encoders for the same stream,
@ -1980,7 +1981,6 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_mixer_cfg mixer;
int i, num_lm;
u32 flush_mask = 0;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_lm[2];
struct dpu_hw_mixer *hw_mixer[2];
@ -1999,9 +1999,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
if (phys_enc->hw_ctl->ops.update_pending_flush)
phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
if (phys_enc->hw_ctl->ops.setup_blendstage)
@ -2061,6 +2060,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
if (phys_enc->hw_intf)
intf_cfg.intf = phys_enc->hw_intf->idx;
if (phys_enc->hw_wb)
intf_cfg.wb = phys_enc->hw_wb->idx;
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

View File

@ -36,7 +36,7 @@ struct msm_display_info {
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_cmd_mode;
bool is_te_using_watchdog_timer;
struct msm_display_dsc_config *dsc;
struct drm_dsc_config *dsc;
};
/**

View File

@ -1333,7 +1333,7 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
.name = "vbif_0", .id = VBIF_0,
.name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.default_ot_rd_limit = 32,
.default_ot_wr_limit = 32,
@ -1363,7 +1363,7 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = {
static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
.name = "vbif_0", .id = VBIF_0,
.name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
@ -1939,11 +1939,6 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
{
int i;
struct dpu_mdss_cfg *dpu_cfg;
dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
if (!dpu_cfg)
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
if (cfg_handler[i].hw_rev == hw_rev)

View File

@ -76,7 +76,7 @@ enum {
/**
* MDP TOP BLOCK features
* @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
* @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
* @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
* @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
* @DPU_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth

View File

@ -150,92 +150,84 @@ static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
switch (sspp) {
case SSPP_VIG0:
flushbits = BIT(0);
ctx->pending_flush_mask |= BIT(0);
break;
case SSPP_VIG1:
flushbits = BIT(1);
ctx->pending_flush_mask |= BIT(1);
break;
case SSPP_VIG2:
flushbits = BIT(2);
ctx->pending_flush_mask |= BIT(2);
break;
case SSPP_VIG3:
flushbits = BIT(18);
ctx->pending_flush_mask |= BIT(18);
break;
case SSPP_RGB0:
flushbits = BIT(3);
ctx->pending_flush_mask |= BIT(3);
break;
case SSPP_RGB1:
flushbits = BIT(4);
ctx->pending_flush_mask |= BIT(4);
break;
case SSPP_RGB2:
flushbits = BIT(5);
ctx->pending_flush_mask |= BIT(5);
break;
case SSPP_RGB3:
flushbits = BIT(19);
ctx->pending_flush_mask |= BIT(19);
break;
case SSPP_DMA0:
flushbits = BIT(11);
ctx->pending_flush_mask |= BIT(11);
break;
case SSPP_DMA1:
flushbits = BIT(12);
ctx->pending_flush_mask |= BIT(12);
break;
case SSPP_DMA2:
flushbits = BIT(24);
ctx->pending_flush_mask |= BIT(24);
break;
case SSPP_DMA3:
flushbits = BIT(25);
ctx->pending_flush_mask |= BIT(25);
break;
case SSPP_CURSOR0:
flushbits = BIT(22);
ctx->pending_flush_mask |= BIT(22);
break;
case SSPP_CURSOR1:
flushbits = BIT(23);
ctx->pending_flush_mask |= BIT(23);
break;
default:
break;
}
return flushbits;
}
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
switch (lm) {
case LM_0:
flushbits = BIT(6);
ctx->pending_flush_mask |= BIT(6);
break;
case LM_1:
flushbits = BIT(7);
ctx->pending_flush_mask |= BIT(7);
break;
case LM_2:
flushbits = BIT(8);
ctx->pending_flush_mask |= BIT(8);
break;
case LM_3:
flushbits = BIT(9);
ctx->pending_flush_mask |= BIT(9);
break;
case LM_4:
flushbits = BIT(10);
ctx->pending_flush_mask |= BIT(10);
break;
case LM_5:
flushbits = BIT(20);
ctx->pending_flush_mask |= BIT(20);
break;
default:
return -EINVAL;
break;
}
flushbits |= CTL_FLUSH_MASK_CTL;
return flushbits;
ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
@ -294,29 +286,25 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp)
{
uint32_t flushbits = 0;
switch (dspp) {
case DSPP_0:
flushbits = BIT(13);
ctx->pending_flush_mask |= BIT(13);
break;
case DSPP_1:
flushbits = BIT(14);
ctx->pending_flush_mask |= BIT(14);
break;
case DSPP_2:
flushbits = BIT(15);
ctx->pending_flush_mask |= BIT(15);
break;
case DSPP_3:
flushbits = BIT(21);
ctx->pending_flush_mask |= BIT(21);
break;
default:
return 0;
break;
}
return flushbits;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
@ -685,9 +673,9 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
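
The net effect of the CTL interface change, sketched from a caller's point of view (lm_idx stands for any mixer index; both forms appear in this patch):

	/* before: the CTL handed raw flush bits back to the caller */
	u32 flush_mask = ctl->ops.get_bitmask_mixer(ctl, lm_idx);
	ctl->ops.update_pending_flush(ctl, flush_mask);

	/* after: the caller passes the block index; the CTL owns the bit mapping */
	ctl->ops.update_pending_flush_mixer(ctl, lm_idx);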

View File

@ -129,6 +129,32 @@ struct dpu_hw_ctl_ops {
void (*update_pending_flush_merge_3d)(struct dpu_hw_ctl *ctx,
enum dpu_merge_3d blk);
/**
* OR in the given flushbits to the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : SSPP block index
*/
void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx,
enum dpu_sspp blk);
/**
* OR in the given flushbits to the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : LM block index
*/
void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
/**
* OR in the given flushbits to the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : DSPP block index
*/
void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
enum dpu_dspp blk);
/**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
@ -171,15 +197,6 @@ struct dpu_hw_ctl_ops {
*/
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
enum dpu_sspp blk);
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
enum dpu_dspp blk);
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer

View File

@ -37,7 +37,7 @@ static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc,
struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
@ -52,89 +52,89 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
if (is_cmd_mode)
initial_lines += 1;
slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
slice_last_group_size = 3 - (dsc->slice_width % 3);
data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
/* bpp is in 6.4 fixed-point format; the 4 LSBs are the fractional part */
data |= dsc->drm->bits_per_pixel << 12;
lsb = dsc->drm->bits_per_pixel % 4;
bpp = dsc->drm->bits_per_pixel / 4;
data |= dsc->bits_per_pixel << 12;
lsb = dsc->bits_per_pixel % 4;
bpp = dsc->bits_per_pixel / 4;
bpp *= 4;
bpp <<= 4;
bpp |= lsb;
data |= bpp << 8;
data |= (dsc->drm->block_pred_enable << 7);
data |= (dsc->drm->line_buf_depth << 3);
data |= (dsc->drm->simple_422 << 2);
data |= (dsc->drm->convert_rgb << 1);
data |= dsc->drm->bits_per_component;
data |= (dsc->block_pred_enable << 7);
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
data |= dsc->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
data = dsc->drm->pic_width << 16;
data |= dsc->drm->pic_height;
data = dsc->pic_width << 16;
data |= dsc->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
data = dsc->drm->slice_width << 16;
data |= dsc->drm->slice_height;
data = dsc->slice_width << 16;
data |= dsc->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
data = dsc->drm->slice_chunk_size << 16;
data = dsc->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
data = dsc->drm->initial_dec_delay << 16;
data |= dsc->drm->initial_xmit_delay;
data = dsc->initial_dec_delay << 16;
data |= dsc->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
data = dsc->drm->initial_scale_value;
data = dsc->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
data = dsc->drm->scale_decrement_interval;
data = dsc->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
data = dsc->drm->scale_increment_interval;
data = dsc->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
data = dsc->drm->first_line_bpg_offset;
data = dsc->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
data = dsc->drm->nfl_bpg_offset << 16;
data |= dsc->drm->slice_bpg_offset;
data = dsc->nfl_bpg_offset << 16;
data |= dsc->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
data = dsc->drm->initial_offset << 16;
data |= dsc->drm->final_offset;
data = dsc->initial_offset << 16;
data |= dsc->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
data = det_thresh_flatness << 10;
data |= dsc->drm->flatness_max_qp << 5;
data |= dsc->drm->flatness_min_qp;
data |= dsc->flatness_max_qp << 5;
data |= dsc->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
data = dsc->drm->rc_model_size;
data = dsc->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
data = dsc->drm->rc_tgt_offset_low << 18;
data |= dsc->drm->rc_tgt_offset_high << 14;
data |= dsc->drm->rc_quant_incr_limit1 << 9;
data |= dsc->drm->rc_quant_incr_limit0 << 4;
data |= dsc->drm->rc_edge_factor;
data = dsc->rc_tgt_offset_low << 18;
data |= dsc->rc_tgt_offset_high << 14;
data |= dsc->rc_quant_incr_limit1 << 9;
data |= dsc->rc_quant_incr_limit0 << 4;
data |= dsc->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc)
struct drm_dsc_config *dsc)
{
struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
off += 4;
}

View File

@ -31,7 +31,7 @@ struct dpu_hw_dsc_ops {
* @initial_lines: amount of initial lines to be used
*/
void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc,
struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines);
@ -41,7 +41,7 @@ struct dpu_hw_dsc_ops {
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
struct msm_display_dsc_config *dsc);
struct drm_dsc_config *dsc);
};
struct dpu_hw_dsc {

View File

@ -273,11 +273,9 @@ enum dpu_wd_timer {
};
enum dpu_vbif {
VBIF_0,
VBIF_1,
VBIF_RT,
VBIF_NRT,
VBIF_MAX,
VBIF_RT = VBIF_0,
VBIF_NRT = VBIF_1
};
/**

View File

@ -780,8 +780,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
}
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, const struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe)
void __iomem *addr, const struct dpu_mdss_cfg *catalog)
{
struct dpu_hw_pipe *hw_pipe;
const struct dpu_sspp_cfg *cfg;

View File

@ -377,11 +377,9 @@ struct dpu_kms;
* @idx: Pipe index for which driver object is required
* @addr: Mapped register io address of MDP
* @catalog : Pointer to mdss catalog data
* @is_virtual_pipe: is this pipe virtual pipe
*/
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, const struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe);
void __iomem *addr, const struct dpu_mdss_cfg *catalog);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context

View File

@ -384,12 +384,9 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
struct device *mdss_dev = dpu_dev->parent;
/* Interconnects are a part of MDSS device tree binding, not the
* MDP/DPU device. */
path0 = of_icc_get(mdss_dev, "mdp0-mem");
path1 = of_icc_get(mdss_dev, "mdp1-mem");
path0 = msm_icc_get(dpu_dev, "mdp0-mem");
path1 = msm_icc_get(dpu_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
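
Both this and the mdp5 call site further down now go through msm_icc_get(). A hedged sketch of what that helper plausibly does, given the pull summary's "support getting interconnects from either the mdss or the mdp5/dpu device nodes" (the actual body may differ):

#include <linux/interconnect.h>

struct icc_path *msm_icc_get(struct device *dev, const char *name)
{
	struct icc_path *path = of_icc_get(dev, name);

	/*
	 * of_icc_get() returns NULL when the node has no "interconnects"
	 * property, so fall back to the parent (MDSS) node in that case;
	 * real errors (ERR_PTR values) are passed through unchanged.
	 */
	if (path)
		return path;

	return of_icc_get(dev->parent, name);
}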
@ -782,7 +779,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
(1UL << max_crtc_count) - 1, 0);
(1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@ -826,12 +823,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
dpu_kms->hw_vbif[vbif_idx] = NULL;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
if (dpu_kms->hw_vbif[i]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
dpu_kms->hw_vbif[i] = NULL;
}
}
}
@ -902,12 +897,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
int i;
struct dpu_kms *dpu_kms;
const struct dpu_mdss_cfg *cat;
struct dpu_hw_mdp *top;
dpu_kms = to_dpu_kms(kms);
cat = dpu_kms->catalog;
top = dpu_kms->hw_mdp;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@ -1113,12 +1106,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
if (!dpu_kms->hw_vbif[vbif_idx])
rc = -EINVAL;
DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
dpu_kms->hw_vbif[vbif_idx] = NULL;
goto power_error;

View File

@ -91,7 +91,7 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
* @mplane_list: List of multirect planes of the same pipe
* @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
*/
@ -106,8 +106,6 @@ struct dpu_plane {
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
bool is_virtual;
struct list_head mplane_list;
const struct dpu_mdss_cfg *catalog;
};
@ -225,7 +223,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu, *tmp;
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
u32 fixed_buff_size;
u32 total_fl;
@ -239,19 +237,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
pstate = to_dpu_plane_state(plane->state);
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
u32 tmp_width;
if (!tmp->base.state->visible)
continue;
tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
src_width,
tmp_width);
src_width = max_t(u32, src_width,
tmp_width);
}
/* FIXME: in multirect case account for the src_width of all the planes */
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == DPU_CHROMA_420) {
@ -854,13 +840,8 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
}
done:
if (dpu_plane[R0]->is_virtual) {
pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
} else {
pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
}
pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@ -869,18 +850,6 @@ done:
return 0;
}
/**
* dpu_plane_get_ctl_flush - get control flush for the given plane
* @plane: Pointer to drm plane structure
* @ctl: Pointer to hardware control driver
* @flush_sspp: Pointer to sspp flush control word
*/
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp)
{
*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
}
static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@ -1266,19 +1235,13 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
trace_dpu_plane_disable(DRMID(plane), false,
pstate->multirect_mode);
pstate->pending = true;
if (is_dpu_plane_virtual(plane) &&
pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
}
static void dpu_plane_atomic_update(struct drm_plane *plane,
@ -1493,22 +1456,16 @@ enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
}
bool is_dpu_plane_virtual(struct drm_plane *plane)
{
return plane ? to_dpu_plane(plane)->is_virtual : false;
}
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id)
unsigned long possible_crtcs)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
struct drm_plane *plane = NULL;
const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
uint32_t supported_rotations;
int ret = -EINVAL;
@ -1524,18 +1481,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &pdpu->base;
pdpu->pipe = pipe;
pdpu->is_virtual = (master_plane_id != 0);
INIT_LIST_HEAD(&pdpu->mplane_list);
master_plane = drm_plane_find(dev, NULL, master_plane_id);
if (master_plane) {
struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
}
/* initialize underlying h/w driver */
pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
master_plane_id != 0);
pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog);
if (IS_ERR(pdpu->pipe_hw)) {
DPU_ERROR("[%u]SSPP init failed\n", pipe);
ret = PTR_ERR(pdpu->pipe_hw);
@ -1545,14 +1493,8 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
if (pdpu->is_virtual) {
format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
}
else {
format_list = pdpu->pipe_hw->cap->sblk->format_list;
num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
}
format_list = pdpu->pipe_hw->cap->sblk->format_list;
num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
format_list, num_formats,
@ -1562,14 +1504,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
pdpu->catalog = kms->catalog;
if (kms->catalog->mixer_count &&
kms->catalog->mixer[0].sblk->maxblendstages) {
zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
}
ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
@ -1594,15 +1529,14 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
mutex_init(&pdpu->lock);
DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
pipe, plane->base.id, master_plane_id);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
clean_sspp:
if (pdpu && pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
list_del(&pdpu->mplane_list);
kfree(pdpu);
return ERR_PTR(ret);
}

View File

@ -64,23 +64,6 @@ struct dpu_multirect_plane_states {
*/
enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
/**
* is_dpu_plane_virtual - check for virtual plane
* @plane: Pointer to DRM plane object
* returns: true - if the plane is virtual
* false - if the plane is primary
*/
bool is_dpu_plane_virtual(struct drm_plane *plane);
/**
* dpu_plane_get_ctl_flush - get control flush mask
* @plane: Pointer to DRM plane object
* @ctl: Pointer to control hardware
* @flush_sspp: Pointer to sspp flush control word
*/
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp);
/**
* dpu_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
@ -99,14 +82,11 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* @pipe: dpu hardware pipe identifier
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
* a regular plane initialization. A non-zero primary plane
* id will be passed for a virtual pipe initialization.
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id);
unsigned long possible_crtcs);
/**
* dpu_plane_validate_multirecti_v2 - validate the multirect planes

View File

@ -11,6 +11,26 @@
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
{
if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
return dpu_kms->hw_vbif[vbif_idx];
return NULL;
}
static const char *dpu_vbif_name(enum dpu_vbif idx)
{
switch (idx) {
case VBIF_RT:
return "VBIF_RT";
case VBIF_NRT:
return "VBIF_NRT";
default:
return "??";
}
}
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
@ -42,12 +62,12 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
if (!status) {
rc = -ETIMEDOUT;
DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
vbif->idx - VBIF_0, xin_id);
DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
dpu_vbif_name(vbif->idx), xin_id);
} else {
rc = 0;
DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
vbif->idx - VBIF_0, xin_id);
DRM_DEBUG_ATOMIC("%s client %d is halted\n",
dpu_vbif_name(vbif->idx), xin_id);
}
return rc;
@ -87,8 +107,8 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
}
}
DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
vbif->idx - VBIF_0, params->xin_id,
DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
dpu_vbif_name(vbif->idx), params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
}
@ -133,8 +153,8 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
}
exit:
DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
vbif->idx - VBIF_0, params->xin_id, ot_lim);
DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
return ot_lim;
}
@ -148,20 +168,15 @@ exit:
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
struct dpu_hw_vbif *vbif = NULL;
struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
int ret, i;
int ret;
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
if (dpu_kms->hw_vbif[i] &&
dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
vbif = dpu_kms->hw_vbif[i];
}
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
@ -204,7 +219,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
struct dpu_hw_vbif *vbif = NULL;
struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
@ -216,13 +231,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
if (dpu_kms->hw_vbif[i] &&
dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
vbif = dpu_kms->hw_vbif[i];
break;
}
}
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
@ -245,8 +254,8 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
params->vbif_idx, params->xin_id, i,
DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
dpu_vbif_name(params->vbif_idx), params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
@ -266,8 +275,8 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
if (vbif && vbif->ops.clear_errors) {
vbif->ops.clear_errors(vbif, &pnd, &src);
if (pnd || src) {
DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
vbif->idx - VBIF_0, pnd, src);
DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
dpu_vbif_name(vbif->idx), pnd, src);
}
}
}

View File

@ -902,12 +902,9 @@ fail:
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
/* Interconnects are a part of MDSS device tree binding, not the
* MDP5 device. */
struct device *mdss_dev = pdev->dev.parent;
struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);

View File

@ -431,7 +431,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
if (rate == link_rate_hbr3)
pixel_div = 6;
else if (rate == 1620000 || rate == 270000)
else if (rate == 162000 || rate == 270000)
pixel_div = 2;
else if (rate == link_rate_hbr2)
pixel_div = 4;
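
For context: the neighbouring comparisons (270000 for HBR, plus the link_rate_hbr2/hbr3 locals) show the units this driver uses for link rates, so the old 1620000 literal was 10x too large and the RBR branch could never match; 162000 restores the intended pixel_div = 2 path for RBR links.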

View File

@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
@ -1238,8 +1238,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
@ -1358,25 +1356,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
return ret;
}
static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
{
int ret = 0;
dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
ctrl->dp_ctrl.pixel_rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret)
DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
return ret;
}
@ -1520,8 +1500,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
@ -1535,38 +1513,6 @@ end:
return ret;
}
static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
{
int ret = 0;
if (!ctrl->link->phy_params.phy_test_pattern_sel) {
drm_dbg_dp(ctrl->drm_dev,
"no test pattern selected by sink\n");
return ret;
}
/*
* The global reset will need DP link related clocks to be
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
ret = dp_ctrl_off(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
}
ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
if (!ret)
ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
else
DRM_ERROR("failed to enable DP link controller\n");
return ret;
}
static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
{
bool success = false;
@ -1619,6 +1565,48 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
return success;
}
static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
{
int ret;
unsigned long pixel_rate;
if (!ctrl->link->phy_params.phy_test_pattern_sel) {
drm_dbg_dp(ctrl->drm_dev,
"no test pattern selected by sink\n");
return 0;
}
/*
* The global reset will need DP link related clocks to be
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
ret = dp_ctrl_off(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
}
ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to enable DP link controller\n");
return ret;
}
pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
return ret;
}
dp_ctrl_send_phy_test_pattern(ctrl);
return 0;
}
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
@ -1689,11 +1677,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
struct dp_ctrl_private *ctrl;
u32 rate = 0;
u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
unsigned long pixel_rate;
if (!dp_ctrl)
return -EINVAL;
@ -1701,25 +1690,24 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
rate = ctrl->panel->link_info.rate;
pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
"using phy test link parameters\n");
if (!ctrl->panel->dp_mode.drm_mode.clock)
ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
if (!pixel_rate)
pixel_rate = phy_cts_pixel_clk_khz;
} else {
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
}
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
ctrl->dp_ctrl.pixel_rate);
pixel_rate);
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
@ -1816,31 +1804,12 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
{
int ret;
struct dp_ctrl_private *ctrl;
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
ret = dp_ctrl_enable_stream_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
return ret;
}
dp_ctrl_send_phy_test_pattern(ctrl);
return 0;
}
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
unsigned long pixel_rate;
unsigned long pixel_rate_orig;
if (!dp_ctrl)
@ -1848,15 +1817,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
pixel_rate_orig = ctrl->dp_ctrl.pixel_rate;
if (dp_ctrl->wide_bus_en)
ctrl->dp_ctrl.pixel_rate >>= 1;
pixel_rate >>= 1;
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
ctrl->link->link_params.num_lanes, pixel_rate);
if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
@ -1866,9 +1834,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
}
}
ret = dp_ctrl_enable_stream_clocks(ctrl);
dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
goto end;
}
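
With the pixel_rate field removed from struct dp_ctrl (see the header hunk further down), dp_ctrl_on_stream() now derives the stream clock locally: take the mode clock in kHz, halve it when wide-bus is enabled (two pixels move per clock cycle), and hand the clk layer a rate in Hz. A runnable sketch of just that computation:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the wide-bus adjustment done in dp_ctrl_on_stream(). */
static unsigned long stream_pixel_hz(unsigned long mode_clock_khz,
				     bool wide_bus_en)
{
	unsigned long pixel_rate = mode_clock_khz;

	if (wide_bus_en)
		pixel_rate >>= 1;	/* two pixels per clock cycle */

	return pixel_rate * 1000;	/* dp_ctrl_set_clock_rate() takes Hz */
}

int main(void)
{
	/* 4k60-class mode, ~594 MHz pixel clock */
	printf("%lu\n", stream_pixel_hz(594000, true)); /* 297000000 */
	return 0;
}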

View File

@ -16,7 +16,6 @@
struct dp_ctrl {
bool orientation;
atomic_t aborted;
u32 pixel_rate;
bool wide_bus_en;
};

View File

@ -786,7 +786,7 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
link->request.test_lane_count);
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
@ -965,8 +965,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
if (channel_eq_done && clock_recovery_done)
return -EINVAL;
return 0;
}
/**

View File

@ -6,14 +6,6 @@
#include "dsi.h"
#include "dsi_cfg.h"
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
{
if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
return NULL;
return msm_dsi->encoder;
}
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
@ -21,7 +13,7 @@ bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
@ -220,7 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv;
struct drm_bridge *ext_bridge;
int ret;
if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
@ -254,26 +245,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
/*
* check if the dsi encoder output is connected to a panel or an
* external bridge. We create a connector only if we're connected to a
* drm_panel device. When we're connected to an external bridge, we
* assume that the drm_bridge driver will create the connector itself.
*/
ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
if (ext_bridge)
msm_dsi->connector =
msm_dsi_manager_ext_bridge_init(msm_dsi->id);
else
msm_dsi->connector =
msm_dsi_manager_connector_init(msm_dsi->id);
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
}
@ -287,12 +262,6 @@ fail:
msm_dsi->bridge = NULL;
}
/* don't destroy connector if we didn't make it */
if (msm_dsi->connector && !msm_dsi->external_bridge)
msm_dsi->connector->funcs->destroy(msm_dsi->connector);
msm_dsi->connector = NULL;
return ret;
}

View File

@ -12,7 +12,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "disp/msm_disp_snapshot.h"
@ -30,27 +29,12 @@ enum msm_dsi_phy_usecase {
MSM_DSI_PHY_SLAVE,
};
#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4
/* Regulators for DSI devices */
struct dsi_reg_entry {
char name[32];
int enable_load;
int disable_load;
};
struct dsi_reg_config {
int num;
struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX];
};
struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
/* connector managed by us when we're connected to a drm_panel */
struct drm_connector *connector;
/* internal dsi bridge attached to MDP interface */
struct drm_bridge *bridge;
@ -58,10 +42,8 @@ struct msm_dsi {
struct msm_dsi_phy *phy;
/*
* panel/external_bridge connected to dsi bridge output, only one of the
* two can be valid at a time
* external_bridge connected to dsi bridge output
*/
struct drm_panel *panel;
struct drm_bridge *external_bridge;
struct device *phy_dev;
@ -76,8 +58,7 @@ struct msm_dsi {
/* dsi manager */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
@ -87,11 +68,9 @@ void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
{
return msm_dsi->panel || msm_dsi->external_bridge;
return msm_dsi->external_bridge;
}
struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
/* dsi host */
struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
@ -116,9 +95,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
@ -154,7 +131,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;

View File

@ -9,16 +9,16 @@ static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss", "iface", "bus",
};
static const struct regulator_bulk_data apq8064_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
.reg_cfg = {
.num = 3,
.regs = {
{"vdda", 100000, 100}, /* 1.2 V */
{"avdd", 10000, 100}, /* 3.0 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
},
.regulator_data = apq8064_dsi_regulators,
.num_regulators = ARRAY_SIZE(apq8064_dsi_regulators),
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
.io_start = { 0x4700000, 0x5800000 },
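
This and the following per-SoC supply tables move from the driver-private dsi_reg_entry format to the core's struct regulator_bulk_data, with init_load_uA expressing the active load once at get time; that is what lets the hand-rolled regulator_set_load() loops be deleted further down. A hedged sketch of the consumer-side pattern (the device and supply names are placeholders):

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const struct regulator_bulk_data demo_supplies[] = {
	{ .supply = "vdda",  .init_load_uA = 100000 },	/* 1.2 V rail */
	{ .supply = "vddio", .init_load_uA = 100000 },	/* 1.8 V rail */
};

/* The const table stays shared across instances; the core allocates
 * the per-device copy and applies the initial load for us. */
static int demo_power_on(struct device *dev,
			 struct regulator_bulk_data **out)
{
	int ret;

	ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(demo_supplies),
					    demo_supplies, out);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(demo_supplies), *out);
}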
@ -29,16 +29,16 @@ static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
static const struct regulator_bulk_data msm8974_apq8084_regulators[] = {
{ .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 3,
.regs = {
{"vdd", 150000, 100}, /* 3.0 V */
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
},
.regulator_data = msm8974_apq8084_regulators,
.num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd922800, 0xfd922b00 },
@ -49,15 +49,15 @@ static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
static const struct regulator_bulk_data msm8916_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
},
.regulator_data = msm8916_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8916_dsi_regulators),
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
.io_start = { 0x1a98000 },
@ -68,34 +68,34 @@ static const char * const dsi_8976_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
static const struct regulator_bulk_data msm8976_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdda", 100000, 100}, /* 1.2 V */
{"vddio", 100000, 100}, /* 1.8 V */
},
},
.regulator_data = msm8976_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8976_dsi_regulators),
.bus_clk_names = dsi_8976_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
.io_start = { 0x1a94000, 0x1a96000 },
.num_dsi = 2,
};
static const struct regulator_bulk_data msm8994_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
{ .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
{ .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */
{ .supply = "lab_reg", .init_load_uA = -1 },
{ .supply = "ibb_reg", .init_load_uA = -1 },
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 6,
.regs = {
{"vdda", 100000, 100}, /* 1.25 V */
{"vddio", 100000, 100}, /* 1.8 V */
{"vcca", 10000, 100}, /* 1.0 V */
{"vdd", 100000, 100}, /* 1.8 V */
{"lab_reg", -1, -1},
{"ibb_reg", -1, -1},
},
},
.regulator_data = msm8994_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8994_dsi_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd998000, 0xfd9a0000 },
@ -106,16 +106,16 @@ static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
static const struct regulator_bulk_data msm8996_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */
{ .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdda", 18160, 1 }, /* 1.25 V */
{"vcca", 17000, 32 }, /* 0.925 V */
{"vddio", 100000, 100 },/* 1.8 V */
},
},
.regulator_data = msm8996_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8996_dsi_regulators),
.bus_clk_names = dsi_8996_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
.io_start = { 0x994000, 0x996000 },
@ -126,15 +126,15 @@ static const char * const dsi_msm8998_bus_clk_names[] = {
"iface", "bus", "core",
};
static const struct regulator_bulk_data msm8998_dsi_regulators[] = {
{ .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */
{ .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */
};
static const struct msm_dsi_config msm8998_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdd", 367000, 16 }, /* 0.9 V */
{"vdda", 62800, 2 }, /* 1.2 V */
},
},
.regulator_data = msm8998_dsi_regulators,
.num_regulators = ARRAY_SIZE(msm8998_dsi_regulators),
.bus_clk_names = dsi_msm8998_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@ -145,14 +145,14 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
"iface", "bus", "core", "core_mmss",
};
static const struct regulator_bulk_data sdm660_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */
};
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdda", 12560, 4 }, /* 1.2 V */
},
},
.regulator_data = sdm660_dsi_regulators,
.num_regulators = ARRAY_SIZE(sdm660_dsi_regulators),
.bus_clk_names = dsi_sdm660_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@ -167,28 +167,28 @@ static const char * const dsi_sc7180_bus_clk_names[] = {
"iface", "bus",
};
static const struct regulator_bulk_data sdm845_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
};
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 21800, 4 }, /* 1.2 V */
},
},
.regulator_data = sdm845_dsi_regulators,
.num_regulators = ARRAY_SIZE(sdm845_dsi_regulators),
.bus_clk_names = dsi_sdm845_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
.io_start = { 0xae94000, 0xae96000 },
.num_dsi = 2,
};
static const struct regulator_bulk_data sc7180_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
};
static const struct msm_dsi_config sc7180_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 21800, 4 }, /* 1.2 V */
},
},
.regulator_data = sc7180_dsi_regulators,
.num_regulators = ARRAY_SIZE(sc7180_dsi_regulators),
.bus_clk_names = dsi_sc7180_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
.io_start = { 0xae94000 },
@ -199,14 +199,14 @@ static const char * const dsi_sc7280_bus_clk_names[] = {
"iface", "bus",
};
static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
};
static const struct msm_dsi_config sc7280_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 8350, 0 }, /* 1.2 V */
},
},
.regulator_data = sc7280_dsi_regulators,
.num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
.bus_clk_names = dsi_sc7280_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
.io_start = { 0xae94000 },
@ -217,14 +217,14 @@ static const char * const dsi_qcm2290_bus_clk_names[] = {
"iface", "bus",
};
static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = {
{ .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
};
static const struct msm_dsi_config qcm2290_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 1,
.regs = {
{"vdda", 21800, 4 }, /* 1.2 V */
},
},
.regulator_data = qcm2290_dsi_cfg_regulators,
.num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators),
.bus_clk_names = dsi_qcm2290_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names),
.io_start = { 0x5e94000 },

View File

@ -32,7 +32,8 @@
struct msm_dsi_config {
u32 io_offset;
struct dsi_reg_config reg_cfg;
const struct regulator_bulk_data *regulator_data;
int num_regulators;
const char * const *bus_clk_names;
const int num_bus_clks;
const resource_size_t io_start[DSI_MAX];

View File

@ -33,7 +33,7 @@
#define DSI_RESET_TOGGLE_DELAY_MS 20
static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc);
static int dsi_populate_dsc_params(struct drm_dsc_config *dsc);
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
@ -108,7 +108,7 @@ struct msm_dsi_host {
void __iomem *ctrl_base;
phys_addr_t ctrl_size;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
struct regulator_bulk_data *supplies;
int num_bus_clks;
struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
@ -144,7 +144,6 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@ -161,10 +160,9 @@ struct msm_dsi_host {
struct regmap *sfpb;
struct drm_display_mode *mode;
struct msm_display_dsc_config *dsc;
struct drm_dsc_config *dsc;
/* connected device info */
struct device_node *device_node;
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@ -205,9 +203,6 @@ static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
msm_writel(data, msm_host->ctrl_base + reg);
}
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
@ -258,76 +253,6 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
return container_of(host, struct msm_dsi_host, base);
}
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int i;
DBG("");
for (i = num - 1; i >= 0; i--)
if (regs[i].disable_load >= 0)
regulator_set_load(s[i].consumer,
regs[i].disable_load);
regulator_bulk_disable(num, s);
}
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int ret, i;
DBG("");
for (i = 0; i < num; i++) {
if (regs[i].enable_load >= 0) {
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
pr_err("regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
}
}
}
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
pr_err("regulator enable failed, %d\n", ret);
goto fail;
}
return 0;
fail:
for (i--; i >= 0; i--)
regulator_set_load(s[i].consumer, regs[i].disable_load);
return ret;
}
static int dsi_regulator_init(struct msm_dsi_host *msm_host)
{
struct regulator_bulk_data *s = msm_host->supplies;
const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
int i, ret;
for (i = 0; i < num; i++)
s[i].supply = regs[i].name;
ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
if (ret < 0) {
pr_err("%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}
return 0;
}
int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@ -916,7 +841,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
{
struct msm_display_dsc_config *dsc = msm_host->dsc;
struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, intf_width, reg_ctrl, reg_ctrl2;
u32 slice_per_intf, total_bytes_per_intf;
u32 pkt_per_line;
@ -927,24 +852,24 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
* compress mode registers
*/
intf_width = hdisplay;
slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width);
slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
/* If slice_per_pkt is greater than slice_per_intf
* then default to 1. This can happen during partial
* update.
*/
if (slice_per_intf > dsc->drm->slice_count)
dsc->drm->slice_count = 1;
if (slice_per_intf > dsc->slice_count)
dsc->slice_count = 1;
slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width);
bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8);
slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8);
dsc->drm->slice_chunk_size = bytes_in_slice;
dsc->slice_chunk_size = bytes_in_slice;
total_bytes_per_intf = bytes_in_slice * slice_per_intf;
eol_byte_num = total_bytes_per_intf % 3;
pkt_per_line = slice_per_intf / dsc->drm->slice_count;
pkt_per_line = slice_per_intf / dsc->slice_count;
if (is_cmd_mode) /* packet data type */
reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
@ -1009,7 +934,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
}
if (msm_host->dsc) {
struct msm_display_dsc_config *dsc = msm_host->dsc;
struct drm_dsc_config *dsc = msm_host->dsc;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@ -1018,9 +943,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
return;
}
dsc->drm->pic_width = mode->hdisplay;
dsc->drm->pic_height = mode->vdisplay;
DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height);
dsc->pic_width = mode->hdisplay;
dsc->pic_height = mode->vdisplay;
DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);
/* we do the calculations for dsc parameters here so that
* panel can use these parameters
@ -1500,14 +1425,6 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
static void dsi_hpd_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
container_of(work, struct msm_dsi_host, hpd_work);
drm_helper_hpd_irq_event(msm_host->dev);
}
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@ -1686,6 +1603,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
if (dsi->dsc)
msm_host->dsc = dsi->dsc;
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@ -1697,8 +1616,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
return ret;
DBG("id=%d", msm_host->id);
if (msm_host->dev)
queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
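
DSC configuration now travels with the mipi_dsi_device: the host simply copies dsi->dsc at attach time, replacing the old dance of retrieving a drm_panel and its panel->dsc during modeset init (removed further down). A hypothetical panel-side fragment showing where that pointer would come from; the probe function and all values are illustrative:

#include <linux/device.h>
#include <drm/display/drm_dsc.h>
#include <drm/drm_mipi_dsi.h>

static int demo_panel_probe(struct mipi_dsi_device *dsi)
{
	struct drm_dsc_config *dsc;

	dsc = devm_kzalloc(&dsi->dev, sizeof(*dsc), GFP_KERNEL);
	if (!dsc)
		return -ENOMEM;

	dsc->slice_width = 540;		/* illustrative geometry */
	dsc->slice_height = 20;
	dsc->bits_per_component = 8;
	dsc->bits_per_pixel = 8;	/* read as integer bpp by this host */

	dsi->dsc = dsc;			/* picked up in dsi_host_attach() */
	return mipi_dsi_attach(dsi);
}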
@ -1710,11 +1627,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
dsi_dev_detach(msm_host->pdev);
msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
if (msm_host->dev)
queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@ -1841,7 +1754,7 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = {
2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
{
int mux_words_size;
int groups_per_line, groups_total;
@ -1854,98 +1767,98 @@ static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
int final_value, final_scale;
int i;
dsc->drm->rc_model_size = 8192;
dsc->drm->first_line_bpg_offset = 12;
dsc->drm->rc_edge_factor = 6;
dsc->drm->rc_tgt_offset_high = 3;
dsc->drm->rc_tgt_offset_low = 3;
dsc->drm->simple_422 = 0;
dsc->drm->convert_rgb = 1;
dsc->drm->vbr_enable = 0;
dsc->rc_model_size = 8192;
dsc->first_line_bpg_offset = 12;
dsc->rc_edge_factor = 6;
dsc->rc_tgt_offset_high = 3;
dsc->rc_tgt_offset_low = 3;
dsc->simple_422 = 0;
dsc->convert_rgb = 1;
dsc->vbr_enable = 0;
/* handle only bpp = bpc = 8 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
dsc->drm->rc_range_params[i].range_min_qp = min_qp[i];
dsc->drm->rc_range_params[i].range_max_qp = max_qp[i];
dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i];
dsc->rc_range_params[i].range_min_qp = min_qp[i];
dsc->rc_range_params[i].range_max_qp = max_qp[i];
dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i];
}
dsc->drm->initial_offset = 6144; /* Not bpp 12 */
if (dsc->drm->bits_per_pixel != 8)
dsc->drm->initial_offset = 2048; /* bpp = 12 */
dsc->initial_offset = 6144; /* Not bpp 12 */
if (dsc->bits_per_pixel != 8)
dsc->initial_offset = 2048; /* bpp = 12 */
mux_words_size = 48; /* bpc == 8/10 */
if (dsc->drm->bits_per_component == 12)
if (dsc->bits_per_component == 12)
mux_words_size = 64;
dsc->drm->initial_xmit_delay = 512;
dsc->drm->initial_scale_value = 32;
dsc->drm->first_line_bpg_offset = 12;
dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1;
dsc->initial_xmit_delay = 512;
dsc->initial_scale_value = 32;
dsc->first_line_bpg_offset = 12;
dsc->line_buf_depth = dsc->bits_per_component + 1;
/* bpc 8 */
dsc->drm->flatness_min_qp = 3;
dsc->drm->flatness_max_qp = 12;
dsc->drm->rc_quant_incr_limit0 = 11;
dsc->drm->rc_quant_incr_limit1 = 11;
dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
dsc->flatness_min_qp = 3;
dsc->flatness_max_qp = 12;
dsc->rc_quant_incr_limit0 = 11;
dsc->rc_quant_incr_limit1 = 11;
dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
/* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
* params are calculated
*/
groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3);
dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8;
if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8)
dsc->drm->slice_chunk_size++;
groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8;
if ((dsc->slice_width * dsc->bits_per_pixel) % 8)
dsc->slice_chunk_size++;
/* rbs-min */
min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset +
dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel +
groups_per_line * dsc->drm->first_line_bpg_offset;
min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset +
dsc->initial_xmit_delay * dsc->bits_per_pixel +
groups_per_line * dsc->first_line_bpg_offset;
hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel);
hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel);
dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay;
dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size /
(dsc->drm->rc_model_size - dsc->drm->initial_offset);
dsc->initial_scale_value = 8 * dsc->rc_model_size /
(dsc->rc_model_size - dsc->initial_offset);
slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height;
slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height;
groups_total = groups_per_line * dsc->drm->slice_height;
groups_total = groups_per_line * dsc->slice_height;
data = dsc->drm->first_line_bpg_offset * 2048;
data = dsc->first_line_bpg_offset * 2048;
dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1));
dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2);
pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2);
num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits);
dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits);
dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
/* bpp * 16 + 0.5 */
data = dsc->drm->bits_per_pixel * 16;
data = dsc->bits_per_pixel * 16;
data *= 2;
data++;
data /= 2;
target_bpp_x16 = data;
data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16;
final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits;
dsc->drm->final_offset = final_value;
data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
final_value = dsc->rc_model_size - data + num_extra_mux_bits;
dsc->final_offset = final_value;
final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value);
final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value);
data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset);
dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data;
data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset);
dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8);
dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8);
return 0;
}
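
A quick sanity check on the constants above: with the 8 bpp defaults (rc_model_size = 8192, initial_offset = 6144), the later recomputation 8 * rc_model_size / (rc_model_size - initial_offset) = 65536 / 2048 lands back on the 32 assigned to initial_scale_value earlier in the function. A runnable restatement (slice_width is an illustrative value):

#include <stdio.h>

int main(void)
{
	int rc_model_size = 8192, initial_offset = 6144;	/* bpp == 8 */
	int slice_width = 540;					/* illustrative */

	int groups_per_line = (slice_width + 2) / 3;		/* DIV_ROUND_UP */
	int initial_scale_value =
		8 * rc_model_size / (rc_model_size - initial_offset);

	printf("groups_per_line=%d initial_scale_value=%d\n",
	       groups_per_line, initial_scale_value);		/* 180, 32 */
	return 0;
}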
@ -1954,7 +1867,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *endpoint, *device_node;
struct device_node *endpoint;
int ret = 0;
/*
@ -1977,16 +1890,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
goto err;
}
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}
msm_host->device_node = device_node;
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
@ -1997,8 +1900,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
}
}
of_node_put(device_node);
err:
of_node_put(endpoint);
@ -2028,6 +1929,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
struct platform_device *pdev = msm_dsi->pdev;
const struct msm_dsi_config *cfg;
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
@ -2060,6 +1962,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
pr_err("%s: get config failed\n", __func__);
goto fail;
}
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
@ -2069,13 +1972,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
/* fixup base address by io offset */
msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
msm_host->ctrl_base += cfg->io_offset;
ret = dsi_regulator_init(msm_host);
if (ret) {
pr_err("%s: regulator init failed\n", __func__);
ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
cfg->regulator_data,
&msm_host->supplies);
if (ret)
goto fail;
}
ret = dsi_clk_init(msm_host);
if (ret) {
@ -2126,7 +2029,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->id = msm_host->id;
@ -2159,23 +2061,9 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct drm_panel *panel;
int ret;
msm_host->dev = dev;
panel = msm_dsi_host_get_panel(&msm_host->base);
if (!IS_ERR(panel) && panel->dsc) {
struct msm_display_dsc_config *dsc = msm_host->dsc;
if (!dsc) {
dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL);
if (!dsc)
return -ENOMEM;
dsc->drm = panel->dsc;
msm_host->dsc = dsc;
}
}
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
@ -2556,7 +2444,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
msm_dsi_sfpb_config(msm_host, true);
ret = dsi_host_regulator_enable(msm_host);
ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
msm_host->supplies);
if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n",
__func__, ret);
@ -2596,7 +2485,8 @@ fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
msm_host->supplies);
unlock_ret:
mutex_unlock(&msm_host->dev_mutex);
return ret;
@ -2623,7 +2513,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
msm_host->supplies);
msm_dsi_sfpb_config(msm_host, false);
@ -2659,45 +2550,33 @@ enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct msm_display_dsc_config *dsc = msm_host->dsc;
struct drm_dsc_config *dsc = msm_host->dsc;
int pic_width = mode->hdisplay;
int pic_height = mode->vdisplay;
if (!msm_host->dsc)
return MODE_OK;
if (pic_width % dsc->drm->slice_width) {
if (pic_width % dsc->slice_width) {
pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
pic_width, dsc->drm->slice_width);
pic_width, dsc->slice_width);
return MODE_H_ILLEGAL;
}
if (pic_height % dsc->drm->slice_height) {
if (pic_height % dsc->slice_height) {
pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
pic_height, dsc->drm->slice_height);
pic_height, dsc->slice_height);
return MODE_V_ILLEGAL;
}
return MODE_OK;
}
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
{
return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
}
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
return to_msm_dsi_host(host)->mode_flags;
}
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
return of_drm_find_bridge(msm_host->device_node);
}
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@ -2771,7 +2650,7 @@ void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}
struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

View File

@ -141,14 +141,11 @@ static int enable_phy(struct msm_dsi *msm_dsi,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
bool is_bonded_dsi = IS_BONDED_DSI();
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
return ret;
return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
}
static int
@ -214,39 +211,26 @@ static void dsi_mgr_phy_disable(int id)
}
}
struct dsi_connector {
struct drm_connector base;
int id;
};
struct dsi_bridge {
struct drm_bridge base;
int id;
};
#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
{
struct dsi_connector *dsi_connector = to_dsi_connector(connector);
return dsi_connector->id;
}
static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
{
struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
return dsi_bridge->id;
}
static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
static void msm_dsi_manager_set_split_display(u8 id)
{
struct msm_drm_private *priv = conn->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_drm_private *priv = msm_dsi->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_dsi *master_dsi, *slave_dsi;
struct drm_panel *panel;
if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
@ -256,89 +240,18 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
slave_dsi = other_dsi;
}
/*
* There is only 1 panel in the global panel list for bonded DSI mode.
* Therefore slave dsi should get the drm_panel instance from master
* dsi.
*/
panel = msm_dsi_host_get_panel(master_dsi->host);
if (IS_ERR(panel)) {
DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
PTR_ERR(panel));
return PTR_ERR(panel);
}
if (!panel || !IS_BONDED_DSI())
goto out;
drm_object_attach_property(&conn->base,
conn->dev->mode_config.tile_property, 0);
if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
return;
/*
* Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
msm_dsi_is_cmd_mode(msm_dsi));
}
out:
msm_dsi->panel = panel;
return 0;
}
static enum drm_connector_status dsi_mgr_connector_detect(
struct drm_connector *connector, bool force)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
return msm_dsi->panel ? connector_status_connected :
connector_status_disconnected;
}
static void dsi_mgr_connector_destroy(struct drm_connector *connector)
{
struct dsi_connector *dsi_connector = to_dsi_connector(connector);
DBG("");
drm_connector_cleanup(connector);
kfree(dsi_connector);
}
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
int num;
if (!panel)
return 0;
/*
* In bonded DSI mode, we have one connector that can be
* attached to the drm_panel.
*/
num = drm_panel_get_modes(panel, connector);
if (!num)
return 0;
return num;
}
static struct drm_encoder *
dsi_mgr_connector_best_encoder(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
DBG("");
return msm_dsi_get_encoder(msm_dsi);
}
static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
@ -403,7 +316,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@ -418,18 +330,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (!dsi_mgr_power_on_early(bridge))
dsi_mgr_bridge_power_on(bridge);
/* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance.
*/
if (panel) {
ret = drm_panel_prepare(panel);
if (ret) {
pr_err("%s: prepare panel %d failed, %d\n", __func__,
id, ret);
goto panel_prep_fail;
}
}
ret = msm_dsi_host_enable(host);
if (ret) {
pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
@ -449,9 +349,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
if (panel)
drm_panel_unprepare(panel);
panel_prep_fail:
return;
}
@ -469,62 +366,12 @@ void msm_dsi_manager_tpg_enable(void)
}
}
static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
ret = drm_panel_enable(panel);
if (ret) {
pr_err("%s: enable panel %d failed, %d\n", __func__, id,
ret);
}
}
}
static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
DBG("id=%d", id);
if (!msm_dsi_device_connected(msm_dsi))
return;
/* Do nothing with the host if it is slave-DSI in case of bonded DSI */
if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
return;
if (panel) {
ret = drm_panel_disable(panel);
if (ret)
pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
ret);
}
}
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@ -551,13 +398,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
if (panel) {
ret = drm_panel_unprepare(panel);
if (ret)
pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
id, ret);
}
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);
@ -614,76 +454,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
return msm_dsi_host_check_dsc(host, mode);
}
static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
.detect = dsi_mgr_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dsi_mgr_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
.get_modes = dsi_mgr_connector_get_modes,
.best_encoder = dsi_mgr_connector_best_encoder,
};
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.pre_enable = dsi_mgr_bridge_pre_enable,
.enable = dsi_mgr_bridge_enable,
.disable = dsi_mgr_bridge_disable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
.mode_valid = dsi_mgr_bridge_mode_valid,
};
/* initialize connector when we're connected to a drm_panel */
struct drm_connector *msm_dsi_manager_connector_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_connector *connector = NULL;
struct dsi_connector *dsi_connector;
int ret;
dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
if (!dsi_connector)
return ERR_PTR(-ENOMEM);
dsi_connector->id = id;
connector = &dsi_connector->base;
ret = drm_connector_init(msm_dsi->dev, connector,
&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
if (ret)
return ERR_PTR(ret);
drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
/* Enable HPD to let hpd event is handled
* when panel is attached to the host.
*/
connector->polled = DRM_CONNECTOR_POLL_HPD;
/* Display driver doesn't support interlace now. */
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
drm_connector_attach_encoder(connector, msm_dsi->encoder);
ret = msm_dsi_manager_panel_init(connector, id);
if (ret) {
DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
goto fail;
}
return connector;
fail:
connector->funcs->destroy(connector);
return ERR_PTR(ret);
}
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@ -722,18 +499,21 @@ fail:
return ERR_PTR(ret);
}
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
int msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
int ret;
int_bridge = msm_dsi->bridge;
ext_bridge = msm_dsi->external_bridge =
msm_dsi_host_get_bridge(msm_dsi->host);
ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
msm_dsi->pdev->dev.of_node, 1, 0);
if (IS_ERR(ext_bridge))
return PTR_ERR(ext_bridge);
msm_dsi->external_bridge = ext_bridge;
encoder = msm_dsi->encoder;
@ -745,36 +525,32 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret == -EINVAL) {
struct drm_connector *connector;
struct list_head *connector_list;
/* link the internal dsi bridge to the external bridge */
drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
/*
* we need the drm_connector created by the external bridge
* driver (or someone else) to feed it to our driver's
* priv->connector[] list, mainly for msm_fbdev_init()
* link the internal dsi bridge to the external bridge,
* connector is created by the next bridge.
*/
connector_list = &dev->mode_config.connector_list;
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
if (ret < 0)
return ret;
} else {
struct drm_connector *connector;
list_for_each_entry(connector, connector_list, head) {
if (drm_connector_has_possible_encoder(connector, encoder))
return connector;
/* We are in charge of the connector, create one now. */
connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_ERROR("Unable to create bridge connector\n");
return PTR_ERR(connector);
}
return ERR_PTR(-ENODEV);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0)
return ret;
}
connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_ERROR("Unable to create bridge connector\n");
return ERR_CAST(connector);
}
/* The pipeline is ready, ping encoders if necessary */
msm_dsi_manager_set_split_display(id);
drm_connector_attach_encoder(connector, encoder);
return connector;
return 0;
}
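
The rewritten init prefers DRM_BRIDGE_ATTACH_NO_CONNECTOR and builds the connector itself with drm_bridge_connector_init(), only falling back to a bridge-created connector for drivers that return -EINVAL. A compact sketch of that generic pattern, detached from the DSI specifics above:

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>

static int demo_attach_output(struct drm_device *dev,
			      struct drm_encoder *encoder,
			      struct drm_bridge *prev,
			      struct drm_bridge *next)
{
	struct drm_connector *connector;
	int ret;

	ret = drm_bridge_attach(encoder, next, prev,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL)	/* legacy bridge: creates its own connector */
		return drm_bridge_attach(encoder, next, prev, 0);
	if (ret)
		return ret;

	connector = drm_bridge_connector_init(dev, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}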
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)

View File

@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
@ -507,82 +507,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
struct regulator_bulk_data *s = phy->supplies;
const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
struct device *dev = &phy->pdev->dev;
int num = phy->cfg->reg_cfg.num;
int i, ret;
for (i = 0; i < num; i++)
s[i].supply = regs[i].name;
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
if (ret != -EPROBE_DEFER) {
DRM_DEV_ERROR(dev,
"%s: failed to init regulator, ret=%d\n",
__func__, ret);
}
return ret;
}
return 0;
}
static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
struct regulator_bulk_data *s = phy->supplies;
const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
int num = phy->cfg->reg_cfg.num;
int i;
DBG("");
for (i = num - 1; i >= 0; i--)
if (regs[i].disable_load >= 0)
regulator_set_load(s[i].consumer, regs[i].disable_load);
regulator_bulk_disable(num, s);
}
static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
struct regulator_bulk_data *s = phy->supplies;
const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
struct device *dev = &phy->pdev->dev;
int num = phy->cfg->reg_cfg.num;
int ret, i;
DBG("");
for (i = 0; i < num; i++) {
if (regs[i].enable_load >= 0) {
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
}
}
}
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}
return 0;
fail:
for (i--; i >= 0; i--)
regulator_set_load(s[i].consumer, regs[i].disable_load);
return ret;
}
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
@ -697,12 +621,9 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
if (phy->id < 0)
return dev_err_probe(dev, phy->id,
"Couldn't identify PHY index\n");
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
@ -710,86 +631,71 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
if (IS_ERR(phy->base)) {
DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
if (IS_ERR(phy->base))
return dev_err_probe(dev, PTR_ERR(phy->base),
"Failed to map phy base\n");
phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
if (IS_ERR(phy->pll_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
ret = -ENOMEM;
goto fail;
}
if (IS_ERR(phy->pll_base))
return dev_err_probe(dev, PTR_ERR(phy->pll_base),
"Failed to map pll base\n");
if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
if (IS_ERR(phy->lane_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
ret = -ENOMEM;
goto fail;
}
if (IS_ERR(phy->lane_base))
return dev_err_probe(dev, PTR_ERR(phy->lane_base),
"Failed to map phy lane base\n");
}
if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
if (IS_ERR(phy->reg_base)) {
DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
ret = -ENOMEM;
goto fail;
}
if (IS_ERR(phy->reg_base))
return dev_err_probe(dev, PTR_ERR(phy->reg_base),
"Failed to map phy regulator base\n");
}
if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
goto fail;
return ret;
}
ret = dsi_phy_regulator_init(phy);
ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
phy->cfg->regulator_data,
&phy->supplies);
if (ret)
goto fail;
return ret;
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
if (IS_ERR(phy->ahb_clk))
return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
"Unable to get ahb clk\n");
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
ret = dsi_phy_enable_resource(phy);
if (ret)
goto fail;
return ret;
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret) {
DRM_DEV_INFO(dev,
"%s: pll init failed: %d, need separate pll clk driver\n",
__func__, ret);
goto fail;
}
if (ret)
return dev_err_probe(dev, ret,
"PLL init failed; need separate clk driver\n");
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
phy->provided_clocks);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
goto fail;
}
if (ret)
return dev_err_probe(dev, ret,
"Failed to register clk provider\n");
dsi_phy_disable_resource(phy);
platform_set_drvdata(pdev, phy);
return 0;
fail:
return ret;
}
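
The probe rewrite above folds every DRM_DEV_ERROR + goto fail pair into dev_err_probe(), which logs (and quietly records the reason for -EPROBE_DEFER) while returning the error in one expression. The idiom in isolation:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: one line per failure path, defer-aware logging for free. */
static int demo_get_iface_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "iface");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Unable to get ahb clk\n");
	return 0;
}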
static struct platform_driver dsi_phy_platform_driver = {
@ -829,7 +735,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy);
ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
if (ret) {
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
@ -866,7 +772,7 @@ pll_restor_fail:
if (phy->cfg->ops.disable)
phy->cfg->ops.disable(phy);
phy_en_fail:
dsi_phy_regulator_disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
dsi_phy_disable_resource(phy);
res_en_fail:
@ -880,7 +786,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
dsi_phy_regulator_disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
dsi_phy_disable_resource(phy);
}

View File

@ -29,7 +29,8 @@ struct msm_dsi_phy_ops {
};
struct msm_dsi_phy_cfg {
struct dsi_reg_config reg_cfg;
const struct regulator_bulk_data *regulator_data;
int num_regulators;
struct msm_dsi_phy_ops ops;
unsigned long min_pll_rate;
@ -98,7 +99,7 @@ struct msm_dsi_phy {
int id;
struct clk *ahb_clk;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;

View File

@ -188,19 +188,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@ -215,16 +215,19 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
0xba);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
0x0c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
0xfa);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
0x4c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
@ -236,18 +239,18 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *conf
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
config->decimal_div_start);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
(config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
config->pll_clock_inverters);
}
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@ -306,7 +309,7 @@ static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
data & ~BIT(5));
ndelay(250);
}
@ -315,7 +318,7 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
@ -326,7 +329,7 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
@ -335,7 +338,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
data | BIT(5));
}
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
@ -356,7 +359,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Start PLL */
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
0x01);
/*
* ensure all PLL configurations are written prior to checking
@ -378,10 +381,10 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
dsi_pll_enable_global_clk(pll_10nm->slave);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
0x01);
if (pll_10nm->slave)
dsi_phy_write(pll_10nm->slave->phy->base +
REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
error:
return rc;
@ -486,7 +489,7 @@ static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
@ -515,7 +518,7 @@ static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@ -571,64 +574,59 @@ static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->phy->pdev->dev;
struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
struct clk_hw *pll_post_out_div, *pclk_mux;
int ret;
DBG("DSI%d", pll_10nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
pll_10nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
pll_10nm->phy->pll_base +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(pll_out_div)) {
ret = PTR_ERR(pll_out_div);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
pll_out_div, CLK_SET_RATE_PARENT,
pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(pll_bit)) {
ret = PTR_ERR(pll_bit);
goto fail;
}
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
pll_bit, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@ -636,52 +634,45 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, pll_bit, 0, 1, 2);
if (IS_ERR(pll_by_2_bit)) {
ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 4);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, pll_out_div, 0, 1, 4);
if (IS_ERR(pll_post_out_div)) {
ret = PTR_ERR(pll_post_out_div);
goto fail;
}
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
hw = devm_clk_hw_register_mux(dev, clk_name,
((const char *[]){
parent, parent2, parent3, parent4
}), 4, 0, pll_10nm->phy->base +
REG_DSI_10nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
((const struct clk_hw *[]){
pll_bit,
pll_by_2_bit,
pll_out_div,
pll_post_out_div,
}), 4, 0, pll_10nm->phy->base +
REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
if (IS_ERR(pclk_mux)) {
ret = PTR_ERR(pclk_mux);
goto fail;
}
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
hw = devm_clk_hw_register_divider(dev, clk_name, parent,
0, pll_10nm->phy->base +
REG_DSI_10nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED,
&pll_10nm->postdiv_lock);
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@ -1028,14 +1019,14 @@ static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
return 0;
}
static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
{ .supply = "vdds", .init_load_uA = 36000 },
};
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_10nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
@ -1052,12 +1043,8 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_10nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,

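The recurring conversion in this file and the ones that follow swaps name-based parent lookup for direct clk_hw pointers, which is why the snprintf'd parent-name buffers disappear. A side-by-side sketch of the two styles; the clock names and the vco_hw pointer are illustrative:

/* Both variants register a /8 fixed-factor child of the VCO; only the
 * way the parent is identified differs.
 */
static struct clk_hw *register_byte_clk(struct device *dev,
					struct clk_hw *vco_hw)
{
	/* Legacy style resolved the parent by globally-unique name:
	 *   devm_clk_hw_register_fixed_factor(dev, "byte_clk", "vco_clk",
	 *                                     CLK_SET_RATE_PARENT, 1, 8);
	 */
	return devm_clk_hw_register_fixed_factor_parent_hw(dev, "byte_clk",
			vco_hw, CLK_SET_RATE_PARENT, 1, 8);
}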
View File

@ -711,7 +711,7 @@ static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@ -764,14 +764,14 @@ static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
const char *name,
const struct clk_hw *parent_hw,
unsigned long flags,
u8 shift)
{
struct dsi_pll_14nm_postdiv *pll_postdiv;
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_init_data postdiv_init = {
.parent_hws = (const struct clk_hw *[]) { parent_hw },
.num_parents = 1,
.name = name,
.flags = flags,
@ -800,72 +800,70 @@ static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_14nm_vco,
};
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
int ret;
DBG("DSI%d", pll_14nm->phy->id);
snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
pll_14nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
CLK_SET_RATE_PARENT, 0);
if (IS_ERR(hw))
return PTR_ERR(hw);
n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
&pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
if (IS_ERR(n1_postdiv))
return PTR_ERR(n1_postdiv);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id);
snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
/* DSI Byte clock = VCO_CLK / N1 / 8 */
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
CLK_SET_RATE_PARENT, 1, 8);
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
/*
* Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider
* on the way. Don't let it set parent.
*/
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, n1_postdiv, 0, 1, 2);
if (IS_ERR(n1_postdivby2))
return PTR_ERR(n1_postdivby2);
snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id);
snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
* This is the output of N2 post-divider, bits 4-7 in
* REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
*/
hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
0, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
}
@ -952,7 +950,8 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
"%s: D-PHY timing calculation failed\n",
__func__);
return -EINVAL;
}
@ -1005,7 +1004,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
ret = dsi_14nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@ -1024,14 +1023,18 @@ static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 17000 },
};
static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 73400 },
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@ -1047,12 +1050,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_73p4mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@ -1068,12 +1067,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,

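One detail worth keeping in mind while reading these registrations: the divider flags change how the register field is decoded. A simplified sketch of the three encodings seen in this series (not msm code, just the documented clk-divider semantics):

/* How a raw divider field maps to the actual divisor. */
static unsigned int decode_div(unsigned int val, unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1U << val;	/* field N divides by 2^N */
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val ? val : 1;	/* field N divides by N; 0 invalid */
	return val + 1;			/* default: field N divides by N+1 */
}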
View File

@ -129,15 +129,15 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
dsi_20nm_phy_regulator_ctrl(phy, false);
}
static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
{ .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
};
const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_20nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,

View File

@ -104,7 +104,7 @@ static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
* reset bit off and back on.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}
@ -201,9 +201,9 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
/* Add hardware recommended delay for correct PLL configuration */
@ -316,12 +316,12 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
for (i = 0; i < 2; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
/* poll for PLL ready status */
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
timeout_us);
if (locked)
break;
@ -508,28 +508,28 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
cached_state->postdiv1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
cached_state->byte_mux);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
};
struct device *dev = &pll_28nm->phy->pdev->dev;
struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
int ret;
DBG("%d", pll_28nm->phy->id);
@ -539,55 +539,49 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
else
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
pll_28nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
hw = devm_clk_hw_register_divider(dev, clk_name,
parent1, CLK_SET_RATE_PARENT,
snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
0, 4, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(analog_postdiv))
return PTR_ERR(analog_postdiv);
snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
parent1, CLK_SET_RATE_PARENT,
1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
if (IS_ERR(indirect_path_div2))
return PTR_ERR(indirect_path_div2);
snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
hw = devm_clk_hw_register_divider(dev, clk_name,
parent1, 0, pll_28nm->phy->pll_base +
snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
0, 8, 0, NULL);
0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
hw = devm_clk_hw_register_mux(dev, clk_name,
((const char *[]){
parent1, parent2
snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
((const struct clk_hw *[]){
&pll_28nm->clk_hw,
indirect_path_div2,
}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
if (IS_ERR(byte_mux))
return PTR_ERR(byte_mux);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
parent1, CLK_SET_RATE_PARENT, 1, 4);
snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
byte_mux, CLK_SET_RATE_PARENT, 1, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
@ -627,31 +621,31 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
@ -713,7 +707,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
"%s: D-PHY timing calculation failed\n",
__func__);
return -EINVAL;
}
@ -769,14 +764,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
{ .supply = "vddio", .init_load_uA = 100000 },
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@ -792,12 +787,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@ -813,12 +804,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,

View File

@ -104,29 +104,29 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
fb_divider & 0xff);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
val);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
val);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
0xf);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
val);
return 0;
}
@ -206,7 +206,7 @@ static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
/* enable the PLL */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
@ -367,23 +367,23 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
cached_state->postdiv2);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
cached_state->postdiv1);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
@ -404,20 +404,8 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
if (!bytediv)
return -ENOMEM;
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
vco_init.name = clk_name;
pll_28nm->clk_hw.init = &vco_init;
@ -429,13 +417,14 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
bytediv->hw.init = &bytediv_init;
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
bytediv_init.flags = CLK_SET_RATE_PARENT;
bytediv_init.parent_names = (const char * const *) &parent_name;
bytediv_init.parent_hws = (const struct clk_hw*[]){
&pll_28nm->clk_hw,
};
bytediv_init.num_parents = 1;
/* DIV2 */
@ -444,12 +433,12 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
@ -489,29 +478,29 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
@ -523,7 +512,7 @@ static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
0x100);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
@ -544,7 +533,7 @@ static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
@ -577,11 +566,11 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
0x66);
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
@ -602,7 +591,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
"%s: D-PHY timing calculation failed\n",
__func__);
return -EINVAL;
}
@ -648,14 +638,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
{ .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_8960_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,

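The bytediv change above applies the same parent_hws conversion to a hand-rolled clk_hw instead of a helper-registered one. A minimal sketch of that shape; the ops and parent hw below are placeholders:

/* Placeholder ops and parent, for illustration only. */
static const struct clk_ops example_div_ops;
static struct clk_hw example_vco_hw;

static const struct clk_init_data example_div_init = {
	.name = "example_div",
	.ops = &example_div_ops,
	/* parent wired by pointer instead of a parent_names string */
	.parent_hws = (const struct clk_hw *[]){ &example_vco_hw },
	.num_parents = 1,
	.flags = CLK_SET_RATE_PARENT,
};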
View File

@ -176,19 +176,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *c
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@ -208,7 +208,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
}
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
@ -245,17 +245,20 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
config->decimal_div_start);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
(config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
pll->phy->cphy_mode ? 0x00 : 0x10);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
config->pll_clock_inverters);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@ -341,7 +344,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
data | BIT(5) | BIT(4));
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@ -500,7 +503,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
@ -529,7 +532,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@ -585,65 +588,60 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{
char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
};
struct device *dev = &pll_7nm->phy->pdev->dev;
struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
int ret;
DBG("DSI%d", pll_7nm->phy->id);
snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
pll_7nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
if (ret)
return ret;
snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
hw = devm_clk_hw_register_divider(dev, clk_name,
parent, CLK_SET_RATE_PARENT,
pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
&pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(pll_out_div)) {
ret = PTR_ERR(pll_out_div);
goto fail;
}
snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
hw = devm_clk_hw_register_divider(dev, clk_name, parent,
CLK_SET_RATE_PARENT,
pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED,
&pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
pll_out_div, CLK_SET_RATE_PARENT,
pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(pll_bit)) {
ret = PTR_ERR(pll_bit);
goto fail;
}
snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
CLK_SET_RATE_PARENT, 1,
pll_7nm->phy->cphy_mode ? 7 : 8);
hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
pll_bit, CLK_SET_RATE_PARENT, 1,
pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@ -651,25 +649,25 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
clk_name, pll_bit, 0, 1, 2);
if (IS_ERR(pll_by_2_bit)) {
ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
if (pll_7nm->phy->cphy_mode)
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7);
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
dev, clk_name, pll_out_div, 0, 2, 7);
else
hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
dev, clk_name, pll_out_div, 0, 1, 4);
if (IS_ERR(pll_post_out_div)) {
ret = PTR_ERR(pll_post_out_div);
goto fail;
}
@ -682,34 +680,32 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
hw = devm_clk_hw_register_mux(dev, clk_name,
((const char *[]){
parent, parent2,
}), 2, 0, pll_7nm->phy->base +
hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
((const struct clk_hw *[]){
pll_bit,
pll_by_2_bit,
}), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
0, 1, 0, NULL);
0, 1, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
phy_pll_out_dsi_parent = hw;
}
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
hw = devm_clk_hw_register_divider(dev, clk_name, parent,
0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED,
&pll_7nm->postdiv_lock);
hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
phy_pll_out_dsi_parent, 0,
pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@ -841,7 +837,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
"%s: PHY timing calculation failed\n", __func__);
"%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@ -960,10 +956,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
timing->shared_timings.clk_post);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
@ -982,9 +978,9 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
timing->shared_timings.clk_post);
}
/* DSI lane settings */
@ -1036,14 +1032,18 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
DBG("DSI%d PHY disabled", phy->id);
}
static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 36000 },
};
static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_36mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@ -1065,12 +1065,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_36mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@ -1087,12 +1083,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_37750uA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,

View File

@ -691,15 +691,13 @@ static const struct clk_ops hdmi_8996_pll_ops = {
.is_enabled = hdmi_8996_pll_is_enabled,
};
static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
.parent_data = (const struct clk_parent_data[]){
{ .fw_name = "xo", .name = "xo_board" },
},
.num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
};
@ -707,8 +705,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8996 *pll;
int i, ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
@ -735,10 +732,16 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
}
pll->clk_hw.init = &pll_init;
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
ret = devm_clk_hw_register(dev, &pll->clk_hw);
if (ret) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return ret;
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
if (ret) {
DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
return ret;
}
return 0;

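With the provider registered, the PHY's OF node itself now exports the PLL. of_clk_hw_simple_get() ignores the clock-specifier index and always returns the single clk_hw passed as context, so the consumer side reduces to an ordinary clock lookup (a sketch; the "pll" consumer name is assumed):

/* Consumer-side sketch: the name depends on the consumer binding. */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *pll = devm_clk_get(&pdev->dev, "pll");

	if (IS_ERR(pll))
		return PTR_ERR(pll);

	return clk_prepare_enable(pll);
}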
View File

@ -7,6 +7,7 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@ -326,6 +327,13 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
&fail_gem_alloc);
fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
&fail_gem_iova);
#endif
}
#endif

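The two attributes wired up above use the kernel's generic fault-injection framework: the debugfs knobs (probability, interval, times, ...) land under the fail_gem_alloc/ and fail_gem_iova/ directories, and the hot paths only consult should_fail(). A minimal sketch of the pattern with hypothetical names:

#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(fail_example);

static int example_alloc(size_t size)
{
	/* True when the debugfs-configured policy says this call
	 * should be forced to fail.
	 */
	if (should_fail(&fail_example, size))
		return -ENOMEM;

	return 0;	/* the normal allocation path would go here */
}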
View File

@ -6,6 +6,7 @@
*/
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@ -78,6 +79,11 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@ -418,14 +424,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
INIT_LIST_HEAD(&priv->inactive_willneed);
INIT_LIST_HEAD(&priv->inactive_dontneed);
INIT_LIST_HEAD(&priv->inactive_unpinned);
mutex_init(&priv->mm_lock);
/*
* Initialize the LRUs:
*/
mutex_init(&priv->lru.lock);
drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&priv->mm_lock);
might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
@ -469,6 +479,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
}
}
drm_helper_move_panel_connectors_to_head(ddev);
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
@ -697,6 +709,9 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
flags |= MSM_BO_WC;
}
if (should_fail(&fail_gem_alloc, args->size))
return -ENOMEM;
return msm_gem_new_handle(dev, file, args->size,
args->flags, &args->handle, NULL);
}
@ -758,6 +773,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
@ -779,6 +797,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
if (priv->gpu->aspace == ctx->aspace)
return -EOPNOTSUPP;
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
@ -883,13 +904,13 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
* retired, so if the fence is not found it means there is nothing
* to wait for
*/
ret = mutex_lock_interruptible(&queue->lock);
ret = mutex_lock_interruptible(&queue->idr_lock);
if (ret)
return ret;
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->lock);
mutex_unlock(&queue->idr_lock);
if (!fence)
return 0;

View File

@ -33,6 +33,13 @@
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
#ifdef CONFIG_FAULT_INJECTION
extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;
#else
# define should_fail(attr, size) 0
#endif
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@ -95,11 +102,6 @@ struct msm_drm_thread {
struct kthread_worker *worker;
};
struct msm_drm_private {
struct drm_device *dev;
@ -141,28 +143,60 @@ struct msm_drm_private {
struct mutex obj_lock;
/**
* lru:
*
* The various LRUs that a GEM object is in at various stages of
* its lifetime. Objects start out in the unbacked LRU. When
* pinned (for scanout or permanently mapped GPU buffers, like
* ringbuffer, memptr, fw, etc) it moves to the pinned LRU. When
* unpinned, it moves into willneed or dontneed LRU depending on
* madvise state. When backing pages are evicted (willneed) or
* purged (dontneed) it moves back into the unbacked LRU.
*
* The dontneed LRU is considered by the shrinker for objects
* that are candidates for purging, and the willneed LRU is
* considered for objects that could be evicted.
*/
struct {
/**
* unbacked:
*
* The LRU for GEM objects without backing pages allocated.
* This mostly exists so that objects are always in one
* LRU.
*/
struct drm_gem_lru unbacked;
/**
* pinned:
*
* The LRU for pinned GEM objects
*/
struct drm_gem_lru pinned;
/**
* willneed:
*
* The LRU for unpinned GEM objects which are in madvise
* WILLNEED state (ie. can be evicted)
*/
struct drm_gem_lru willneed;
/**
* dontneed:
*
* The LRU for unpinned GEM objects which are in madvise
* DONTNEED state (ie. can be purged)
*/
struct drm_gem_lru dontneed;
/**
* lock:
*
* Protects manipulation of all of the LRUs.
*/
struct mutex lock;
} lru;
struct workqueue_struct *wq;
@ -289,7 +323,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@ -319,7 +353,7 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
return false;
}
static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return NULL;
}
@ -432,6 +466,8 @@ void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
struct icc_path *msm_icc_get(struct device *dev, const char *name);
#define msm_writel(data, addr) writel((data), (addr))
#define msm_readl(addr) readl((addr))

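The four LRUs documented above are driven from one helper in msm_gem.c (renamed from update_inactive() in the hunks below). A simplified sketch of its decision tree, with the lock assertions and consistency checks elided:

/* Simplified: the real helper also asserts the object lock and
 * sanity-checks pin_count/madv state.
 */
static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages)
		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
	else if (msm_obj->pin_count || msm_obj->vmap_count)
		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
	else if (msm_obj->madv == MSM_MADV_WILLNEED)
		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
	else
		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
}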
View File

@ -19,7 +19,7 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
static void update_inactive(struct msm_gem_object *msm_obj);
static void update_lru(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
@ -132,7 +132,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
update_inactive(msm_obj);
update_lru(obj);
}
return msm_obj->pages;
@ -174,40 +174,45 @@ static void put_pages(struct drm_gem_object *obj)
put_pages_vram(obj);
msm_obj->pages = NULL;
update_lru(obj);
}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
msm_gem_lock(obj);
msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
if (!IS_ERR(p)) {
msm_obj->pin_count++;
update_inactive(msm_obj);
to_msm_bo(obj)->pin_count++;
update_lru(obj);
}
msm_gem_unlock(obj);
return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
msm_gem_lock(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_inactive(msm_obj);
p = msm_gem_pin_pages_locked(obj);
msm_gem_unlock(obj);
return p;
}
void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
msm_gem_lock(obj);
msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
@ -273,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
int ret;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@ -302,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
@ -321,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
@ -352,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
@ -370,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
@ -383,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
{
struct msm_gem_vma *vma;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace);
@ -423,19 +428,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
pages = get_pages(obj);
pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
if (!ret)
msm_obj->pin_count++;
if (ret)
msm_gem_unpin_locked(obj);
return ret;
}
@ -444,12 +448,12 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_inactive(msm_obj);
update_lru(obj);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@ -465,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma))
@ -626,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
if (obj->import_attach)
return ERR_PTR(-ENODEV);
@ -658,7 +662,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
update_inactive(msm_obj);
update_lru(obj);
}
return msm_obj->vaddr;
@ -699,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
@ -729,8 +733,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
if (msm_obj->active_count == 0)
update_inactive(msm_obj);
update_lru(obj);
msm_gem_unlock(obj);
@ -742,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
@ -757,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
update_inactive(msm_obj);
drm_gem_free_mmap_offset(obj);
@ -780,10 +782,8 @@ void msm_gem_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
GEM_WARN_ON(!msm_obj->evictable);
GEM_WARN_ON(msm_obj->active_count);
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, false);
@ -791,15 +791,13 @@ void msm_gem_evict(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
update_inactive(msm_obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
@ -808,66 +806,37 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0)
		update_inactive(msm_obj);
}

static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);
		GEM_WARN_ON(msm_obj->vmap_count);
		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
	} else if (msm_obj->pin_count || msm_obj->vmap_count) {
		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
	}
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@ -910,7 +879,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
stats->all.count++;
stats->all.size += obj->size;
if (is_active(msm_obj)) {
if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
@ -938,7 +907,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
@ -1015,15 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
mutex_lock(&priv->mm_lock);
if (msm_obj->dontneed)
mark_unpurgeable(msm_obj);
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
/* object should not be on active list: */
GEM_WARN_ON(is_active(msm_obj));
put_iova_spaces(obj, true);
if (obj->import_attach) {
@ -1183,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
to_msm_bo(obj)->vram_node = &vma->node;
/* Call chain get_pages() -> update_inactive() tries to
* access msm_obj->mm_list, but it is not initialized yet.
* To avoid NULL pointer dereference error, initialize
* mm_list to be empty.
*/
INIT_LIST_HEAD(&msm_obj->mm_list);
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
@ -1212,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
mutex_lock(&priv->mm_lock);
list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
mutex_unlock(&priv->mm_lock);
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
@ -1270,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
mutex_lock(&priv->mm_lock);
list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
mutex_unlock(&priv->mm_lock);
drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);

View File

@ -93,16 +93,6 @@ struct msm_gem_object {
*/
uint8_t madv;
/**
* Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
*/
bool dontneed : 1;
/**
* Is object evictable (ie. counted in priv->evictable_count)?
*/
bool evictable : 1;
/**
* count of active vmap'ing
*/
@ -114,17 +104,6 @@ struct msm_gem_object {
*/
struct list_head node;
/**
* An object is either:
* inactive - on priv->inactive_dontneed or priv->inactive_willneed
* (depending on purgeability status)
 * active - on one of the gpu's active_lists.. well, at
 * least for now we don't have (I don't think) hw sync between
 * 2d and 3d on devices which have both, meaning we need to
 * block on submit if a bo is already on another ring
*/
struct list_head mm_list;
struct page **pages;
struct sg_table *sgt;
void *vaddr;
@ -138,7 +117,6 @@ struct msm_gem_object {
char name[32]; /* Identifier to print for the debugfs files */
int active_count;
int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@ -159,8 +137,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@ -171,8 +149,7 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@ -208,12 +185,6 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
return dma_resv_trylock(obj->resv);
}
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@ -226,8 +197,8 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
static inline void
msm_gem_assert_locked(struct drm_gem_object *obj)
{
/*
* Destroying the object is a special case.. msm_gem_free_object()
@ -241,13 +212,10 @@ msm_gem_is_locked(struct drm_gem_object *obj)
* Unfortunately lockdep is not aware of this detail. So when the
* refcount drops to zero, we pretend it is already locked.
*/
	return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
	lockdep_assert_once(
			(kref_read(&obj->refcount) == 0) ||
			(lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)
	);
}

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}
/* imported/exported objects are not purgeable: */
@ -264,81 +232,15 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
msm_gem_assert_locked(&msm_obj->base);
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (is_unpurgeable(msm_obj))
return;
if (GEM_WARN_ON(msm_obj->dontneed))
return;
priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
msm_obj->dontneed = true;
}
static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (is_unpurgeable(msm_obj))
return;
if (GEM_WARN_ON(!msm_obj->dontneed))
return;
priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
GEM_WARN_ON(priv->shrinkable_count < 0);
msm_obj->dontneed = false;
}
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}
static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (is_unevictable(msm_obj))
return;
if (WARN_ON(msm_obj->evictable))
return;
priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
msm_obj->evictable = true;
}
static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (is_unevictable(msm_obj))
return;
if (WARN_ON(!msm_obj->evictable))
return;
priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
WARN_ON(priv->evictable_count < 0);
msm_obj->evictable = false;
}
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
@ -390,9 +292,8 @@ struct msm_gem_submit {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
#define BO_ACTIVE 0x2000 /* active refcnt is held */
#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned and on active list */
#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */
uint32_t flags;
union {
struct msm_gem_object *obj;

View File

@ -63,12 +63,12 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_get_pages(obj);
msm_gem_pin_pages(obj);
return 0;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_put_pages(obj);
msm_gem_unpin_pages(obj);
}

View File

@ -24,103 +24,77 @@ static bool can_swap(void)
return enable_eviction && get_nr_swap_pages() > 0;
}
static bool can_block(struct shrink_control *sc)
{
if (sc->gfp_mask & __GFP_ATOMIC)
return false;
return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
unsigned count = priv->shrinkable_count;
unsigned count = priv->lru.dontneed.count;
if (can_swap())
count += priv->evictable_count;
count += priv->lru.willneed.count;
return count;
}
static bool
purge(struct msm_gem_object *msm_obj)
purge(struct drm_gem_object *obj)
{
if (!is_purgeable(msm_obj))
if (!is_purgeable(to_msm_bo(obj)))
return false;
/*
* This will move the obj out of still_in_list to
* the purged list
*/
msm_gem_purge(&msm_obj->base);
if (msm_gem_active(obj))
return false;
msm_gem_purge(obj);
return true;
}
static bool
evict(struct msm_gem_object *msm_obj)
evict(struct drm_gem_object *obj)
{
if (is_unevictable(msm_obj))
if (is_unevictable(to_msm_bo(obj)))
return false;
msm_gem_evict(&msm_obj->base);
if (msm_gem_active(obj))
return false;
msm_gem_evict(obj);
return true;
}
static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
		bool (*shrink)(struct msm_gem_object *msm_obj))
{
	unsigned freed = 0;
	struct list_head still_in_list;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < nr_to_scan) {
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
				list, typeof(*msm_obj), mm_list);

		if (!msm_obj)
			break;

		list_move_tail(&msm_obj->mm_list, &still_in_list);

		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgeable)
		 */
		mutex_unlock(&priv->mm_lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!msm_gem_trylock(&msm_obj->base))
			goto tail;

		if (shrink(msm_obj))
			freed += msm_obj->base.size >> PAGE_SHIFT;

		msm_gem_unlock(&msm_obj->base);

tail:
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	}

	list_splice_tail(&still_in_list, list);
	mutex_unlock(&priv->mm_lock);

	return freed;
}

static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);
	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}

static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}

static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}
static unsigned long
@ -128,21 +102,34 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned long freed;

	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

	if (freed > 0)
		trace_msm_gem_purge(freed << PAGE_SHIFT);

	if (can_swap() && freed < sc->nr_to_scan) {
		int evicted = scan(priv, sc->nr_to_scan - freed,
				&priv->inactive_willneed, evict);

		if (evicted > 0)
			trace_msm_gem_evict(evicted << PAGE_SHIFT);

		freed += evicted;
	}

	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				stages[1].freed, stages[2].freed,
				stages[3].freed);
	}
return (freed > 0) ? freed : SHRINK_STOP;
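To make the staging concrete (hypothetical numbers): with sc->nr_to_scan = 32 and no swap available, only stages 0 and 2 have a true cond. If stage 0 purges 20 pages from the dontneed LRU without blocking, stage 2 runs with nr = 12 and, provided can_block(sc) holds, waits up to the wait_for_idle() timeout per object to purge still-busy dontneed buffers; both willneed stages are skipped because can_swap() is false.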
@ -173,12 +160,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
vmap_shrink(struct msm_gem_object *msm_obj)
vmap_shrink(struct drm_gem_object *obj)
{
if (!is_vunmapable(msm_obj))
if (!is_vunmapable(to_msm_bo(obj)))
return false;
msm_gem_vunmap(&msm_obj->base);
msm_gem_vunmap(obj);
return true;
}
@ -188,17 +175,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
struct list_head *mm_lists[] = {
&priv->inactive_dontneed,
&priv->inactive_willneed,
priv->gpu ? &priv->gpu->active_list : NULL,
struct drm_gem_lru *lrus[] = {
&priv->lru.dontneed,
&priv->lru.willneed,
&priv->lru.pinned,
NULL,
};
unsigned idx, unmapped = 0;
for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
unmapped += scan(priv, vmap_shrink_limit - unmapped,
mm_lists[idx], vmap_shrink);
for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
vmap_shrink);
}
*(unsigned long *)ptr += unmapped;

View File

@ -26,6 +26,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
uint64_t sz;
int ret;
@ -36,7 +37,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
submit = kzalloc(sz, GFP_KERNEL);
if (!submit)
return ERR_PTR(-ENOMEM);
@ -52,9 +53,13 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->pid = get_pid(task_pid(current));
submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
/* Get a unique identifier for the submission for logging purposes */
submit->ident = atomic_inc_return(&ident) - 1;
INIT_LIST_HEAD(&submit->node);
return submit;
@ -67,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
unsigned i;
if (submit->fence_id) {
mutex_lock(&submit->queue->lock);
mutex_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
mutex_unlock(&submit->queue->lock);
mutex_unlock(&submit->queue->idr_lock);
}
dma_fence_put(submit->user_fence);
@ -238,17 +243,13 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
if (flags & BO_OBJ_PINNED)
msm_gem_unpin_locked(obj);
if (flags & BO_ACTIVE)
msm_gem_active_put(obj);
if (flags & BO_LOCKED)
dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
BO_ACTIVE | BO_LOCKED;
unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
@ -353,18 +354,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
submit->valid = true;
/*
* Increment active_count first, so if under memory pressure, we
* don't inadvertently evict a bo needed by the submit in order
* to pin an earlier bo in the same submit.
*/
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
msm_gem_active_get(obj, submit->gpu);
submit->bos[i].flags |= BO_ACTIVE;
}
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
struct msm_gem_vma *vma;
@ -512,11 +501,11 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
unsigned cleanup_flags = BO_LOCKED;
unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
unsigned i;
if (error)
cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
cleanup_flags |= BO_VMA_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@ -533,10 +522,6 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
msm_gem_lock(obj);
/* Note, VMA already fence-unpinned before submit: */
submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
@ -718,7 +703,6 @@ static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
@ -729,10 +713,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
unsigned i;
int ret, submitid;
int ret;
if (!gpu)
return -ENXIO;
@ -764,35 +747,26 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
/* Get a unique identifier for the submission for logging purposes */
submitid = atomic_inc_return(&ident) - 1;
ring = gpu->rb[queue->ring_nr];
trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
args->nr_bos, args->nr_cmds);
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
goto out_unlock;
return ret;
}
}
submit = submit_create(dev, gpu, queue, args->nr_bos,
args->nr_cmds);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
submit = NULL;
goto out_unlock;
}
submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
if (IS_ERR(submit))
return PTR_ERR(submit);
submit->pid = pid;
submit->ident = submitid;
trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
args->nr_bos, args->nr_cmds);
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
goto out_post_unlock;
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@ -887,6 +861,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
mutex_lock(&queue->idr_lock);
/*
* If using userspace provided seqno fence, validate that the id
* is available before arming sched job. Since access to fence_idr
@ -895,6 +871,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
idr_find(&queue->fence_idr, args->fence)) {
mutex_unlock(&queue->idr_lock);
ret = -EINVAL;
goto out;
}
@ -927,6 +904,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->user_fence, 1,
INT_MAX, GFP_KERNEL);
}
mutex_unlock(&queue->idr_lock);
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
@ -965,9 +945,9 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&queue->lock);
out_post_unlock:
if (submit)
msm_gem_submit_put(submit);
out_post_unlock:
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);

View File

@ -16,6 +16,7 @@
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/reset.h>
#include <linux/sched/task.h>
/*
@ -394,7 +395,6 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
@ -423,9 +423,7 @@ static void recover_worker(struct kthread_work *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
/*
* Replay all remaining submits starting with highest priority
@ -442,6 +440,8 @@ static void recover_worker(struct kthread_work *work)
}
}
pm_runtime_put(&gpu->pdev->dev);
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
@ -664,11 +664,12 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
mutex_lock(&gpu->active_lock);
gpu->active_submits--;
WARN_ON(gpu->active_submits < 0);
if (!gpu->active_submits)
if (!gpu->active_submits) {
msm_devfreq_idle(gpu);
mutex_unlock(&gpu->active_lock);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
}
pm_runtime_put_autosuspend(&gpu->pdev->dev);
mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
@ -757,14 +758,17 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Update devfreq on transition from idle->active: */
mutex_lock(&gpu->active_lock);
if (!gpu->active_submits)
if (!gpu->active_submits) {
pm_runtime_get(&gpu->pdev->dev);
msm_devfreq_active(gpu);
}
gpu->active_submits++;
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
}
@ -846,7 +850,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
sched_set_fifo_low(gpu->worker->task);
INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
init_waitqueue_head(&gpu->retire_event);
@ -901,6 +904,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
"cx_collapse");
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
@ -974,8 +980,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;

View File

@ -13,6 +13,7 @@
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include "msm_drv.h"
#include "msm_fence.h"
@ -187,12 +188,6 @@ struct msm_gpu {
*/
int cur_ctx_seqno;
/*
* List of GEM active objects on this gpu. Protected by
* msm_drm_private::mm_lock
*/
struct list_head active_list;
/**
* lock:
*
@ -277,6 +272,9 @@ struct msm_gpu {
bool hw_apriv;
struct thermal_cooling_device *cooling;
/* To poll for cx gdsc collapse during gpu recovery */
struct reset_control *cx_collapse;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
@ -466,7 +464,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @node: node in the context's list of submitqueues
* @fence_idr: maps fence-id to dma_fence for userspace visible fence
* seqno, protected by submitqueue lock
* @lock: submitqueue lock
* @idr_lock: for serializing access to fence_idr
* @lock: submitqueue lock for serializing submits on a queue
* @ref: reference count
* @entity: the submit job-queue
*/
@ -479,6 +478,7 @@ struct msm_gpu_submitqueue {
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
struct mutex idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
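A lookup sketch for the split lock (hypothetical fragment, modeled on the fence-id paths in the submit code above; fence_id is a caller-supplied value):

	struct dma_fence *fence;

	/* Only idr_lock is needed to resolve a fence id; queue->lock can
	 * stay free for the submit path. */
	mutex_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	mutex_unlock(&queue->idr_lock);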

View File

@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
dev_pm_qos_remove_request(&df->idle_freq);
dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}

View File

@ -115,29 +115,27 @@ TRACE_EVENT(msm_gmu_freq_change,
);
TRACE_EVENT(msm_gem_purge,
		TP_PROTO(u32 bytes),
		TP_ARGS(bytes),
		TP_STRUCT__entry(
			__field(u32, bytes)
			),
		TP_fast_assign(
			__entry->bytes = bytes;
			),
		TP_printk("Purging %u bytes", __entry->bytes)
);

TRACE_EVENT(msm_gem_evict,
		TP_PROTO(u32 bytes),
		TP_ARGS(bytes),
		TP_STRUCT__entry(
			__field(u32, bytes)
			),
		TP_fast_assign(
			__entry->bytes = bytes;
			),
		TP_printk("Evicting %u bytes", __entry->bytes)
);

TRACE_EVENT(msm_gem_shrink,
		TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
			 u32 active_purged, u32 active_evicted),
		TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
		TP_STRUCT__entry(
			__field(u32, nr_to_scan)
			__field(u32, purged)
			__field(u32, evicted)
			__field(u32, active_purged)
			__field(u32, active_evicted)
			),
		TP_fast_assign(
			__entry->nr_to_scan = nr_to_scan;
			__entry->purged = purged;
			__entry->evicted = evicted;
			__entry->active_purged = active_purged;
			__entry->active_evicted = active_evicted;
			),
		TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
			  __entry->nr_to_scan, __entry->purged, __entry->evicted,
			  __entry->active_purged, __entry->active_evicted)
);

View File

@ -5,6 +5,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/interconnect.h>
#include "msm_drv.h"
/*
@ -124,3 +126,23 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
work->worker = worker;
kthread_init_work(&work->work, fn);
}
struct icc_path *msm_icc_get(struct device *dev, const char *name)
{
struct device *mdss_dev = dev->parent;
struct icc_path *path;
path = of_icc_get(dev, name);
if (path)
return path;
/*
* If there are no interconnects attached to the corresponding device
* node, of_icc_get() will return NULL.
*
* If the MDP5/DPU device node doesn't have interconnects, lookup the
* path in the parent (MDSS) device.
*/
return of_icc_get(mdss_dev, name);
}
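A caller sketch (the path name "mdp0-mem" is illustrative):

	struct icc_path *path = msm_icc_get(&pdev->dev, "mdp0-mem");

	if (IS_ERR(path))
		return PTR_ERR(path);
	/* NULL just means no interconnect is described; only ERR_PTR() is fatal */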

View File

@ -21,6 +21,7 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
};
@ -29,23 +30,84 @@ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
return container_of(mmu, struct msm_iommu_pagetable, base);
}
/* based on iommu_pgsize() in iommu.c: */
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
unsigned long iova, phys_addr_t paddr,
size_t size, size_t *count)
{
unsigned int pgsize_idx, pgsize_idx_next;
unsigned long pgsizes;
size_t offset, pgsize, pgsize_next;
unsigned long addr_merge = paddr | iova;
/* Page sizes supported by the hardware and small enough for @size */
pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
/* Constrain the page sizes further based on the maximum alignment */
if (likely(addr_merge))
pgsizes &= GENMASK(__ffs(addr_merge), 0);
/* Make sure we have at least one suitable page size */
BUG_ON(!pgsizes);
/* Pick the biggest page size remaining */
pgsize_idx = __fls(pgsizes);
pgsize = BIT(pgsize_idx);
if (!count)
return pgsize;
/* Find the next biggest supported page size, if it exists */
pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
if (!pgsizes)
goto out_set_count;
pgsize_idx_next = __ffs(pgsizes);
pgsize_next = BIT(pgsize_idx_next);
/*
* There's no point trying a bigger page size unless the virtual
* and physical addresses are similarly offset within the larger page.
*/
if ((iova ^ paddr) & (pgsize_next - 1))
goto out_set_count;
/* Calculate the offset to the next page size alignment boundary */
offset = pgsize_next - (addr_merge & (pgsize_next - 1));
/*
* If size is big enough to accommodate the larger page, reduce
* the number of smaller pages.
*/
if (offset + pgsize_next <= size)
size = offset;
out_set_count:
*count = size >> pgsize_idx;
return pgsize;
}
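A worked example (hypothetical values): with a pgsize_bitmap of 4KiB | 2MiB | 1GiB, iova == paddr == 0x200000 and size == 4MiB, the size mask leaves {4KiB, 2MiB}, the alignment mask keeps both, and pgsize becomes 2MiB. The next bigger supported size (1GiB) is too far from the current alignment boundary to help, so *count ends up as 4MiB >> 21 == 2 and the caller can cover the whole range with a single map_pages()/unmap_pages() call instead of 1024 4KiB iterations.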
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
size_t size)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	size_t unmapped = 0;

	/* Unmap the block one page at a time */
	while (size) {
		unmapped += ops->unmap(ops, iova, 4096, NULL);
		iova += 4096;
		size -= 4096;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (unmapped == size) ? 0 : -EINVAL;

	while (size) {
		size_t unmapped, pgsize, count;

		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (!unmapped)
			break;

		iova += unmapped;
		size -= unmapped;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (size == 0) ? 0 : -EINVAL;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
@ -54,7 +116,6 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
struct scatterlist *sg;
size_t mapped = 0;
u64 addr = iova;
unsigned int i;
@ -62,17 +123,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
		/* Map the block one page at a time */
		while (size) {
			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
				msm_iommu_pagetable_unmap(mmu, iova, mapped);
				return -EINVAL;
			}

			phys += 4096;
			addr += 4096;
			size -= 4096;
			mapped += 4096;
		}

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					prot, GFP_KERNEL, &mapped);

			/* map_pages could fail after mapping some of the pages,
			 * so update the counters before error handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;

			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}
@ -207,6 +277,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/*

View File

@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
/* Reset fifo to clear any previously unread data: */
rd->fifo.head = rd->fifo.tail = 0;
/* the parsing tools need to know gpu-id to know which
* register database to load.
*

View File

@ -29,8 +29,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_unlock(obj);
}
pm_runtime_get_sync(&gpu->pdev->dev);
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
@ -38,8 +36,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&gpu->lock);
pm_runtime_put(&gpu->pdev->dev);
return dma_fence_get(submit->hw_fence);
}

View File

@ -200,6 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
*id = queue->id;
idr_init(&queue->fence_idr);
mutex_init(&queue->idr_lock);
mutex_init(&queue->lock);
list_add_tail(&queue->node, &ctx->submitqueues);

View File

@ -174,6 +174,41 @@ struct drm_gem_object_funcs {
const struct vm_operations_struct *vm_ops;
};
/**
* struct drm_gem_lru - A simple LRU helper
*
 * A helper for tracking GEM objects in a given state, to aid in a
 * driver's shrinker implementation. Tracks the count of pages
 * for lockless &shrinker.count_objects, and provides
 * &drm_gem_lru_scan for the driver's &shrinker.scan_objects
 * implementation.
*/
struct drm_gem_lru {
/**
* @lock:
*
* Lock protecting movement of GEM objects between LRUs. All
* LRUs that the object can move between should be protected
* by the same lock.
*/
struct mutex *lock;
/**
* @count:
*
* The total number of backing pages of the GEM objects in
* this LRU.
*/
long count;
/**
* @list:
*
* The LRU list.
*/
struct list_head list;
};
/**
* struct drm_gem_object - GEM buffer object
*
@ -312,6 +347,20 @@ struct drm_gem_object {
*
*/
const struct drm_gem_object_funcs *funcs;
/**
* @lru_node:
*
* List node in a &drm_gem_lru.
*/
struct list_head lru_node;
/**
* @lru:
*
* The current LRU list that the GEM object is on.
*/
struct drm_gem_lru *lru;
};
/**
@ -420,4 +469,10 @@ void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
bool (*shrink)(struct drm_gem_object *obj));
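A minimal consumer sketch (the my_drv names are hypothetical; only the helpers declared above are assumed, and drm_gem_lru_scan() trylocks the object's resv before invoking the callback):

static bool my_drv_discard(struct drm_gem_object *obj)
{
	/* obj comes in locked; drop its backing store if possible */
	return my_drv_drop_pages(obj);	/* hypothetical helper */
}

static unsigned long
my_drv_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct my_drv *drv = container_of(shrinker, struct my_drv, shrinker);
	unsigned long freed;

	freed = drm_gem_lru_scan(&drv->idle_lru, sc->nr_to_scan, my_drv_discard);

	return freed ?: SHRINK_STOP;
}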
#endif /* __DRM_GEM_H__ */

View File

@ -179,6 +179,7 @@ struct mipi_dsi_device_info {
* @lp_rate: maximum lane frequency for low power mode in hertz, this should
* be set to the real limits of the hardware, zero is only accepted for
* legacy drivers
* @dsc: panel/bridge DSC pps payload to be sent
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@ -191,6 +192,7 @@ struct mipi_dsi_device {
unsigned long mode_flags;
unsigned long hs_rate;
unsigned long lp_rate;
struct drm_dsc_config *dsc;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
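Since the DSC pps payload now lives on the DSI device rather than on drm_panel, a panel driver fills in dsi->dsc itself during probe. A hedged fragment (field values illustrative, not from any real panel):

static int my_panel_probe(struct mipi_dsi_device *dsi)
{
	struct drm_dsc_config *dsc;

	dsc = devm_kzalloc(&dsi->dev, sizeof(*dsc), GFP_KERNEL);
	if (!dsc)
		return -ENOMEM;

	/* illustrative settings; real values come from the panel datasheet */
	dsc->dsc_version_major = 1;
	dsc->dsc_version_minor = 1;
	dsc->slice_height = 16;
	dsc->bits_per_component = 8;

	dsi->dsc = dsc;

	return mipi_dsi_attach(dsi);
}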

View File

@ -188,13 +188,6 @@ struct drm_panel {
* Panel entry in registry.
*/
struct list_head list;
/**
* @dsc:
*
* Panel DSC pps payload to be sent
*/
struct drm_dsc_config *dsc;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,