Merge tag 'drm-misc-fixes-2023-12-07' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

drm-misc-fixes for v6.7-rc5:
- Document nouveau's GSP-RM.
- Flush vmm harder on nouveau tu102.
- Panfrost fixes for imported dma-buf objects and device frequency reporting.
- Kconfig build fix for tc358768.
- Call end_fb_access after atomic commit.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/05a26dc0-8cf1-4b1f-abb6-3bf471fbfc99@linux.intel.com
Dave Airlie 2023-12-08 12:16:10 +10:00
commit 9ac4883d24
10 changed files with 207 additions and 32 deletions

drivers/gpu/drm/bridge/Kconfig

@@ -313,6 +313,7 @@ config DRM_TOSHIBA_TC358768
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
help
Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.

drivers/gpu/drm/drm_atomic_helper.c

@@ -2012,7 +2012,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return ret;
drm_atomic_helper_async_commit(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_unprepare_planes(dev, state);
return 0;
}
@@ -2072,7 +2072,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return 0;
err:
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_unprepare_planes(dev, state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
@@ -2650,6 +2650,39 @@ fail_prepare_fb:
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
/**
* drm_atomic_helper_unprepare_planes - release plane resources on aborts
* @dev: DRM device
* @state: atomic state object with old state structures
*
* This function cleans up plane state, specifically framebuffers, from the
* atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
* when aborting an atomic commit. For cleaning up after a successful commit
* use drm_atomic_helper_cleanup_planes().
*/
void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int i;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, new_plane_state);
}
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, new_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
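
For context, a hedged sketch (not part of this patch) of how a driver's atomic_commit path is expected to pair these helpers after this change; do_hw_commit() is a hypothetical stand-in for the driver's commit-tail work:

static int example_atomic_commit(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	ret = do_hw_commit(dev, state); /* hypothetical: swap state, program hw */
	if (ret)
		goto err;

	/* Success: state was swapped, so this releases the old framebuffers. */
	drm_atomic_helper_cleanup_planes(dev, state);
	return 0;

err:
	/* Abort before swapping: undo prepare_planes on the new state. */
	drm_atomic_helper_unprepare_planes(dev, state);
	return ret;
}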
static bool plane_crtc_active(const struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
@@ -2784,6 +2817,17 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_flush(crtc, old_state);
}
/*
* Signal end of framebuffer access here before hw_done. After hw_done,
* a later commit might have already released the plane state.
*/
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, old_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -2911,40 +2955,22 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
* configuration. Hence the old configuration must be preserved in @old_state to
* be able to call this function.
*
* This function must also be called on the new state when the atomic update
* fails at any point after calling drm_atomic_helper_prepare_planes().
* This function must not be called on the new state when the atomic update
* fails at any point after calling drm_atomic_helper_prepare_planes(). Use
* drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_plane_state *old_plane_state;
int i;
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, new_plane_state);
}
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
struct drm_plane_state *plane_state;
/*
* This might be called before swapping when commit is aborted,
* in which case we have to cleanup the new state.
*/
if (old_plane_state == plane->state)
plane_state = new_plane_state;
else
plane_state = old_plane_state;
funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, plane_state);
funcs->cleanup_fb(plane, old_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);

drivers/gpu/drm/i915/display/intel_display.c

@@ -7488,7 +7488,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
drm_atomic_helper_cleanup_planes(dev, &state->base);
drm_atomic_helper_unprepare_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}

drivers/gpu/drm/nouveau/dispnv50/disp.c

@@ -2474,7 +2474,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_unprepare_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;

drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h

@@ -26,6 +26,49 @@
* DEALINGS IN THE SOFTWARE.
*/
/**
* msgqTxHeader - TX queue data structure
* @version: the version of this structure, must be 0
* @size: the size of the entire queue, including this header
* @msgSize: the padded size of each queue element; 16 is the minimum
* @msgCount: the number of elements in this queue
* @writePtr: head index of this queue
* @flags: 1 = swap the RX pointers
* @rxHdrOff: offset of readPtr in this structure
* @entryOff: offset of beginning of queue (msgqRxHeader), relative to
* beginning of this structure
*
* The command queue is a queue of RPCs that are sent from the driver to the
* GSP. The status queue is a queue of messages/responses from GSP-RM to the
* driver. Although the driver allocates memory for both queues, the command
* queue is owned by the driver and the status queue is owned by GSP-RM. In
* addition, the headers of the two queues must not share the same 4K page.
*
* Each queue is prefixed with this data structure. The idea is that a queue
* and its header are written to only by their owner. That is, only the
* driver writes to the command queue and command queue header, and only the
* GSP writes to the status (receive) queue and its header.
*
* This is enforced by the concept of "swapping" the RX pointers. This is
* why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
* where the tail pointer of its status queue is located.
*
* When the driver writes a new RPC to the command queue, it updates writePtr.
* When it reads a new message from the status queue, it updates readPtr. In
* this way, the GSP knows when a new command is in the queue (it polls
* writePtr) and it knows how much free space is in the status queue (it
* checks readPtr). The driver never cares about how much free space is in
* the status queue.
*
* As usual, producers write to the head pointer, and consumers read from the
* tail pointer. When head == tail, the queue is empty.
*
* So to summarize:
* command.writePtr = head of command queue
* command.readPtr = tail of status queue
* status.writePtr = head of status queue
* status.readPtr = tail of command queue
*/
typedef struct
{
NvU32 version; // queue version
@@ -38,6 +81,14 @@ typedef struct
NvU32 entryOff; // Offset of entries from start of backing store.
} msgqTxHeader;
/**
* msgqRxHeader - RX queue data structure
* @readPtr: tail index of the other queue
*
* Although this is a separate struct, it could easily be merged into
* msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
* from the beginning of msgqTxHeader.
*/
typedef struct
{
NvU32 readPtr; // message id of last message read
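
To make the swapped-pointer summary concrete, here is an illustrative sketch of the ring arithmetic (these helpers are assumptions for illustration, not code from this patch); status_rx is the msgqRxHeader living in the GSP-owned status queue, which holds the command queue's tail:

static bool cmdq_empty_sketch(const msgqTxHeader *cmdq,
			      const msgqRxHeader *status_rx)
{
	/* head == tail means the command queue is empty */
	return cmdq->writePtr == status_rx->readPtr;
}

static NvU32 cmdq_free_sketch(const msgqTxHeader *cmdq,
			      const msgqRxHeader *status_rx)
{
	NvU32 head = cmdq->writePtr;     /* head of the command queue */
	NvU32 tail = status_rx->readPtr; /* tail of the command queue */

	/* classic ring math; one slot is reserved to tell full from empty */
	return (tail + cmdq->msgCount - head - 1) % cmdq->msgCount;
}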

drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c

@@ -1377,6 +1377,13 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
/**
* r535_gsp_msg_run_cpu_sequencer() - process I/O commands from the GSP
*
* The GSP sequencer is a list of I/O commands that the GSP can send to
* the driver to perform for various purposes. The most common usage is to
* perform a special mid-initialization reset.
*/
static int
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
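
For orientation, a simplified sketch of the kind of dispatch loop such a handler performs; the opcode values, command layout, and names below are illustrative placeholders, not the real GSP sequencer definitions:

struct seq_cmd_sketch {
	u32 opcode; /* what to do */
	u32 addr;   /* register offset */
	u32 val;    /* value to write, or delay in microseconds */
};

static void run_sequencer_sketch(struct nvkm_device *device,
				 const struct seq_cmd_sketch *cmds, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		switch (cmds[i].opcode) {
		case 0: /* placeholder: register write */
			nvkm_wr32(device, cmds[i].addr, cmds[i].val);
			break;
		case 1: /* placeholder: delay */
			udelay(cmds[i].val);
			break;
		default: /* unknown commands are a protocol error */
			break;
		}
	}
}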
@@ -1716,6 +1723,23 @@ r535_gsp_libos_id8(const char *name)
return id;
}
/**
* create_pte_array() - creates a PTE array of a physically contiguous buffer
* @ptes: pointer to the array
* @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
* @size: size of the buffer
*
* GSP-RM sometimes expects physically-contiguous buffers to have an array of
* "PTEs" for each page in that buffer. Although in theory that allows for
* the buffer to be physically discontiguous, GSP-RM does not currently
* support that.
*
* In this case, the PTEs are DMA addresses of each page of the buffer. Since
* the buffer is physically contiguous, calculating all the PTEs is simple
* math.
*
* See memdescGetPhysAddrsForGpu()
*/
static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
{
unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
@@ -1725,6 +1749,35 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
}
/**
* r535_gsp_libos_init() - create the libos arguments structure
*
* The logging buffers are byte queues that contain encoded printf-like
* messages from GSP-RM. They need to be decoded by a special application
* that can parse the buffers.
*
* The 'loginit' buffer contains logs from early GSP-RM init and
* exception dumps. The 'logrm' buffer contains the subsequent logs. Both are
* written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
*
* The physical address map for the log buffer is stored in the buffer
* itself, starting with offset 1. Offset 0 contains the "put" pointer.
*
* The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
* configured for a larger page size (e.g. 64K pages), we need to give
* the GSP an array of 4K pages. Fortunately, since the buffer is
* physically contiguous, it's simple math to calculate the addresses.
*
* The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
* ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
* buffers to be physically contiguous anyway.
*
* The memory allocated for the arguments must not be freed until the GSP
* sends the init_done RPC.
*
* See _kgspInitLibosLoggingStructures (allocates memory for buffers)
* See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
*/
static int
r535_gsp_libos_init(struct nvkm_gsp *gsp)
{
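
To illustrate the log-buffer layout described above, a sketch (assumed helper, not the patch's code) that mirrors create_pte_array() with the address map shifted to offset 1, offset 0 being the "put" pointer:

static void log_buffer_map_sketch(u64 *buf, dma_addr_t addr, size_t size)
{
	/* number of 4K GSP pages backing the physically contiguous buffer */
	unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
	unsigned int i;

	buf[0] = 0; /* the "put" pointer, advanced by GSP-RM as it logs */

	/* physical address map, one 4K page per entry, starting at offset 1 */
	for (i = 0; i < num_pages; i++)
		buf[1 + i] = (u64)addr + (i << GSP_PAGE_SHIFT);
}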
@@ -1835,6 +1888,35 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
}
/**
* nvkm_gsp_radix3_sg - build a radix3 table from an S/G list
*
* The GSP uses a three-level page table, called radix3, to map the firmware.
* Each 64-bit "pointer" in the table is either the bus address of an entry in
* the next table (for levels 0 and 1) or the bus address of the next page in
* the GSP firmware image itself.
*
* Level 0 contains a single entry in one page that points to the first page
* of level 1.
*
* Level 1, since it's also only one page in size, contains up to 512 entries,
* one for each page in Level 2.
*
* Level 2 can be up to 512 pages in size, and each of those entries points to
* the next page of the firmware image. Since there can be up to 512*512
* pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
*
* Internally, the GSP has its own window into system memory, but the base
* physical address of that aperture is not 0. In fact, it varies depending on
* the GPU architecture. Since the GPU is a PCI device, this window is
* accessed via DMA and is therefore bound by IOMMU translation. The end
* result is that GSP-RM must translate the bus addresses in the table to GSP
* physical addresses. All this should happen transparently.
*
* Returns 0 on success, or negative error code
*
* See kgspCreateRadix3_IMPL
*/
static int
nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
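
Worked numbers for the size limit described above (illustrative arithmetic, not code from this patch):

static u64 radix3_max_fw_size_sketch(void)
{
	const u64 ptes_per_page = GSP_PAGE_SIZE / sizeof(u64); /* 4096 / 8 = 512 */

	/* one level-1 page -> 512 level-2 pages -> 512 * 512 data pages */
	return ptes_per_page * ptes_per_page * GSP_PAGE_SIZE; /* 1 GiB */
}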

drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c

@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
mutex_lock(&vmm->mmu->mutex);

drivers/gpu/drm/panfrost/panfrost_devfreq.c

@@ -29,14 +29,20 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct panfrost_device *ptdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
return dev_pm_opp_set_rate(dev, *freq);
err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
ptdev->pfdevfreq.current_frequency = *freq;
return err;
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
@@ -58,7 +64,6 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
pfdevfreq->current_frequency = status->current_frequency;
status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
pfdevfreq->idle_time));
@@ -164,6 +169,14 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_profile.initial_freq = cur_freq;
/*
* We could wait until panfrost_devfreq_target() to set this value, but
* since the simple_ondemand governor works asynchronously, there's a
* chance that, by the time someone opens the device's fdinfo file, the
* current frequency hasn't been updated yet, so let's just set it early.
*/
pfdevfreq->current_frequency = cur_freq;
/*
* Set the recommended OPP; this will enable and configure the regulator,
* if any, and will avoid a switch-off by regulator_late_cleanup()

drivers/gpu/drm/panfrost/panfrost_gem.c

@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
if (bo->base.pages)
if (bo->base.base.import_attach || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)

include/drm/drm_atomic_helper.h

@@ -97,6 +97,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)