Merge tag 'drm-misc-next-2021-12-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.17:

UAPI Changes:

Cross-subsystem Changes:

 * dma-buf: Make fences mandatory in dma_resv_add_excl_fence

Core Changes:

 * Move hashtable to legacy code
 * Return error pointers from struct drm_driver.gem_create_object

 * cma-helper: Improve public interfaces; Remove CONFIG_DRM_KMS_CMA_HELPER option
 * mipi-dbi: Don't depend on CMA helpers
 * ttm: Don't include DRM hashtable; Stop pruning fences after wait; Documentation

Driver Changes:

 * aspeed: Select CONFIG_DRM_GEM_CMA_HELPER

 * bridge/lontium-lt9611: Fix HDMI sensing
 * bridge/parade-ps8640: Fixes
 * bridge/sn65dsi86: Defer probe if no DSI host is found

 * fsl-dcu: Select CONFIG_DRM_GEM_CMA_HELPER

 * i915: Remove dma_resv_prune

 * omapdrm: Fix scatterlist export; Support virtual planes; Fixes

 * panel: Boe-tv110c9m,Inx-hj110iz: Update init code

 * qxl: Use dma-resv iterator

 * rockchip: Use generic fbdev emulation

 * tidss: Fixes

 * vmwgfx: Fix leak on probe errors; Fail probing on broken hosts; New
   placement for MOB page tables; Hide internal BOs from userspace; Cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YbHskHZc9HoAYuPZ@linux-uq9g.fritz.box
Commit 15bb79910f by Dave Airlie, 2021-12-10 15:08:11 +10:00
100 changed files with 1812 additions and 835 deletions

@@ -646,6 +646,17 @@ See drivers/gpu/drm/amd/display/TODO for tasks.

 Contact: Harry Wentland, Alex Deucher

+vmwgfx: Replace hashtable with Linux' implementation
+----------------------------------------------------
+
+The vmwgfx driver uses its own hashtable implementation. Replace the
+code with Linux' implementation and update the callers. It's mostly a
+refactoring task, but the interfaces are different.
+
+Contact: Zack Rusin, Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Intermediate
+
 Bootsplash
 ==========

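As a point of reference for the TODO item above, the conversion it asks for maps
driver-private drm_open_hash usage onto the kernel's generic <linux/hashtable.h>
API. The sketch below is illustrative only; vmw_res_entry and the handle field are
hypothetical names, not taken from the vmwgfx code:

    #include <linux/hashtable.h>

    /* Hypothetical object keyed by a 32-bit handle. */
    struct vmw_res_entry {
            u32 handle;
            struct hlist_node head;
    };

    /* 2^8 buckets; the old code sized drm_open_hash by 'order' the same way. */
    static DEFINE_HASHTABLE(res_hash, 8);

    static void res_hash_insert(struct vmw_res_entry *entry)
    {
            /* hash_add() hashes the key itself; no pre-hashed key as in drm_ht_*. */
            hash_add(res_hash, &entry->head, entry->handle);
    }

    static struct vmw_res_entry *res_hash_lookup(u32 handle)
    {
            struct vmw_res_entry *entry;

            hash_for_each_possible(res_hash, entry, head, handle)
                    if (entry->handle == handle)
                            return entry;
            return NULL;
    }

    static void res_hash_remove(struct vmw_res_entry *entry)
    {
            hash_del(&entry->head);
    }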
@@ -305,8 +305,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 	if (old)
 		i = old->shared_count;

-	if (fence)
-		dma_fence_get(fence);
+	dma_fence_get(fence);

 	write_seqcount_begin(&obj->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */

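With the NULL check gone, dma_resv_add_excl_fence() now requires a real fence;
the i915 hunks later in this series remove the only code that passed NULL as a
way to prune signaled fences. A minimal caller-side sketch under the new rule
(hypothetical obj/fence variables, locking error handling elided):

    /* The reservation object must be held and the fence must be non-NULL. */
    dma_resv_lock(obj->resv, NULL);
    dma_resv_add_excl_fence(obj->resv, fence);  /* grabs its own fence reference */
    dma_resv_unlock(obj->resv);
    dma_fence_put(fence);                       /* drop the caller's reference */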
@@ -217,13 +217,6 @@ config DRM_GEM_CMA_HELPER
 	help
 	  Choose this if you need the GEM CMA helper functions

-config DRM_KMS_CMA_HELPER
-	bool
-	depends on DRM
-	select DRM_GEM_CMA_HELPER
-	help
-	  Choose this if you need the KMS CMA helper functions
-
 config DRM_GEM_SHMEM_HELPER
 	tristate
 	depends on DRM && MMU

@@ -6,7 +6,7 @@
 drm-y := drm_aperture.o drm_auth.o drm_cache.o \
 	drm_file.o drm_gem.o drm_ioctl.o \
 	drm_drv.o \
-	drm_sysfs.o drm_hashtab.o drm_mm.o \
+	drm_sysfs.o drm_mm.o \
 	drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
 	drm_trace_points.o drm_prime.o \
 	drm_vma_manager.o \
@@ -20,8 +20,8 @@ drm-y := drm_aperture.o drm_auth.o drm_cache.o \
 	drm_managed.o drm_vblank_work.o

 drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
-	drm_irq.o drm_legacy_misc.o drm_lock.o drm_memory.o \
-	drm_scatter.o drm_vm.o
+	drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \
+	drm_memory.o drm_scatter.o drm_vm.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -36,6 +36,7 @@ obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
 obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o

 drm_cma_helper-y := drm_gem_cma_helper.o
+drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o
 obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o

 drm_shmem_helper-y := drm_gem_shmem_helper.o
@@ -60,7 +61,6 @@ drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
-drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
 drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o

@@ -6,7 +6,6 @@ config DRM_HDLCD
 	depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
 	depends on COMMON_CLK
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	help
 	  Choose this option if you have an ARM High Definition Colour LCD
 	  controller.
@@ -27,7 +26,6 @@ config DRM_MALI_DISPLAY
 	depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
 	depends on COMMON_CLK
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VIDEOMODE_HELPERS
 	help

@@ -4,7 +4,6 @@ config DRM_KOMEDA
 	depends on DRM && OF
 	depends on COMMON_CLK
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VIDEOMODE_HELPERS
 	help

@@ -5,7 +5,7 @@ config DRM_ASPEED_GFX
 	depends on (COMPILE_TEST || ARCH_ASPEED)
 	depends on MMU
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
 	select DMA_CMA if HAVE_DMA_CONTIGUOUS
 	select CMA if HAVE_DMA_CONTIGUOUS
 	select MFD_SYSCON

@@ -4,7 +4,6 @@ config DRM_ATMEL_HLCDC
 	depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_PANEL
 	help
 	  Choose this option if you have an ATMEL SoC with an HLCDC display

@@ -586,7 +586,7 @@ lt9611_connector_detect(struct drm_connector *connector, bool force)
 	int connected = 0;

 	regmap_read(lt9611->regmap, 0x825e, &reg_val);
-	connected = (reg_val & BIT(2));
+	connected = (reg_val & BIT(0));

 	lt9611->status = connected ? connector_status_connected :
 				connector_status_disconnected;
@@ -892,7 +892,7 @@ static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
 	int connected;

 	regmap_read(lt9611->regmap, 0x825e, &reg_val);
-	connected = reg_val & BIT(2);
+	connected = reg_val & BIT(0);

 	lt9611->status = connected ? connector_status_connected :
 				connector_status_disconnected;

@@ -449,6 +449,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
 	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 		return -EINVAL;

+	ps_bridge->aux.drm_dev = bridge->dev;
 	ret = drm_dp_aux_register(&ps_bridge->aux);
 	if (ret) {
 		dev_err(dev, "failed to register DP AUX channel: %d\n", ret);

@@ -704,7 +704,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)

 static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
 {
-	int ret, val;
+	int val;
 	struct mipi_dsi_host *host;
 	struct mipi_dsi_device *dsi;
 	struct device *dev = pdata->dev;
@@ -714,16 +714,12 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
 	};

 	host = of_find_mipi_dsi_host_by_node(pdata->host_node);
-	if (!host) {
-		DRM_ERROR("failed to find dsi host\n");
-		return -ENODEV;
-	}
+	if (!host)
+		return -EPROBE_DEFER;

 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
-	if (IS_ERR(dsi)) {
-		DRM_ERROR("failed to create dsi device\n");
+	if (IS_ERR(dsi))
 		return PTR_ERR(dsi);
-	}

 	/* TODO: setting to 4 MIPI lanes always for now */
 	dsi->lanes = 4;
@@ -739,13 +735,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)

 	pdata->dsi = dsi;

-	ret = devm_mipi_dsi_attach(dev, dsi);
-	if (ret < 0) {
-		DRM_ERROR("failed to attach dsi to host\n");
-		return ret;
-	}
-
-	return 0;
+	return devm_mipi_dsi_attach(dev, dsi);
 }

 static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1267,8 +1257,10 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
 	drm_bridge_add(&pdata->bridge);

 	ret = ti_sn_attach_host(pdata);
-	if (ret)
+	if (ret) {
+		dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
 		goto err_remove_bridge;
+	}

 	return 0;

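The bridge now treats a missing DSI host as -EPROBE_DEFER and reports the attach
failure through dev_err_probe(), which stays quiet for deferrals and records the
reason in debugfs (devices_deferred). A generic sketch of that pattern in an
unrelated, hypothetical probe function, just to illustrate the idiom:

    #include <linux/clk.h>
    #include <linux/dev_printk.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            clk = devm_clk_get(&pdev->dev, "pix");
            if (IS_ERR(clk))
                    /* Logs only real errors; -EPROBE_DEFER is noted silently. */
                    return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                         "failed to get pixel clock\n");

            return 0;
    }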
View File

@ -32,14 +32,18 @@
* The DRM GEM/CMA helpers use this allocator as a means to provide buffer * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
* objects that are physically contiguous in memory. This is useful for * objects that are physically contiguous in memory. This is useful for
* display drivers that are unable to map scattered buffers via an IOMMU. * display drivers that are unable to map scattered buffers via an IOMMU.
*
* For GEM callback helpers in struct &drm_gem_object functions, see likewise
* named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
* drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
*/ */
static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object, .free = drm_gem_cma_object_free,
.print_info = drm_gem_cma_print_info, .print_info = drm_gem_cma_object_print_info,
.get_sg_table = drm_gem_cma_get_sg_table, .get_sg_table = drm_gem_cma_object_get_sg_table,
.vmap = drm_gem_cma_vmap, .vmap = drm_gem_cma_object_vmap,
.mmap = drm_gem_cma_mmap, .mmap = drm_gem_cma_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops, .vm_ops = &drm_gem_cma_vm_ops,
}; };
@ -63,18 +67,21 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
struct drm_gem_object *gem_obj; struct drm_gem_object *gem_obj;
int ret = 0; int ret = 0;
if (drm->driver->gem_create_object) if (drm->driver->gem_create_object) {
gem_obj = drm->driver->gem_create_object(drm, size); gem_obj = drm->driver->gem_create_object(drm, size);
else if (IS_ERR(gem_obj))
gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); return ERR_CAST(gem_obj);
if (!gem_obj) cma_obj = to_drm_gem_cma_obj(gem_obj);
return ERR_PTR(-ENOMEM); } else {
cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
if (!cma_obj)
return ERR_PTR(-ENOMEM);
gem_obj = &cma_obj->base;
}
if (!gem_obj->funcs) if (!gem_obj->funcs)
gem_obj->funcs = &drm_gem_cma_default_funcs; gem_obj->funcs = &drm_gem_cma_default_funcs;
cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
if (private) { if (private) {
drm_gem_private_object_init(drm, gem_obj, size); drm_gem_private_object_init(drm, gem_obj, size);
@ -192,18 +199,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
} }
/** /**
* drm_gem_cma_free_object - free resources associated with a CMA GEM object * drm_gem_cma_free - free resources associated with a CMA GEM object
* @gem_obj: GEM object to free * @cma_obj: CMA GEM object to free
* *
* This function frees the backing memory of the CMA GEM object, cleans up the * This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself. * GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released. * If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
* &drm_gem_object_funcs.free callback.
*/ */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
{ {
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj); struct drm_gem_object *gem_obj = &cma_obj->base;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
if (gem_obj->import_attach) { if (gem_obj->import_attach) {
@ -224,7 +229,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
kfree(cma_obj); kfree(cma_obj);
} }
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); EXPORT_SYMBOL_GPL(drm_gem_cma_free);
/** /**
* drm_gem_cma_dumb_create_internal - create a dumb buffer object * drm_gem_cma_dumb_create_internal - create a dumb buffer object
@ -371,18 +376,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
/** /**
* drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
* @cma_obj: CMA GEM object
* @p: DRM printer * @p: DRM printer
* @indent: Tab indentation level * @indent: Tab indentation level
* @obj: GEM object
* *
* This function can be used as the &drm_driver->gem_print_info callback. * This function prints paddr and vaddr for use in e.g. debugfs output.
* It prints paddr and vaddr for use in e.g. debugfs output.
*/ */
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
const struct drm_gem_object *obj) struct drm_printer *p, unsigned int indent)
{ {
const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr); drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr); drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
} }
@ -391,18 +393,17 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
/** /**
* drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
* pages for a CMA GEM object * pages for a CMA GEM object
* @obj: GEM object * @cma_obj: CMA GEM object
* *
* This function exports a scatter/gather table by * This function exports a scatter/gather table by calling the standard
* calling the standard DMA mapping API. Drivers using the CMA helpers should * DMA mapping API.
* set this as their &drm_gem_object_funcs.get_sg_table callback.
* *
* Returns: * Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure. * A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/ */
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj) struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
{ {
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); struct drm_gem_object *obj = &cma_obj->base;
struct sg_table *sgt; struct sg_table *sgt;
int ret; int ret;
@ -468,23 +469,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/** /**
* drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
* address space * address space
* @obj: GEM object * @cma_obj: CMA GEM object
* @map: Returns the kernel virtual address of the CMA GEM object's backing * @map: Returns the kernel virtual address of the CMA GEM object's backing
* store. * store.
* *
* This function maps a buffer into the kernel's * This function maps a buffer into the kernel's virtual address space.
* virtual address space. Since the CMA buffers are already mapped into the * Since the CMA buffers are already mapped into the kernel virtual address
* kernel virtual address space this simply returns the cached virtual * space this simply returns the cached virtual address.
* address. Drivers using the CMA helpers should set this as their DRM
* driver's &drm_gem_object_funcs.vmap callback.
* *
* Returns: * Returns:
* 0 on success, or a negative error code otherwise. * 0 on success, or a negative error code otherwise.
*/ */
int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map)
{ {
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
dma_buf_map_set_vaddr(map, cma_obj->vaddr); dma_buf_map_set_vaddr(map, cma_obj->vaddr);
return 0; return 0;
@ -493,20 +490,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
/** /**
* drm_gem_cma_mmap - memory-map an exported CMA GEM object * drm_gem_cma_mmap - memory-map an exported CMA GEM object
* @obj: GEM object * @cma_obj: CMA GEM object
* @vma: VMA for the area to be mapped * @vma: VMA for the area to be mapped
* *
* This function maps a buffer into a userspace process's address space. * This function maps a buffer into a userspace process's address space.
* In addition to the usual GEM VMA setup it immediately faults in the entire * In addition to the usual GEM VMA setup it immediately faults in the entire
* object instead of using on-demand faulting. Drivers that use the CMA * object instead of using on-demand faulting.
* helpers should set this as their &drm_gem_object_funcs.mmap callback.
* *
* Returns: * Returns:
* 0 on success or a negative error code on failure. * 0 on success or a negative error code on failure.
*/ */
int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
{ {
struct drm_gem_cma_object *cma_obj; struct drm_gem_object *obj = &cma_obj->base;
int ret; int ret;
/* /*
@ -517,8 +513,6 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP; vma->vm_flags &= ~VM_PFNMAP;
cma_obj = to_drm_gem_cma_obj(obj);
if (cma_obj->map_noncoherent) { if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

@@ -56,14 +56,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)

 	size = PAGE_ALIGN(size);

-	if (dev->driver->gem_create_object)
+	if (dev->driver->gem_create_object) {
 		obj = dev->driver->gem_create_object(dev, size);
-	else
-		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
-	if (!obj)
-		return ERR_PTR(-ENOMEM);
-
-	shmem = to_drm_gem_shmem_obj(obj);
+		if (IS_ERR(obj))
+			return ERR_CAST(obj);
+		shmem = to_drm_gem_shmem_obj(obj);
+	} else {
+		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+		if (!shmem)
+			return ERR_PTR(-ENOMEM);
+		obj = &shmem->base;
+	}

 	if (!obj->funcs)
 		obj->funcs = &drm_gem_shmem_funcs;

@@ -197,8 +197,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,

 	if (dev->driver->gem_create_object) {
 		gem = dev->driver->gem_create_object(dev, size);
-		if (!gem)
-			return ERR_PTR(-ENOMEM);
+		if (IS_ERR(gem))
+			return ERR_CAST(gem);
 		gbo = drm_gem_vram_of_gem(gem);
 	} else {
 		gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);

@@ -32,16 +32,16 @@
  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */

-#include <linux/export.h>
 #include <linux/hash.h>
 #include <linux/mm.h>
 #include <linux/rculist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>

-#include <drm/drm_hashtab.h>
 #include <drm/drm_print.h>

+#include "drm_legacy.h"
+
 int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 {
 	unsigned int size = 1 << order;
@@ -58,7 +58,6 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(drm_ht_create);

 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
@@ -135,7 +134,6 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(drm_ht_insert_item);

 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -164,7 +162,6 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
 	}
 	return 0;
 }
-EXPORT_SYMBOL(drm_ht_just_insert_please);

 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 		     struct drm_hash_item **item)
@@ -178,7 +175,6 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 	*item = hlist_entry(list, struct drm_hash_item, head);
 	return 0;
 }
-EXPORT_SYMBOL(drm_ht_find_item);

 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 {
@@ -197,7 +193,6 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	hlist_del_init_rcu(&item->head);
 	return 0;
 }
-EXPORT_SYMBOL(drm_ht_remove_item);

 void drm_ht_remove(struct drm_open_hash *ht)
 {
@@ -206,4 +201,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
 		ht->table = NULL;
 	}
 }
-EXPORT_SYMBOL(drm_ht_remove);

@@ -35,9 +35,47 @@
 #include <drm/drm_legacy.h>

 struct agp_memory;
+struct drm_buf_desc;
 struct drm_device;
 struct drm_file;
-struct drm_buf_desc;
+struct drm_hash_item;
+struct drm_open_hash;
+
+/*
+ * Hash-table Support
+ */
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+/* drm_hashtab.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+			      unsigned long seed, int bits, int shift,
+			      unsigned long add);
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+void drm_ht_remove(struct drm_open_hash *ht);
+#endif
+
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item

 /*
  * Generic DRM Contexts

View File

@ -15,9 +15,10 @@
#include <drm/drm_connector.h> #include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h> #include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h> #include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h> #include <drm/drm_file.h>
#include <drm/drm_format_helper.h> #include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h> #include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h> #include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h> #include <drm/drm_modes.h>
@ -200,13 +201,19 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap) struct drm_rect *clip, bool swap)
{ {
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
void *src = cma_obj->vaddr; struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
void *src;
int ret; int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret) if (ret)
return ret; return ret;
src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
goto out_drm_gem_fb_end_cpu_access;
switch (fb->format->format) { switch (fb->format->format) {
case DRM_FORMAT_RGB565: case DRM_FORMAT_RGB565:
@ -221,9 +228,11 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
default: default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n", drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
&fb->format->format); &fb->format->format);
return -EINVAL; ret = -EINVAL;
} }
drm_gem_fb_vunmap(fb, map);
out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret; return ret;
@ -249,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{ {
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1; unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1; unsigned int width = rect->x2 - rect->x1;
@ -266,6 +275,10 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!drm_dev_enter(fb->dev, &idx)) if (!drm_dev_enter(fb->dev, &idx))
return; return;
ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
goto err_drm_dev_exit;
full = width == fb->width && height == fb->height; full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@ -277,7 +290,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (ret) if (ret)
goto err_msg; goto err_msg;
} else { } else {
tr = cma_obj->vaddr; tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */
} }
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1, mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
@ -289,6 +302,9 @@ err_msg:
if (ret) if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret); drm_err_once(fb->dev, "Failed to update display %d\n", ret);
drm_gem_fb_vunmap(fb, map);
err_drm_dev_exit:
drm_dev_exit(idx); drm_dev_exit(idx);
} }
@ -1117,8 +1133,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
/* /*
* Even though it's not the SPI device that does DMA (the master does), * Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in * the dma mask is necessary for the dma_alloc_wc() in the GEM code
* drm_gem_cma_create(). The dma_addr returned will be a physical * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is * address which might be different from the bus address, but this is
* not a problem since the address will not be used. * not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core * The virtual address is used in the transfer and the SPI core

@@ -3,8 +3,8 @@ config DRM_FSL_DCU
 	tristate "DRM Support for Freescale DCU"
 	depends on DRM && OF && ARM && COMMON_CLK
 	select BACKLIGHT_CLASS_DEVICE
+	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_PANEL
 	select REGMAP_MMIO
 	select VIDEOMODE_HELPERS

@@ -4,7 +4,6 @@ config DRM_HISI_KIRIN
 	depends on DRM && OF && ARM64
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_MIPI_DSI
 	help
 	  Choose this option if you have a hisilicon Kirin chipsets(hi6220).

@@ -60,7 +60,6 @@ i915-y += i915_driver.o \

 # core library code
 i915-y += \
-	dma_resv_utils.o \
 	i915_memcpy.o \
 	i915_mm.o \
 	i915_sw_fence.o \

@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#include <linux/dma-resv.h>
-#include "dma_resv_utils.h"
-
-void dma_resv_prune(struct dma_resv *resv)
-{
-	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled(resv, true))
-			dma_resv_add_excl_fence(resv, NULL);
-
-		dma_resv_unlock(resv);
-	}
-}

@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#ifndef DMA_RESV_UTILS_H
-#define DMA_RESV_UTILS_H
-
-struct dma_resv;
-
-void dma_resv_prune(struct dma_resv *resv);
-
-#endif /* DMA_RESV_UTILS_H */

@@ -15,7 +15,6 @@

 #include "gt/intel_gt_requests.h"

-#include "dma_resv_utils.h"
 #include "i915_trace.h"

 static bool swap_available(void)
@@ -229,8 +228,6 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 				i915_gem_object_unlock(obj);
 			}

-			dma_resv_prune(obj->base.resv);
-
 			scanned += obj->base.size >> PAGE_SHIFT;
 skip:
 			i915_gem_object_put(obj);

@@ -10,7 +10,6 @@

 #include "gt/intel_engine.h"

-#include "dma_resv_utils.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"

@@ -52,13 +51,6 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 	}
 	dma_resv_iter_end(&cursor);

-	/*
-	 * Opportunistically prune the fences iff we know they have *all* been
-	 * signaled.
-	 */
-	if (timeout > 0)
-		dma_resv_prune(resv);
-
 	return ret;
 }

@@ -4,7 +4,7 @@ config DRM_IMX
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
 	select DRM_GEM_CMA_HELPER
-	select DRM_KMS_CMA_HELPER
+	select DRM_KMS_HELPER
 	depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
 	depends on IMX_IPUV3_CORE
 	help

@@ -1,7 +1,7 @@
 config DRM_IMX_DCSS
 	tristate "i.MX8MQ DCSS"
 	select IMX_IRQSTEER
-	select DRM_KMS_CMA_HELPER
+	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
 	depends on DRM && ARCH_MXC && ARM64
 	help

@@ -8,7 +8,6 @@ config DRM_INGENIC
 	select DRM_BRIDGE
 	select DRM_PANEL_BRIDGE
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
 	help

@@ -3,7 +3,6 @@ config DRM_KMB_DISPLAY
 	depends on DRM
 	depends on ARCH_KEEMBAY || COMPILE_TEST
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select DRM_MIPI_DSI
 	help

@@ -221,7 +221,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz

 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	mutex_init(&bo->lock);
 	INIT_LIST_HEAD(&bo->va);

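Together with the shmem and VRAM helper hunks above, this hunk reflects the new
contract for the struct drm_driver.gem_create_object hook: return an ERR_PTR()
on failure instead of NULL, with the helpers converting via ERR_CAST(). A hedged
sketch of a driver-side implementation under that contract (my_bo and
my_gem_funcs are hypothetical, not from any driver in this series):

    struct drm_gem_object *my_gem_create_object(struct drm_device *dev, size_t size)
    {
            struct my_bo *bo;       /* hypothetical object embedding drm_gem_object */

            bo = kzalloc(sizeof(*bo), GFP_KERNEL);
            if (!bo)
                    return ERR_PTR(-ENOMEM);        /* was "return NULL;" before */

            bo->base.funcs = &my_gem_funcs;         /* hypothetical drm_gem_object_funcs */

            return &bo->base;
    }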
@@ -10,7 +10,6 @@ config DRM_MCDE
 	select DRM_BRIDGE
 	select DRM_PANEL_BRIDGE
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
 	help

@@ -4,7 +4,6 @@ config DRM_MESON
 	depends on DRM && OF && (ARM || ARM64)
 	depends on ARCH_MESON || COMPILE_TEST
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select DRM_DISPLAY_CONNECTOR
 	select VIDEOMODE_HELPERS

@@ -10,7 +10,7 @@ config DRM_MXSFB
 	depends on COMMON_CLK
 	select DRM_MXS
 	select DRM_KMS_HELPER
-	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
 	select DRM_PANEL
 	select DRM_PANEL_BRIDGE
 	help

@@ -9,6 +9,7 @@ omapdrm-y := omap_drv.o \
 	omap_debugfs.o \
 	omap_crtc.o \
 	omap_plane.o \
+	omap_overlay.o \
 	omap_encoder.o \
 	omap_fb.o \
 	omap_gem.o \

View File

@ -92,6 +92,8 @@ struct dispc_features {
u8 mgr_height_start; u8 mgr_height_start;
u16 mgr_width_max; u16 mgr_width_max;
u16 mgr_height_max; u16 mgr_height_max;
u16 ovl_width_max;
u16 ovl_height_max;
unsigned long max_lcd_pclk; unsigned long max_lcd_pclk;
unsigned long max_tv_pclk; unsigned long max_tv_pclk;
unsigned int max_downscale; unsigned int max_downscale;
@ -1279,8 +1281,8 @@ static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc,
return dispc->feat->burst_size_unit * 8; return dispc->feat->burst_size_unit * 8;
} }
static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
enum omap_plane_id plane, u32 fourcc) enum omap_plane_id plane, u32 fourcc)
{ {
const u32 *modes; const u32 *modes;
unsigned int i; unsigned int i;
@ -2487,6 +2489,11 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
return 0; return 0;
} }
enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane)
{
return dispc->feat->overlay_caps[plane];
}
#define DIV_FRAC(dividend, divisor) \ #define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100)) ((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
@ -2599,6 +2606,12 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
return 0; return 0;
} }
void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height)
{
*width = dispc->feat->ovl_width_max;
*height = dispc->feat->ovl_height_max;
}
static int dispc_ovl_setup_common(struct dispc_device *dispc, static int dispc_ovl_setup_common(struct dispc_device *dispc,
enum omap_plane_id plane, enum omap_plane_id plane,
enum omap_overlay_caps caps, enum omap_overlay_caps caps,
@ -4240,6 +4253,8 @@ static const struct dispc_features omap24xx_dispc_feats = {
.mgr_height_start = 26, .mgr_height_start = 26,
.mgr_width_max = 2048, .mgr_width_max = 2048,
.mgr_height_max = 2048, .mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 66500000, .max_lcd_pclk = 66500000,
.max_downscale = 2, .max_downscale = 2,
/* /*
@ -4278,6 +4293,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.mgr_height_start = 26, .mgr_height_start = 26,
.mgr_width_max = 2048, .mgr_width_max = 2048,
.mgr_height_max = 2048, .mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000, .max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000, .max_tv_pclk = 59000000,
.max_downscale = 4, .max_downscale = 4,
@ -4313,6 +4330,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.mgr_height_start = 26, .mgr_height_start = 26,
.mgr_width_max = 2048, .mgr_width_max = 2048,
.mgr_height_max = 2048, .mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000, .max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000, .max_tv_pclk = 59000000,
.max_downscale = 4, .max_downscale = 4,
@ -4348,6 +4367,8 @@ static const struct dispc_features omap36xx_dispc_feats = {
.mgr_height_start = 26, .mgr_height_start = 26,
.mgr_width_max = 2048, .mgr_width_max = 2048,
.mgr_height_max = 2048, .mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000, .max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000, .max_tv_pclk = 59000000,
.max_downscale = 4, .max_downscale = 4,
@ -4383,6 +4404,8 @@ static const struct dispc_features am43xx_dispc_feats = {
.mgr_height_start = 26, .mgr_height_start = 26,
.mgr_width_max = 2048, .mgr_width_max = 2048,
.mgr_height_max = 2048, .mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 173000000, .max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000, .max_tv_pclk = 59000000,
.max_downscale = 4, .max_downscale = 4,
@ -4418,6 +4441,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.mgr_height_start = 26, .mgr_height_start = 26,
.mgr_width_max = 2048, .mgr_width_max = 2048,
.mgr_height_max = 2048, .mgr_height_max = 2048,
.ovl_width_max = 2048,
.ovl_height_max = 2048,
.max_lcd_pclk = 170000000, .max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000, .max_tv_pclk = 185625000,
.max_downscale = 4, .max_downscale = 4,
@ -4457,6 +4482,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
.mgr_height_start = 27, .mgr_height_start = 27,
.mgr_width_max = 4096, .mgr_width_max = 4096,
.mgr_height_max = 4096, .mgr_height_max = 4096,
.ovl_width_max = 2048,
.ovl_height_max = 4096,
.max_lcd_pclk = 170000000, .max_lcd_pclk = 170000000,
.max_tv_pclk = 192000000, .max_tv_pclk = 192000000,
.max_downscale = 4, .max_downscale = 4,
@ -4842,7 +4869,7 @@ static int dispc_remove(struct platform_device *pdev)
return 0; return 0;
} }
static int dispc_runtime_suspend(struct device *dev) static __maybe_unused int dispc_runtime_suspend(struct device *dev)
{ {
struct dispc_device *dispc = dev_get_drvdata(dev); struct dispc_device *dispc = dev_get_drvdata(dev);
@ -4857,7 +4884,7 @@ static int dispc_runtime_suspend(struct device *dev)
return 0; return 0;
} }
static int dispc_runtime_resume(struct device *dev) static __maybe_unused int dispc_runtime_resume(struct device *dev)
{ {
struct dispc_device *dispc = dev_get_drvdata(dev); struct dispc_device *dispc = dev_get_drvdata(dev);

@@ -5058,7 +5058,7 @@ static int dsi_remove(struct platform_device *pdev)
 	return 0;
 }

-static int dsi_runtime_suspend(struct device *dev)
+static __maybe_unused int dsi_runtime_suspend(struct device *dev)
 {
 	struct dsi_data *dsi = dev_get_drvdata(dev);

@@ -5071,7 +5071,7 @@ static int dsi_runtime_suspend(struct device *dev)
 	return 0;
 }

-static int dsi_runtime_resume(struct device *dev)
+static __maybe_unused int dsi_runtime_resume(struct device *dev)
 {
 	struct dsi_data *dsi = dev_get_drvdata(dev);

@@ -1569,7 +1569,7 @@ static void dss_shutdown(struct platform_device *pdev)
 	DSSDBG("shutdown\n");
 }

-static int dss_runtime_suspend(struct device *dev)
+static __maybe_unused int dss_runtime_suspend(struct device *dev)
 {
 	struct dss_device *dss = dev_get_drvdata(dev);

@@ -1581,7 +1581,7 @@ static int dss_runtime_suspend(struct device *dev)
 	return 0;
 }

-static int dss_runtime_resume(struct device *dev)
+static __maybe_unused int dss_runtime_resume(struct device *dev)
 {
 	struct dss_device *dss = dev_get_drvdata(dev);
 	int r;

@@ -397,6 +397,11 @@ int dispc_get_num_mgrs(struct dispc_device *dispc);
 const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
 				     enum omap_plane_id plane);

+void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height);
+bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+				    enum omap_plane_id plane, u32 fourcc);
+enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane);
+
 u32 dispc_read_irqstatus(struct dispc_device *dispc);
 void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
 void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);

@@ -879,7 +879,7 @@ static int venc_remove(struct platform_device *pdev)
 	return 0;
 }

-static int venc_runtime_suspend(struct device *dev)
+static __maybe_unused int venc_runtime_suspend(struct device *dev)
 {
 	struct venc_device *venc = dev_get_drvdata(dev);

@@ -889,7 +889,7 @@ static int venc_runtime_suspend(struct device *dev)
 	return 0;
 }

-static int venc_runtime_resume(struct device *dev)
+static __maybe_unused int venc_runtime_resume(struct device *dev)
 {
 	struct venc_device *venc = dev_get_drvdata(dev);

View File

@ -117,6 +117,102 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
dispc_runtime_put(priv->dispc); dispc_runtime_put(priv->dispc);
} }
static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b)
{
const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
if (sa->normalized_zpos != sb->normalized_zpos)
return sa->normalized_zpos - sb->normalized_zpos;
else
return sa->plane->base.id - sb->plane->base.id;
}
/*
* This replaces the drm_atomic_normalize_zpos to handle the dual overlay case.
*
* Since both halves need to be 'appear' side by side the zpos is
* recalculated when dealing with dual overlay cases so that the other
* planes zpos is consistent.
*/
static int omap_atomic_update_normalize_zpos(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_state, *new_state;
struct drm_plane *plane;
int c, i, n, inc;
int total_planes = dev->mode_config.num_total_plane;
struct drm_plane_state **states;
int ret = 0;
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return -ENOMEM;
for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) {
if (old_state->plane_mask == new_state->plane_mask &&
!new_state->zpos_changed)
continue;
/* Reset plane increment and index value for every crtc */
n = 0;
/*
* Normalization process might create new states for planes
* which normalized_zpos has to be recalculated.
*/
drm_for_each_plane_mask(plane, dev, new_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(new_state->state,
plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto done;
}
states[n++] = plane_state;
}
sort(states, n, sizeof(*states),
drm_atomic_state_normalized_zpos_cmp, NULL);
for (i = 0, inc = 0; i < n; i++) {
plane = states[i]->plane;
states[i]->normalized_zpos = i + inc;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n",
plane->base.id, plane->name,
states[i]->normalized_zpos);
if (is_omap_plane_dual_overlay(states[i]))
inc++;
}
new_state->zpos_changed = true;
}
done:
kfree(states);
return ret;
}
static int omap_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
if (dev->mode_config.normalize_zpos) {
ret = omap_atomic_update_normalize_zpos(dev, state);
if (ret)
return ret;
}
return 0;
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = { static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
.atomic_commit_tail = omap_atomic_commit_tail, .atomic_commit_tail = omap_atomic_commit_tail,
}; };
@ -124,10 +220,86 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs =
static const struct drm_mode_config_funcs omap_mode_config_funcs = { static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create, .fb_create = omap_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed, .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check, .atomic_check = omap_atomic_check,
.atomic_commit = drm_atomic_helper_commit, .atomic_commit = drm_atomic_helper_commit,
}; };
/* Global/shared object state funcs */
/*
* This is a helper that returns the private state currently in operation.
* Note that this would return the "old_state" if called in the atomic check
* path, and the "new_state" after the atomic swap has been done.
*/
struct omap_global_state *
omap_get_existing_global_state(struct omap_drm_private *priv)
{
return to_omap_global_state(priv->glob_obj.state);
}
/*
* This acquires the modeset lock set aside for global state, creates
* a new duplicated private object state.
*/
struct omap_global_state *__must_check
omap_get_global_state(struct drm_atomic_state *s)
{
struct omap_drm_private *priv = s->dev->dev_private;
struct drm_private_state *priv_state;
priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj);
if (IS_ERR(priv_state))
return ERR_CAST(priv_state);
return to_omap_global_state(priv_state);
}
static struct drm_private_state *
omap_global_duplicate_state(struct drm_private_obj *obj)
{
struct omap_global_state *state;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
return &state->base;
}
static void omap_global_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct omap_global_state *omap_state = to_omap_global_state(state);
kfree(omap_state);
}
static const struct drm_private_state_funcs omap_global_state_funcs = {
.atomic_duplicate_state = omap_global_duplicate_state,
.atomic_destroy_state = omap_global_destroy_state,
};
static int omap_global_obj_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_global_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base,
&omap_global_state_funcs);
return 0;
}
static void omap_global_obj_fini(struct omap_drm_private *priv)
{
drm_atomic_private_obj_fini(&priv->glob_obj);
}
static void omap_disconnect_pipelines(struct drm_device *ddev) static void omap_disconnect_pipelines(struct drm_device *ddev)
{ {
struct omap_drm_private *priv = ddev->dev_private; struct omap_drm_private *priv = ddev->dev_private;
@ -231,8 +403,6 @@ static int omap_modeset_init(struct drm_device *dev)
if (!omapdss_stack_is_ready()) if (!omapdss_stack_is_ready())
return -EPROBE_DEFER; return -EPROBE_DEFER;
drm_mode_config_init(dev);
ret = omap_modeset_init_properties(dev); ret = omap_modeset_init_properties(dev);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -583,10 +753,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_gem_init(ddev); omap_gem_init(ddev);
drm_mode_config_init(ddev);
ret = omap_global_obj_init(ddev);
if (ret)
goto err_gem_deinit;
ret = omap_hwoverlays_init(priv);
if (ret)
goto err_free_priv_obj;
ret = omap_modeset_init(ddev); ret = omap_modeset_init(ddev);
if (ret) { if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret); dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
goto err_gem_deinit; goto err_free_overlays;
} }
/* Initialize vblank handling, start with all CRTCs disabled. */ /* Initialize vblank handling, start with all CRTCs disabled. */
@ -618,7 +798,12 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev); omap_fbdev_fini(ddev);
err_cleanup_modeset: err_cleanup_modeset:
omap_modeset_fini(ddev); omap_modeset_fini(ddev);
err_free_overlays:
omap_hwoverlays_destroy(priv);
err_free_priv_obj:
omap_global_obj_fini(priv);
err_gem_deinit: err_gem_deinit:
drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev); omap_gem_deinit(ddev);
destroy_workqueue(priv->wq); destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev); omap_disconnect_pipelines(ddev);
@ -642,6 +827,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev); drm_atomic_helper_shutdown(ddev);
omap_modeset_fini(ddev); omap_modeset_fini(ddev);
omap_hwoverlays_destroy(priv);
omap_global_obj_fini(priv);
drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev); omap_gem_deinit(ddev);
destroy_workqueue(priv->wq); destroy_workqueue(priv->wq);

@@ -14,6 +14,7 @@
 #include "dss/omapdss.h"
 #include "dss/dss.h"

+#include <drm/drm_atomic.h>
 #include <drm/drm_gem.h>
 #include <drm/omap_drm.h>

@@ -24,6 +25,7 @@
 #include "omap_gem.h"
 #include "omap_irq.h"
 #include "omap_plane.h"
+#include "omap_overlay.h"

 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -40,6 +42,19 @@ struct omap_drm_pipeline {
 	unsigned int alias_id;
 };

+/*
+ * Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_omap_global_state(x) container_of(x, struct omap_global_state, base)
+
+struct omap_global_state {
+	struct drm_private_state base;
+
+	/* global atomic state of assignment between overlays and planes */
+	struct drm_plane *hwoverlay_to_plane[8];
+};
+
 struct omap_drm_private {
 	struct drm_device *ddev;
 	struct device *dev;
@@ -57,6 +72,11 @@ struct omap_drm_private {
 	unsigned int num_planes;
 	struct drm_plane *planes[8];

+	unsigned int num_ovls;
+	struct omap_hw_overlay *overlays[8];
+
+	struct drm_private_obj glob_obj;
+
 	struct drm_fb_helper *fbdev;

 	struct workqueue_struct *wq;
@@ -85,4 +105,8 @@ struct omap_drm_private {

 void omap_debugfs_init(struct drm_minor *minor);

+struct omap_global_state * __must_check omap_get_global_state(struct drm_atomic_state *s);
+
+struct omap_global_state *omap_get_existing_global_state(struct omap_drm_private *priv);
+
 #endif /* __OMAPDRM_DRV_H__ */

View File

@ -131,7 +131,9 @@ static u32 drm_rotation_to_tiler(unsigned int drm_rot)
/* update ovl info for scanout, handles cases of multi-planar fb's, etc. /* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/ */
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info) struct drm_plane_state *state,
struct omap_overlay_info *info,
struct omap_overlay_info *r_info)
{ {
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format; const struct drm_format_info *format = omap_fb->format;
@ -218,6 +220,35 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
} else { } else {
info->p_uv_addr = 0; info->p_uv_addr = 0;
} }
if (r_info) {
info->width /= 2;
info->out_width /= 2;
*r_info = *info;
if (fb->format->is_yuv) {
if (info->width & 1) {
info->width++;
r_info->width--;
}
if (info->out_width & 1) {
info->out_width++;
r_info->out_width--;
}
}
r_info->pos_x = info->pos_x + info->out_width;
r_info->paddr = get_linear_addr(fb, format, 0,
x + info->width, y);
if (fb->format->format == DRM_FORMAT_NV12) {
r_info->p_uv_addr =
get_linear_addr(fb, format, 1,
x + info->width, y);
}
}
} }
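To make the split concrete, take a hypothetical 1282-pixel-wide NV12 framebuffer scanned out 1:1 (numbers chosen for illustration, not taken from the patch): width and out_width are first halved to 641; since 641 is odd and the format is YUV, the left half is widened to 642 and the right half narrowed to 640, keeping both halves at the even widths that chroma subsampling requires. The right overlay is then positioned at pos_x + 642, and its scanout addresses are recomputed at x + 642 for the luma plane and, for NV12, for the chroma plane as well.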
/* pin, prepare for scanout: */ /* pin, prepare for scanout: */


@ -26,7 +26,9 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
int omap_framebuffer_pin(struct drm_framebuffer *fb); int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct drm_plane_state *state, struct omap_overlay_info *info); struct drm_plane_state *state,
struct omap_overlay_info *info,
struct omap_overlay_info *r_info);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb); bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);


@ -789,7 +789,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (omap_obj->flags & OMAP_BO_TILED_MASK) { if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt, block = tiler_reserve_2d(fmt,
omap_obj->width, omap_obj->width,
omap_obj->height, 0); omap_obj->height, PAGE_SIZE);
} else { } else {
block = tiler_reserve_1d(obj->size); block = tiler_reserve_1d(obj->size);
} }
@ -851,6 +851,11 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj)
return; return;
if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
if (omap_obj->sgt) {
sg_free_table(omap_obj->sgt);
kfree(omap_obj->sgt);
omap_obj->sgt = NULL;
}
ret = tiler_unpin(omap_obj->block); ret = tiler_unpin(omap_obj->block);
if (ret) { if (ret) {
dev_err(obj->dev->dev, dev_err(obj->dev->dev,
@ -963,6 +968,78 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0; return 0;
} }
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
dma_addr_t addr;
struct sg_table *sgt;
struct scatterlist *sg;
unsigned int count, len, stride, i;
int ret;
ret = omap_gem_pin(obj, &addr);
if (ret)
return ERR_PTR(ret);
mutex_lock(&omap_obj->lock);
sgt = omap_obj->sgt;
if (sgt)
goto out;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
ret = -ENOMEM;
goto err_unpin;
}
if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
len = omap_obj->width << (int)fmt;
count = omap_obj->height;
stride = tiler_stride(fmt, 0);
} else {
len = obj->size;
count = 1;
stride = 0;
}
ret = sg_alloc_table(sgt, count, GFP_KERNEL);
if (ret)
goto err_free;
for_each_sg(sgt->sgl, sg, count, i) {
sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr));
sg_dma_address(sg) = addr;
sg_dma_len(sg) = len;
addr += stride;
}
omap_obj->sgt = sgt;
out:
mutex_unlock(&omap_obj->lock);
return sgt;
err_free:
kfree(sgt);
err_unpin:
mutex_unlock(&omap_obj->lock);
omap_gem_unpin(obj);
return ERR_PTR(ret);
}
void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
if (WARN_ON(omap_obj->sgt != sgt))
return;
omap_gem_unpin(obj);
}
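Although not part of the patch, a hypothetical in-kernel consumer of the new pair would look roughly like this (function name and the DMA-engine part are purely illustrative):

/* Illustrative use of the new helpers -- not taken from the patch. */
static int example_map_for_dma(struct drm_gem_object *obj)
{
	struct sg_table *sgt;

	sgt = omap_gem_get_sg(obj);	/* pins the BO, builds (or reuses) its sg_table */
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/*
	 * ... program a DMA engine from sgt->sgl; for tiled BOs each
	 * entry covers one row, rows separated by the tiler stride ...
	 */

	omap_gem_put_sg(obj, sgt);	/* drops the pin; the cached sg_table is freed on final unpin */
	return 0;
}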
#ifdef CONFIG_DRM_FBDEV_EMULATION #ifdef CONFIG_DRM_FBDEV_EMULATION
/* /*
* Get kernel virtual address for CPU access.. this more or less only * Get kernel virtual address for CPU access.. this more or less only


@ -82,5 +82,7 @@ u32 omap_gem_flags(struct drm_gem_object *obj);
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr); int x, int y, dma_addr_t *dma_addr);
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient); int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj);
void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt);
#endif /* __OMAPDRM_GEM_H__ */ #endif /* __OMAPDRM_GEM_H__ */


@ -23,45 +23,21 @@ static struct sg_table *omap_gem_map_dma_buf(
{ {
struct drm_gem_object *obj = attachment->dmabuf->priv; struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg; struct sg_table *sg;
dma_addr_t dma_addr; sg = omap_gem_get_sg(obj);
int ret; if (IS_ERR(sg))
return sg;
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return ERR_PTR(-ENOMEM);
/* camera, etc, need physically contiguous.. but we need a
* better way to know this..
*/
ret = omap_gem_pin(obj, &dma_addr);
if (ret)
goto out;
ret = sg_alloc_table(sg, 1, GFP_KERNEL);
if (ret)
goto out;
sg_init_table(sg->sgl, 1);
sg_dma_len(sg->sgl) = obj->size;
sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
sg_dma_address(sg->sgl) = dma_addr;
/* this must be after omap_gem_pin() to ensure we have pages attached */ /* this must be after omap_gem_pin() to ensure we have pages attached */
omap_gem_dma_sync_buffer(obj, dir); omap_gem_dma_sync_buffer(obj, dir);
return sg; return sg;
out:
kfree(sg);
return ERR_PTR(ret);
} }
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg, enum dma_data_direction dir) struct sg_table *sg, enum dma_data_direction dir)
{ {
struct drm_gem_object *obj = attachment->dmabuf->priv; struct drm_gem_object *obj = attachment->dmabuf->priv;
omap_gem_unpin(obj); omap_gem_put_sg(obj, sg);
sg_free_table(sg);
kfree(sg);
} }
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
@ -114,7 +90,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
DEFINE_DMA_BUF_EXPORT_INFO(exp_info); DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &omap_dmabuf_ops; exp_info.ops = &omap_dmabuf_ops;
exp_info.size = obj->size; exp_info.size = omap_gem_mmap_size(obj);
exp_info.flags = flags; exp_info.flags = flags;
exp_info.priv = obj; exp_info.priv = obj;


@ -0,0 +1,212 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
* Author: Benoit Parrot <bparrot@ti.com>
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
/*
* overlay funcs
*/
static const char * const overlay_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
[OMAP_DSS_VIDEO2] = "vid2",
[OMAP_DSS_VIDEO3] = "vid3",
};
/*
* Find a free overlay with the required caps and supported fourcc
*/
static struct omap_hw_overlay *
omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[],
u32 caps, u32 fourcc)
{
struct omap_drm_private *priv = dev->dev_private;
int i;
DBG("caps: %x fourcc: %x", caps, fourcc);
for (i = 0; i < priv->num_ovls; i++) {
struct omap_hw_overlay *cur = priv->overlays[i];
DBG("%d: id: %d cur->caps: %x",
cur->idx, cur->id, cur->caps);
/* skip if already in-use */
if (hwoverlay_to_plane[cur->idx])
continue;
/* skip if doesn't support some required caps: */
if (caps & ~cur->caps)
continue;
/* check supported format */
if (!dispc_ovl_color_mode_supported(priv->dispc,
cur->id, fourcc))
continue;
return cur;
}
DBG("no match");
return NULL;
}
/*
* Assign a new overlay to a plane with the required caps and supported fourcc.
* If a plane needs a new overlay, the previous one should have been released
* with omap_overlay_release().
* This should be called from the plane atomic_check() in order to prepare the
* next global overlay_map, which takes effect once the atomic transaction is
* validated.
*/
int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
struct omap_hw_overlay **r_overlay)
{
/* Get the global state of the current atomic transaction */
struct omap_global_state *state = omap_get_global_state(s);
struct drm_plane **overlay_map = state->hwoverlay_to_plane;
struct omap_hw_overlay *ovl, *r_ovl;
ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc);
if (!ovl)
return -ENOMEM;
overlay_map[ovl->idx] = plane;
*overlay = ovl;
if (r_overlay) {
r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map,
caps, fourcc);
if (!r_ovl) {
overlay_map[ovl->idx] = NULL;
*overlay = NULL;
return -ENOMEM;
}
overlay_map[r_ovl->idx] = plane;
*r_overlay = r_ovl;
}
DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps);
if (r_overlay) {
DBG("%s: assign to right of plane %s caps %x",
r_ovl->name, plane->name, caps);
}
return 0;
}
/*
* Release an overlay from a plane when the plane is no longer visible or when
* it needs a different overlay because its required overlay caps have changed.
* This should be called from the plane atomic_check() in order to prepare the
* next global overlay_map, which takes effect once the atomic transaction is
* validated.
*/
void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay)
{
/* Get the global state of the current atomic transaction */
struct omap_global_state *state = omap_get_global_state(s);
struct drm_plane **overlay_map = state->hwoverlay_to_plane;
if (!overlay)
return;
if (WARN_ON(!overlay_map[overlay->idx]))
return;
DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name);
overlay_map[overlay->idx] = NULL;
}
/*
* Update an overlay state that was attached to a plane before the current atomic state.
* This should be called from the plane atomic_update() or atomic_disable(),
* where an overlay association to a plane could have changed between the old and current
* atomic state.
*/
void omap_overlay_update_state(struct omap_drm_private *priv,
struct omap_hw_overlay *overlay)
{
struct omap_global_state *state = omap_get_existing_global_state(priv);
struct drm_plane **overlay_map = state->hwoverlay_to_plane;
/* Check if this overlay is not used anymore, then disable it */
if (!overlay_map[overlay->idx]) {
DBG("%s: disabled", overlay->name);
/* disable the overlay */
dispc_ovl_enable(priv->dispc, overlay->id, false);
}
}
static void omap_overlay_destroy(struct omap_hw_overlay *overlay)
{
kfree(overlay);
}
static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id,
enum omap_overlay_caps caps)
{
struct omap_hw_overlay *overlay;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return ERR_PTR(-ENOMEM);
overlay->name = overlay_id_to_name[overlay_id];
overlay->id = overlay_id;
overlay->caps = caps;
return overlay;
}
int omap_hwoverlays_init(struct omap_drm_private *priv)
{
static const enum omap_plane_id hw_plane_ids[] = {
OMAP_DSS_GFX, OMAP_DSS_VIDEO1,
OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3,
};
u32 num_overlays = dispc_get_num_ovls(priv->dispc);
enum omap_overlay_caps caps;
int i, ret;
for (i = 0; i < num_overlays; i++) {
struct omap_hw_overlay *overlay;
caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]);
overlay = omap_overlay_init(hw_plane_ids[i], caps);
if (IS_ERR(overlay)) {
ret = PTR_ERR(overlay);
dev_err(priv->dev, "failed to construct overlay for %s (%d)\n",
overlay_id_to_name[i], ret);
omap_hwoverlays_destroy(priv);
return ret;
}
overlay->idx = priv->num_ovls;
priv->overlays[priv->num_ovls++] = overlay;
}
return 0;
}
void omap_hwoverlays_destroy(struct omap_drm_private *priv)
{
int i;
for (i = 0; i < priv->num_ovls; i++) {
omap_overlay_destroy(priv->overlays[i]);
priv->overlays[i] = NULL;
}
priv->num_ovls = 0;
}


@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
* Author: Benoit Parrot <bparrot@ti.com>
*/
#ifndef __OMAPDRM_OVERLAY_H__
#define __OMAPDRM_OVERLAY_H__
#include <linux/types.h>
enum drm_plane_type;
struct drm_device;
struct drm_mode_object;
struct drm_plane;
/* Used to associate a HW overlay/plane to a plane */
struct omap_hw_overlay {
unsigned int idx;
const char *name;
enum omap_plane_id id;
enum omap_overlay_caps caps;
};
int omap_hwoverlays_init(struct omap_drm_private *priv);
void omap_hwoverlays_destroy(struct omap_drm_private *priv);
int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane,
u32 caps, u32 fourcc, struct omap_hw_overlay **overlay,
struct omap_hw_overlay **r_overlay);
void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay);
void omap_overlay_update_state(struct omap_drm_private *priv, struct omap_hw_overlay *overlay);
#endif /* __OMAPDRM_OVERLAY_H__ */


@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h> #include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
#include "omap_dmm_tiler.h" #include "omap_dmm_tiler.h"
#include "omap_drv.h" #include "omap_drv.h"
@ -16,14 +17,30 @@
* plane funcs * plane funcs
*/ */
#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base)
struct omap_plane_state {
/* Must be first. */
struct drm_plane_state base;
struct omap_hw_overlay *overlay;
struct omap_hw_overlay *r_overlay; /* right overlay */
};
#define to_omap_plane(x) container_of(x, struct omap_plane, base) #define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane { struct omap_plane {
struct drm_plane base; struct drm_plane base;
enum omap_plane_id id; enum omap_plane_id id;
const char *name;
}; };
bool is_omap_plane_dual_overlay(struct drm_plane_state *state)
{
struct omap_plane_state *omap_state = to_omap_plane_state(state);
return !!omap_state->r_overlay;
}
static int omap_plane_prepare_fb(struct drm_plane *plane, static int omap_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state) struct drm_plane_state *new_state)
{ {
@ -46,13 +63,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
struct omap_drm_private *priv = plane->dev->dev_private; struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane); plane);
struct omap_overlay_info info; struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct omap_plane_state *new_omap_state;
struct omap_plane_state *old_omap_state;
struct omap_overlay_info info, r_info;
enum omap_plane_id ovl_id, r_ovl_id;
int ret; int ret;
bool dual_ovl;
DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc, new_omap_state = to_omap_plane_state(new_state);
old_omap_state = to_omap_plane_state(old_state);
dual_ovl = is_omap_plane_dual_overlay(new_state);
/* Cleanup previously held overlay if needed */
if (old_omap_state->overlay)
omap_overlay_update_state(priv, old_omap_state->overlay);
if (old_omap_state->r_overlay)
omap_overlay_update_state(priv, old_omap_state->r_overlay);
if (!new_omap_state->overlay) {
DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name);
return;
}
ovl_id = new_omap_state->overlay->id;
DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc,
new_state->fb); new_state->fb);
memset(&info, 0, sizeof(info)); memset(&info, 0, sizeof(info));
@ -67,65 +106,155 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
info.color_encoding = new_state->color_encoding; info.color_encoding = new_state->color_encoding;
info.color_range = new_state->color_range; info.color_range = new_state->color_range;
/* update scanout: */ r_info = info;
omap_framebuffer_update_scanout(new_state->fb, new_state, &info);
DBG("%dx%d -> %dx%d (%d)", info.width, info.height, /* update scanout: */
info.out_width, info.out_height, omap_framebuffer_update_scanout(new_state->fb, new_state, &info,
info.screen_width); dual_ovl ? &r_info : NULL);
DBG("%s: %dx%d -> %dx%d (%d)",
new_omap_state->overlay->name, info.width, info.height,
info.out_width, info.out_height, info.screen_width);
DBG("%d,%d %pad %pad", info.pos_x, info.pos_y, DBG("%d,%d %pad %pad", info.pos_x, info.pos_y,
&info.paddr, &info.p_uv_addr); &info.paddr, &info.p_uv_addr);
if (dual_ovl) {
r_ovl_id = new_omap_state->r_overlay->id;
/*
* If the current plane uses two hw planes, the very next
* zorder is taken by the r_overlay, so we simply use the
* main overlay's zorder + 1.
*/
r_info.zorder = info.zorder + 1;
DBG("%s: %dx%d -> %dx%d (%d)",
new_omap_state->r_overlay->name,
r_info.width, r_info.height,
r_info.out_width, r_info.out_height, r_info.screen_width);
DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y,
&r_info.paddr, &r_info.p_uv_addr);
}
/* and finally, update omapdss: */ /* and finally, update omapdss: */
ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info, ret = dispc_ovl_setup(priv->dispc, ovl_id, &info,
omap_crtc_timings(new_state->crtc), false, omap_crtc_timings(new_state->crtc), false,
omap_crtc_channel(new_state->crtc)); omap_crtc_channel(new_state->crtc));
if (ret) { if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n", dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name); plane->name);
dispc_ovl_enable(priv->dispc, omap_plane->id, false); dispc_ovl_enable(priv->dispc, ovl_id, false);
return; return;
} }
dispc_ovl_enable(priv->dispc, omap_plane->id, true); dispc_ovl_enable(priv->dispc, ovl_id, true);
if (dual_ovl) {
ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info,
omap_crtc_timings(new_state->crtc), false,
omap_crtc_channel(new_state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n",
plane->name);
dispc_ovl_enable(priv->dispc, r_ovl_id, false);
dispc_ovl_enable(priv->dispc, ovl_id, false);
return;
}
dispc_ovl_enable(priv->dispc, r_ovl_id, true);
}
} }
static void omap_plane_atomic_disable(struct drm_plane *plane, static void omap_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct omap_drm_private *priv = plane->dev->dev_private; struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane); struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct omap_plane_state *new_omap_state;
struct omap_plane_state *old_omap_state;
new_omap_state = to_omap_plane_state(new_state);
old_omap_state = to_omap_plane_state(old_state);
if (!old_omap_state->overlay)
return;
new_state->rotation = DRM_MODE_ROTATE_0; new_state->rotation = DRM_MODE_ROTATE_0;
new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id; new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id;
dispc_ovl_enable(priv->dispc, omap_plane->id, false); omap_overlay_update_state(priv, old_omap_state->overlay);
new_omap_state->overlay = NULL;
if (is_omap_plane_dual_overlay(old_state)) {
omap_overlay_update_state(priv, old_omap_state->r_overlay);
new_omap_state->r_overlay = NULL;
}
} }
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int omap_plane_atomic_check(struct drm_plane *plane, static int omap_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state) struct drm_atomic_state *state)
{ {
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane); plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
plane);
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state);
struct omap_global_state *omap_overlay_global_state;
struct drm_crtc_state *crtc_state; struct drm_crtc_state *crtc_state;
bool new_r_hw_overlay = false;
bool new_hw_overlay = false;
u32 max_width, max_height;
struct drm_crtc *crtc;
u16 width, height;
u32 caps = 0;
u32 fourcc;
int ret;
if (!new_plane_state->fb) omap_overlay_global_state = omap_get_global_state(state);
if (IS_ERR(omap_overlay_global_state))
return PTR_ERR(omap_overlay_global_state);
dispc_ovl_get_max_size(priv->dispc, &width, &height);
max_width = width << 16;
max_height = height << 16;
crtc = new_plane_state->crtc ? new_plane_state->crtc : plane->state->crtc;
if (!crtc)
return 0; return 0;
/* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (WARN_ON(!new_plane_state->crtc))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
/* we should have a crtc state if the plane is attached to a crtc */ /* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state)) if (WARN_ON(!crtc_state))
return 0; return 0;
if (!crtc_state->enable) /*
* Note: these are just sanity checks to filter out totally bad scaling
* factors. The real limits must be calculated case by case, and
* unfortunately we currently do those checks only at the commit
* phase in dispc.
*/
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
FRAC_16_16(1, 8), FRAC_16_16(8, 1),
true, true);
if (ret)
return ret;
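For reference, FRAC_16_16(1, 8) evaluates to 65536 / 8 = 8192 and FRAC_16_16(8, 1) to 8 * 65536 = 524288; the scale factor used by drm_atomic_helper_check_plane_state() is the src/dst ratio in 16.16 fixed point, so factors between 1/8 (up to 8x upscaling) and 8 (up to 8x downscaling) pass this sanity check. The helper also clips the plane against the CRTC and computes new_plane_state->visible, which the code below relies on.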
DBG("%s: visible %d -> %d", plane->name,
old_plane_state->visible, new_plane_state->visible);
if (!new_plane_state->visible) {
omap_overlay_release(state, omap_state->overlay);
omap_overlay_release(state, omap_state->r_overlay);
omap_state->overlay = NULL;
omap_state->r_overlay = NULL;
return 0; return 0;
}
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0)
return -EINVAL; return -EINVAL;
@ -136,10 +265,96 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay) if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL; return -EINVAL;
/* Make sure dimensions are within bounds. */
if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height)
return -EINVAL;
if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) {
bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv;
if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) ||
new_plane_state->crtc_w / 2 & 1)) {
/*
* If calculating the split overlay width yields an odd
* value, the individual widths will need to be adjusted
* by +/- 1, so make sure the adjusted halves still fit.
*/
if (new_plane_state->src_w <= ((2 * width - 1) << 16) &&
new_plane_state->crtc_w <= (2 * width - 1))
new_r_hw_overlay = true;
else
return -EINVAL;
} else {
if (new_plane_state->src_w <= (2 * max_width) &&
new_plane_state->crtc_w <= (2 * width))
new_r_hw_overlay = true;
else
return -EINVAL;
}
}
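As a worked example (numbers purely illustrative): if dispc reports a 2048-pixel maximum overlay width, a 3200-pixel-wide XRGB plane displayed 1:1 exceeds the single-overlay limit but satisfies src_w <= 2 * max_width and crtc_w <= 2 * width, so new_r_hw_overlay is set and the plane gets split across two overlays; a 4100-pixel-wide plane fails even the doubled limit and the check returns -EINVAL. For YUV sources whose halves would come out odd, the slightly tighter 2 * width - 1 bound leaves room for the +/- 1 adjustment made in omap_framebuffer_update_scanout().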
if (new_plane_state->rotation != DRM_MODE_ROTATE_0 && if (new_plane_state->rotation != DRM_MODE_ROTATE_0 &&
!omap_framebuffer_supports_rotation(new_plane_state->fb)) !omap_framebuffer_supports_rotation(new_plane_state->fb))
return -EINVAL; return -EINVAL;
if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w ||
(new_plane_state->src_h >> 16) != new_plane_state->crtc_h)
caps |= OMAP_DSS_OVL_CAP_SCALE;
fourcc = new_plane_state->fb->format->format;
/*
* (re)allocate hw overlay if we don't have one or
* there is a caps mismatch
*/
if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) {
new_hw_overlay = true;
} else {
/* check supported format */
if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id,
fourcc))
new_hw_overlay = true;
}
/*
* check if we need two overlays and only have 1 or
* if we had 2 overlays but will only need 1
*/
if ((new_r_hw_overlay && !omap_state->r_overlay) ||
(!new_r_hw_overlay && omap_state->r_overlay))
new_hw_overlay = true;
if (new_hw_overlay) {
struct omap_hw_overlay *old_ovl = omap_state->overlay;
struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay;
struct omap_hw_overlay *new_ovl = NULL;
struct omap_hw_overlay *new_r_ovl = NULL;
omap_overlay_release(state, old_ovl);
omap_overlay_release(state, old_r_ovl);
ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl,
new_r_hw_overlay ? &new_r_ovl : NULL);
if (ret) {
DBG("%s: failed to assign hw_overlay", plane->name);
omap_state->overlay = NULL;
omap_state->r_overlay = NULL;
return ret;
}
omap_state->overlay = new_ovl;
if (new_r_hw_overlay)
omap_state->r_overlay = new_r_ovl;
else
omap_state->r_overlay = NULL;
}
DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id);
if (omap_state->r_overlay)
DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id);
return 0; return 0;
} }
@ -155,7 +370,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
{ {
struct omap_plane *omap_plane = to_omap_plane(plane); struct omap_plane *omap_plane = to_omap_plane(plane);
DBG("%s", omap_plane->name); DBG("%s", plane->name);
drm_plane_cleanup(plane); drm_plane_cleanup(plane);
@ -189,11 +404,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
static void omap_plane_reset(struct drm_plane *plane) static void omap_plane_reset(struct drm_plane *plane)
{ {
struct omap_plane *omap_plane = to_omap_plane(plane); struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_plane_state *omap_state;
drm_atomic_helper_plane_reset(plane); if (plane->state)
if (!plane->state) drm_atomic_helper_plane_destroy_state(plane, plane->state);
omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
if (!omap_state)
return; return;
__drm_atomic_helper_plane_reset(plane, &omap_state->base);
/* /*
* Set the zpos default depending on whether we are a primary or overlay * Set the zpos default depending on whether we are a primary or overlay
* plane. * plane.
@ -204,6 +425,47 @@ static void omap_plane_reset(struct drm_plane *plane)
plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE; plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE;
} }
static struct drm_plane_state *
omap_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct omap_plane_state *state, *current_state;
if (WARN_ON(!plane->state))
return NULL;
current_state = to_omap_plane_state(plane->state);
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
state->overlay = current_state->overlay;
state->r_overlay = current_state->r_overlay;
return &state->base;
}
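Note that .atomic_destroy_state can stay on the generic drm_atomic_helper_plane_destroy_state() even with the subclassed state: struct drm_plane_state is the first member of struct omap_plane_state, so the helper's kfree() of the base pointer frees the whole structure, and the overlay pointers it carries (owned by omap_drm_private) need no extra cleanup.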
static void omap_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct omap_plane_state *omap_state = to_omap_plane_state(state);
if (omap_state->overlay)
drm_printf(p, "\toverlay=%s (caps=0x%x)\n",
omap_state->overlay->name,
omap_state->overlay->caps);
else
drm_printf(p, "\toverlay=None\n");
if (omap_state->r_overlay)
drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n",
omap_state->r_overlay->name,
omap_state->r_overlay->caps);
else
drm_printf(p, "\tr_overlay=None\n");
}
static int omap_plane_atomic_set_property(struct drm_plane *plane, static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_plane_state *state,
struct drm_property *property, struct drm_property *property,
@ -239,10 +501,11 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.disable_plane = drm_atomic_helper_disable_plane, .disable_plane = drm_atomic_helper_disable_plane,
.reset = omap_plane_reset, .reset = omap_plane_reset,
.destroy = omap_plane_destroy, .destroy = omap_plane_destroy,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_duplicate_state = omap_plane_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property, .atomic_set_property = omap_plane_atomic_set_property,
.atomic_get_property = omap_plane_atomic_get_property, .atomic_get_property = omap_plane_atomic_get_property,
.atomic_print_state = omap_plane_atomic_print_state,
}; };
static bool omap_plane_supports_yuv(struct drm_plane *plane) static bool omap_plane_supports_yuv(struct drm_plane *plane)
@ -261,20 +524,6 @@ static bool omap_plane_supports_yuv(struct drm_plane *plane)
return false; return false;
} }
static const char *plane_id_to_name[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
[OMAP_DSS_VIDEO2] = "vid2",
[OMAP_DSS_VIDEO3] = "vid3",
};
static const enum omap_plane_id plane_idx_to_id[] = {
OMAP_DSS_GFX,
OMAP_DSS_VIDEO1,
OMAP_DSS_VIDEO2,
OMAP_DSS_VIDEO3,
};
/* initialize plane */ /* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev, struct drm_plane *omap_plane_init(struct drm_device *dev,
int idx, enum drm_plane_type type, int idx, enum drm_plane_type type,
@ -284,27 +533,25 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
unsigned int num_planes = dispc_get_num_ovls(priv->dispc); unsigned int num_planes = dispc_get_num_ovls(priv->dispc);
struct drm_plane *plane; struct drm_plane *plane;
struct omap_plane *omap_plane; struct omap_plane *omap_plane;
enum omap_plane_id id;
int ret; int ret;
u32 nformats; u32 nformats;
const u32 *formats; const u32 *formats;
if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id))) if (WARN_ON(idx >= num_planes))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
id = plane_idx_to_id[idx];
DBG("%s: type=%d", plane_id_to_name[id], type);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane) if (!omap_plane)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
formats = dispc_ovl_get_color_modes(priv->dispc, id); omap_plane->id = idx;
DBG("%d: type=%d", omap_plane->id, type);
DBG(" crtc_mask: 0x%04x", possible_crtcs);
formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id);
for (nformats = 0; formats[nformats]; ++nformats) for (nformats = 0; formats[nformats]; ++nformats)
; ;
omap_plane->id = id;
omap_plane->name = plane_id_to_name[id];
plane = &omap_plane->base; plane = &omap_plane->base;
@ -334,8 +581,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
return plane; return plane;
error: error:
dev_err(dev->dev, "%s(): could not create plane: %s\n", dev_err(dev->dev, "%s(): could not create plane: %d\n",
__func__, plane_id_to_name[id]); __func__, omap_plane->id);
kfree(omap_plane); kfree(omap_plane);
return NULL; return NULL;


@ -22,5 +22,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs); u32 possible_crtcs);
void omap_plane_install_properties(struct drm_plane *plane, void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj); struct drm_mode_object *obj);
bool is_omap_plane_dual_overlay(struct drm_plane_state *state);
#endif /* __OMAPDRM_PLANE_H__ */ #endif /* __OMAPDRM_PLANE_H__ */


@ -152,7 +152,7 @@ config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels" tristate "Ilitek ILI9341 240x320 QVGA panels"
depends on OF && SPI depends on OF && SPI
depends on DRM_KMS_HELPER depends on DRM_KMS_HELPER
depends on DRM_KMS_CMA_HELPER depends on DRM_GEM_CMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI select DRM_MIPI_DBI
help help
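This and the matching Kconfig hunks further down are mechanical fallout of dropping the separate KMS CMA helper option: every driver that used to depend on or select DRM_KMS_CMA_HELPER now references DRM_GEM_CMA_HELPER instead, or simply loses the redundant select where DRM_GEM_CMA_HELPER was already present.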


@ -84,8 +84,8 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x0D, 0x63), _INIT_DCS_CMD(0x0D, 0x63),
_INIT_DCS_CMD(0x0E, 0x91), _INIT_DCS_CMD(0x0E, 0x91),
_INIT_DCS_CMD(0x0F, 0x73), _INIT_DCS_CMD(0x0F, 0x73),
_INIT_DCS_CMD(0x95, 0xEB), _INIT_DCS_CMD(0x95, 0xE6),
_INIT_DCS_CMD(0x96, 0xEB), _INIT_DCS_CMD(0x96, 0xF0),
_INIT_DCS_CMD(0x30, 0x11), _INIT_DCS_CMD(0x30, 0x11),
_INIT_DCS_CMD(0x6D, 0x66), _INIT_DCS_CMD(0x6D, 0x66),
_INIT_DCS_CMD(0x75, 0xA2), _INIT_DCS_CMD(0x75, 0xA2),
@ -111,18 +111,18 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
_INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
_INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
_INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
_INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
_INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
_INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
_INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
_INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
_INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0),
_INIT_DCS_CMD(0xFF, 0x24), _INIT_DCS_CMD(0xFF, 0x24),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
@ -225,6 +225,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x7F, 0x3C), _INIT_DCS_CMD(0x7F, 0x3C),
_INIT_DCS_CMD(0x82, 0x04), _INIT_DCS_CMD(0x82, 0x04),
_INIT_DCS_CMD(0x97, 0xC0), _INIT_DCS_CMD(0x97, 0xC0),
_INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
_INIT_DCS_CMD(0x91, 0x44), _INIT_DCS_CMD(0x91, 0x44),
_INIT_DCS_CMD(0x92, 0xA9), _INIT_DCS_CMD(0x92, 0xA9),
@ -332,12 +333,39 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
_INIT_DCS_CMD(0x34, 0x78), _INIT_DCS_CMD(0x34, 0x78),
_INIT_DCS_CMD(0x35, 0x16), _INIT_DCS_CMD(0x35, 0x16),
_INIT_DCS_CMD(0xC8, 0x04), _INIT_DCS_CMD(0xC8, 0x04),
_INIT_DCS_CMD(0xC9, 0x80), _INIT_DCS_CMD(0xC9, 0x9E),
_INIT_DCS_CMD(0xCA, 0x4E), _INIT_DCS_CMD(0xCA, 0x4E),
_INIT_DCS_CMD(0xCB, 0x00), _INIT_DCS_CMD(0xCB, 0x00),
_INIT_DCS_CMD(0xA9, 0x4C),
_INIT_DCS_CMD(0xAA, 0x47),
_INIT_DCS_CMD(0xA9, 0x49),
_INIT_DCS_CMD(0xAA, 0x4B),
_INIT_DCS_CMD(0xAB, 0x48),
_INIT_DCS_CMD(0xAC, 0x43),
_INIT_DCS_CMD(0xAD, 0x40),
_INIT_DCS_CMD(0xAE, 0x50),
_INIT_DCS_CMD(0xAF, 0x44),
_INIT_DCS_CMD(0xB0, 0x54),
_INIT_DCS_CMD(0xB1, 0x4E),
_INIT_DCS_CMD(0xB2, 0x4D),
_INIT_DCS_CMD(0xB3, 0x4C),
_INIT_DCS_CMD(0xB4, 0x41),
_INIT_DCS_CMD(0xB5, 0x47),
_INIT_DCS_CMD(0xB6, 0x53),
_INIT_DCS_CMD(0xB7, 0x3E),
_INIT_DCS_CMD(0xB8, 0x51),
_INIT_DCS_CMD(0xB9, 0x3C),
_INIT_DCS_CMD(0xBA, 0x3B),
_INIT_DCS_CMD(0xBB, 0x46),
_INIT_DCS_CMD(0xBC, 0x45),
_INIT_DCS_CMD(0xBD, 0x55),
_INIT_DCS_CMD(0xBE, 0x3D),
_INIT_DCS_CMD(0xBF, 0x3F),
_INIT_DCS_CMD(0xC0, 0x52),
_INIT_DCS_CMD(0xC1, 0x4A),
_INIT_DCS_CMD(0xC2, 0x39),
_INIT_DCS_CMD(0xC3, 0x4F),
_INIT_DCS_CMD(0xC4, 0x3A),
_INIT_DCS_CMD(0xC5, 0x42),
_INIT_DCS_CMD(0xFF, 0x27), _INIT_DCS_CMD(0xFF, 0x27),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
@ -419,7 +447,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
{}, {},
}; };
static const struct panel_init_cmd inx_init_cmd[] = { static const struct panel_init_cmd inx_hj110iz_init_cmd[] = {
_INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFF, 0x20),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x05, 0xD1), _INIT_DCS_CMD(0x05, 0xD1),
@ -428,10 +456,10 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x08, 0x4B), _INIT_DCS_CMD(0x08, 0x4B),
_INIT_DCS_CMD(0x0E, 0x91), _INIT_DCS_CMD(0x0E, 0x91),
_INIT_DCS_CMD(0x0F, 0x69), _INIT_DCS_CMD(0x0F, 0x69),
_INIT_DCS_CMD(0x95, 0xFF), _INIT_DCS_CMD(0x95, 0xF5),
_INIT_DCS_CMD(0x96, 0xFF), _INIT_DCS_CMD(0x96, 0xF5),
_INIT_DCS_CMD(0x9D, 0x0A), _INIT_DCS_CMD(0x9D, 0x00),
_INIT_DCS_CMD(0x9E, 0x0A), _INIT_DCS_CMD(0x9E, 0x00),
_INIT_DCS_CMD(0x69, 0x98), _INIT_DCS_CMD(0x69, 0x98),
_INIT_DCS_CMD(0x75, 0xA2), _INIT_DCS_CMD(0x75, 0xA2),
_INIT_DCS_CMD(0x77, 0xB3), _INIT_DCS_CMD(0x77, 0xB3),
@ -493,17 +521,17 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x2A, 0x03), _INIT_DCS_CMD(0x2A, 0x03),
_INIT_DCS_CMD(0x2B, 0x03), _INIT_DCS_CMD(0x2B, 0x03),
_INIT_DCS_CMD(0x2F, 0x06), _INIT_DCS_CMD(0x2F, 0x05),
_INIT_DCS_CMD(0x30, 0x32), _INIT_DCS_CMD(0x30, 0x32),
_INIT_DCS_CMD(0x31, 0x43), _INIT_DCS_CMD(0x31, 0x43),
_INIT_DCS_CMD(0x33, 0x06), _INIT_DCS_CMD(0x33, 0x05),
_INIT_DCS_CMD(0x34, 0x32), _INIT_DCS_CMD(0x34, 0x32),
_INIT_DCS_CMD(0x35, 0x43), _INIT_DCS_CMD(0x35, 0x43),
_INIT_DCS_CMD(0x37, 0x44), _INIT_DCS_CMD(0x37, 0x44),
_INIT_DCS_CMD(0x38, 0x40), _INIT_DCS_CMD(0x38, 0x40),
_INIT_DCS_CMD(0x39, 0x00), _INIT_DCS_CMD(0x39, 0x00),
_INIT_DCS_CMD(0x3A, 0x01), _INIT_DCS_CMD(0x3A, 0x18),
_INIT_DCS_CMD(0x3B, 0x48), _INIT_DCS_CMD(0x3B, 0x00),
_INIT_DCS_CMD(0x3D, 0x93), _INIT_DCS_CMD(0x3D, 0x93),
_INIT_DCS_CMD(0xAB, 0x44), _INIT_DCS_CMD(0xAB, 0x44),
_INIT_DCS_CMD(0xAC, 0x40), _INIT_DCS_CMD(0xAC, 0x40),
@ -520,8 +548,8 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0x56, 0x08), _INIT_DCS_CMD(0x56, 0x08),
_INIT_DCS_CMD(0x58, 0x21), _INIT_DCS_CMD(0x58, 0x21),
_INIT_DCS_CMD(0x59, 0x40), _INIT_DCS_CMD(0x59, 0x40),
_INIT_DCS_CMD(0x5A, 0x09), _INIT_DCS_CMD(0x5A, 0x00),
_INIT_DCS_CMD(0x5B, 0x48), _INIT_DCS_CMD(0x5B, 0x2C),
_INIT_DCS_CMD(0x5E, 0x00, 0x10), _INIT_DCS_CMD(0x5E, 0x00, 0x10),
_INIT_DCS_CMD(0x5F, 0x00), _INIT_DCS_CMD(0x5F, 0x00),
@ -558,33 +586,36 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xEF, 0x01), _INIT_DCS_CMD(0xEF, 0x01),
_INIT_DCS_CMD(0xF0, 0x7A), _INIT_DCS_CMD(0xF0, 0x7A),
_INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
_INIT_DCS_CMD(0xFF, 0x25), _INIT_DCS_CMD(0xFF, 0x25),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x05, 0x00), _INIT_DCS_CMD(0x05, 0x00),
_INIT_DCS_CMD(0x13, 0x02),
_INIT_DCS_CMD(0x14, 0xDF),
_INIT_DCS_CMD(0xF1, 0x10), _INIT_DCS_CMD(0xF1, 0x10),
_INIT_DCS_CMD(0x1E, 0x00), _INIT_DCS_CMD(0x1E, 0x00),
_INIT_DCS_CMD(0x1F, 0x09), _INIT_DCS_CMD(0x1F, 0x00),
_INIT_DCS_CMD(0x20, 0x46), _INIT_DCS_CMD(0x20, 0x2C),
_INIT_DCS_CMD(0x25, 0x00), _INIT_DCS_CMD(0x25, 0x00),
_INIT_DCS_CMD(0x26, 0x09), _INIT_DCS_CMD(0x26, 0x00),
_INIT_DCS_CMD(0x27, 0x46), _INIT_DCS_CMD(0x27, 0x2C),
_INIT_DCS_CMD(0x3F, 0x80), _INIT_DCS_CMD(0x3F, 0x80),
_INIT_DCS_CMD(0x40, 0x00), _INIT_DCS_CMD(0x40, 0x00),
_INIT_DCS_CMD(0x43, 0x00), _INIT_DCS_CMD(0x43, 0x00),
_INIT_DCS_CMD(0x44, 0x09), _INIT_DCS_CMD(0x44, 0x18),
_INIT_DCS_CMD(0x45, 0x46), _INIT_DCS_CMD(0x45, 0x00),
_INIT_DCS_CMD(0x48, 0x09), _INIT_DCS_CMD(0x48, 0x00),
_INIT_DCS_CMD(0x49, 0x46), _INIT_DCS_CMD(0x49, 0x2C),
_INIT_DCS_CMD(0x5B, 0x80), _INIT_DCS_CMD(0x5B, 0x80),
_INIT_DCS_CMD(0x5C, 0x00), _INIT_DCS_CMD(0x5C, 0x00),
_INIT_DCS_CMD(0x5D, 0x01), _INIT_DCS_CMD(0x5D, 0x00),
_INIT_DCS_CMD(0x5E, 0x46), _INIT_DCS_CMD(0x5E, 0x00),
_INIT_DCS_CMD(0x61, 0x01), _INIT_DCS_CMD(0x61, 0x00),
_INIT_DCS_CMD(0x62, 0x46), _INIT_DCS_CMD(0x62, 0x2C),
_INIT_DCS_CMD(0x68, 0x10), _INIT_DCS_CMD(0x68, 0x10),
_INIT_DCS_CMD(0xFF, 0x26), _INIT_DCS_CMD(0xFF, 0x26),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
@ -700,16 +731,22 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xA3, 0x30), _INIT_DCS_CMD(0xA3, 0x30),
_INIT_DCS_CMD(0xA4, 0xC0), _INIT_DCS_CMD(0xA4, 0xC0),
_INIT_DCS_CMD(0xE8, 0x00), _INIT_DCS_CMD(0xE8, 0x00),
_INIT_DCS_CMD(0x97, 0x3C),
_INIT_DCS_CMD(0x98, 0x02),
_INIT_DCS_CMD(0x99, 0x95),
_INIT_DCS_CMD(0x9A, 0x06),
_INIT_DCS_CMD(0x9B, 0x00),
_INIT_DCS_CMD(0x9C, 0x0B),
_INIT_DCS_CMD(0x9D, 0x0A),
_INIT_DCS_CMD(0x9E, 0x90),
_INIT_DCS_CMD(0xFF, 0xF0), _INIT_DCS_CMD(0xFF, 0xF0),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x3A, 0x08), _INIT_DCS_CMD(0x3A, 0x08),
_INIT_DCS_CMD(0xFF, 0xD0), _INIT_DCS_CMD(0xFF, 0xD0),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x00, 0x33), _INIT_DCS_CMD(0x00, 0x33),
_INIT_DCS_CMD(0x02, 0x77),
_INIT_DCS_CMD(0x08, 0x01), _INIT_DCS_CMD(0x08, 0x01),
_INIT_DCS_CMD(0x09, 0xBF), _INIT_DCS_CMD(0x09, 0xBF),
_INIT_DCS_CMD(0x28, 0x30),
_INIT_DCS_CMD(0x2F, 0x33), _INIT_DCS_CMD(0x2F, 0x33),
_INIT_DCS_CMD(0xFF, 0x23), _INIT_DCS_CMD(0xFF, 0x23),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
@ -718,6 +755,9 @@ static const struct panel_init_cmd inx_init_cmd[] = {
_INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFF, 0x20),
_INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0xFB, 0x01),
_INIT_DCS_CMD(0x30, 0x00), _INIT_DCS_CMD(0x30, 0x00),
_INIT_DCS_CMD(0xFF, 0x24),
_INIT_DCS_CMD(0x5C, 0x88),
_INIT_DCS_CMD(0x5D, 0x08),
_INIT_DCS_CMD(0xFF, 0x10), _INIT_DCS_CMD(0xFF, 0x10),
_INIT_DCS_CMD(0xB9, 0x01), _INIT_DCS_CMD(0xB9, 0x01),
_INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFF, 0x20),
@ -1312,7 +1352,7 @@ static const struct panel_desc inx_hj110iz_desc = {
| MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_HSE
| MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_CLOCK_NON_CONTINUOUS
| MIPI_DSI_MODE_VIDEO_BURST, | MIPI_DSI_MODE_VIDEO_BURST,
.init_cmds = inx_init_cmd, .init_cmds = inx_hj110iz_init_cmd,
}; };
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = { static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {


@ -223,7 +223,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
obj = kzalloc(sizeof(*obj), GFP_KERNEL); obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj) if (!obj)
return NULL; return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&obj->mappings.list); INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock); mutex_init(&obj->mappings.lock);
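This one-liner is the driver-side half of the core change that lets struct drm_driver.gem_create_object return an error pointer: with the GEM helpers now expected to check the return value with IS_ERR() rather than comparing against NULL, returning ERR_PTR(-ENOMEM) keeps panfrost's allocation failure propagating as a proper error code.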


@ -6,7 +6,6 @@ config DRM_PL111
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK depends on COMMON_CLK
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_BRIDGE select DRM_BRIDGE
select DRM_PANEL_BRIDGE select DRM_PANEL_BRIDGE


@ -57,13 +57,16 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct qxl_bo *bo; struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) { list_for_each_entry(bo, &qdev->gem.objects, list) {
struct dma_resv_list *fobj; struct dma_resv_iter cursor;
int rel; struct dma_fence *fence;
int rel = 0;
rcu_read_lock(); dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true);
fobj = dma_resv_shared_list(bo->tbo.base.resv); dma_resv_for_each_fence_unlocked(&cursor, fence) {
rel = fobj ? fobj->shared_count : 0; if (dma_resv_iter_is_restarted(&cursor))
rcu_read_unlock(); rel = 0;
++rel;
}
seq_printf(m, "size %ld, pc %d, num releases %d\n", seq_printf(m, "size %ld, pc %d, num releases %d\n",
(unsigned long)bo->tbo.base.size, (unsigned long)bo->tbo.base.size,
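The loop above is the unlocked dma-resv iterator pattern that replaces the old dma_resv_shared_list() peeking. A self-contained sketch of the same counting idiom (hypothetical function, 5.16-era iterator API; the full patch presumably closes the walk with dma_resv_iter_end(), which the excerpt above truncates):

#include <linux/dma-resv.h>

/* Sketch: count all fences on a reservation object; purely illustrative. */
static unsigned int example_count_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, true); /* true = all fences, not just the exclusive one */
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* A restart means the fence list changed under us: start over. */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}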


@ -5,7 +5,6 @@ config DRM_RCAR_DU
depends on ARM || ARM64 depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS select VIDEOMODE_HELPERS
help help


@ -327,11 +327,11 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
*/ */
static const struct drm_gem_object_funcs rcar_du_gem_funcs = { static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
.free = drm_gem_cma_free_object, .free = drm_gem_cma_object_free,
.print_info = drm_gem_cma_print_info, .print_info = drm_gem_cma_object_print_info,
.get_sg_table = drm_gem_cma_get_sg_table, .get_sg_table = drm_gem_cma_object_get_sg_table,
.vmap = drm_gem_cma_vmap, .vmap = drm_gem_cma_object_vmap,
.mmap = drm_gem_cma_mmap, .mmap = drm_gem_cma_object_mmap,
.vm_ops = &drm_gem_cma_vm_ops, .vm_ops = &drm_gem_cma_vm_ops,
}; };
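The rename is part of the CMA helper interface cleanup in this series: the GEM-object callbacks now carry an _object_ infix (drm_gem_cma_object_free() and friends) and take the struct drm_gem_object, distinguishing them from the lower-level drm_gem_cma_*() functions that operate on struct drm_gem_cma_object. The table itself is otherwise unchanged.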


@ -5,7 +5,6 @@
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o


@ -26,7 +26,6 @@
#include "rockchip_drm_drv.h" #include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h" #include "rockchip_drm_fb.h"
#include "rockchip_drm_fbdev.h"
#include "rockchip_drm_gem.h" #include "rockchip_drm_gem.h"
#define DRIVER_NAME "rockchip" #define DRIVER_NAME "rockchip"
@ -159,10 +158,6 @@ static int rockchip_drm_bind(struct device *dev)
drm_mode_config_reset(drm_dev); drm_mode_config_reset(drm_dev);
ret = rockchip_drm_fbdev_init(drm_dev);
if (ret)
goto err_unbind_all;
/* init kms poll for handling hpd */ /* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev); drm_kms_helper_poll_init(drm_dev);
@ -170,10 +165,11 @@ static int rockchip_drm_bind(struct device *dev)
if (ret) if (ret)
goto err_kms_helper_poll_fini; goto err_kms_helper_poll_fini;
drm_fbdev_generic_setup(drm_dev, 0);
return 0; return 0;
err_kms_helper_poll_fini: err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev); drm_kms_helper_poll_fini(drm_dev);
rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all: err_unbind_all:
component_unbind_all(dev, drm_dev); component_unbind_all(dev, drm_dev);
err_iommu_cleanup: err_iommu_cleanup:
@ -189,7 +185,6 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev_unregister(drm_dev); drm_dev_unregister(drm_dev);
rockchip_drm_fbdev_fini(drm_dev);
drm_kms_helper_poll_fini(drm_dev); drm_kms_helper_poll_fini(drm_dev);
drm_atomic_helper_shutdown(drm_dev); drm_atomic_helper_shutdown(drm_dev);
@ -203,7 +198,6 @@ DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops);
static const struct drm_driver rockchip_drm_driver = { static const struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
.dumb_create = rockchip_gem_dumb_create, .dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,


@ -43,8 +43,6 @@ struct rockchip_crtc_state {
* @mm_lock: protect drm_mm on multi-threads. * @mm_lock: protect drm_mm on multi-threads.
*/ */
struct rockchip_drm_private { struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
struct iommu_domain *domain; struct iommu_domain *domain;
struct mutex mm_lock; struct mutex mm_lock;
struct drm_mm mm; struct drm_mm mm;


@ -1,164 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
#include <drm/drm.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_fbdev.h"
#define PREFERRED_BPP 32
#define to_drm_private(x) \
container_of(x, struct rockchip_drm_private, fbdev_helper)
static int rockchip_fbdev_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = info->par;
struct rockchip_drm_private *private = to_drm_private(helper);
return drm_gem_prime_mmap(private->fbdev_bo, vma);
}
static const struct fb_ops rockchip_drm_fbdev_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_mmap = rockchip_fbdev_mmap,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct rockchip_drm_private *private = to_drm_private(helper);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_device *dev = helper->dev;
struct rockchip_gem_object *rk_obj;
struct drm_framebuffer *fb;
unsigned int bytes_per_pixel;
unsigned long offset;
struct fb_info *fbi;
size_t size;
int ret;
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
rk_obj = rockchip_gem_create_object(dev, size, true);
if (IS_ERR(rk_obj))
return -ENOMEM;
private->fbdev_bo = &rk_obj->base;
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
ret = PTR_ERR(fbi);
goto out;
}
helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
private->fbdev_bo);
if (IS_ERR(helper->fb)) {
DRM_DEV_ERROR(dev->dev,
"Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto out;
}
fbi->fbops = &rockchip_drm_fbdev_ops;
fb = helper->fb;
drm_fb_helper_fill_info(fbi, helper, sizes);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
dev->mode_config.fb_base = 0;
fbi->screen_base = rk_obj->kvaddr + offset;
fbi->screen_size = rk_obj->base.size;
fbi->fix.smem_len = rk_obj->base.size;
DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
fb->width, fb->height, fb->format->depth,
rk_obj->kvaddr,
offset, size);
return 0;
out:
rockchip_gem_free_object(&rk_obj->base);
return ret;
}
static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
.fb_probe = rockchip_drm_fbdev_create,
};
int rockchip_drm_fbdev_init(struct drm_device *dev)
{
struct rockchip_drm_private *private = dev->dev_private;
struct drm_fb_helper *helper;
int ret;
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
return -EINVAL;
helper = &private->fbdev_helper;
drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize drm fb helper - %d.\n",
ret);
return ret;
}
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"Failed to set initial hw config - %d.\n",
ret);
goto err_drm_fb_helper_fini;
}
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(helper);
return ret;
}
void rockchip_drm_fbdev_fini(struct drm_device *dev)
{
struct rockchip_drm_private *private = dev->dev_private;
struct drm_fb_helper *helper;
helper = &private->fbdev_helper;
drm_fb_helper_unregister_fbi(helper);
if (helper->fb)
drm_framebuffer_put(helper->fb);
drm_fb_helper_fini(helper);
}


@ -1,24 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
#ifndef _ROCKCHIP_DRM_FBDEV_H
#define _ROCKCHIP_DRM_FBDEV_H
#ifdef CONFIG_DRM_FBDEV_EMULATION
int rockchip_drm_fbdev_init(struct drm_device *dev);
void rockchip_drm_fbdev_fini(struct drm_device *dev);
#else
static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
{
return 0;
}
static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
{
}
#endif
#endif /* _ROCKCHIP_DRM_FBDEV_H */


@ -5,7 +5,6 @@ config DRM_SHMOBILE
depends on ARCH_SHMOBILE || COMPILE_TEST depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
help help
Choose this option if you have an SH Mobile chipset. Choose this option if you have an SH Mobile chipset.


@ -5,7 +5,6 @@ config DRM_STI
select RESET_CONTROLLER select RESET_CONTROLLER
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL select DRM_PANEL
select FW_LOADER select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC select SND_SOC_HDMI_CODEC if SND_SOC


@ -4,7 +4,6 @@ config DRM_STM
depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM) depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL_BRIDGE select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB


@ -5,7 +5,6 @@ config DRM_SUN4I
depends on ARCH_SUNXI || COMPILE_TEST depends on ARCH_SUNXI || COMPILE_TEST
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL select DRM_PANEL
select REGMAP_MMIO select REGMAP_MMIO
select VIDEOMODE_HELPERS select VIDEOMODE_HELPERS


@ -3,7 +3,6 @@ config DRM_TIDSS
depends on DRM && OF depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
help help
The TI Keystone family SoCs introduced a new generation of The TI Keystone family SoCs introduced a new generation of


@ -88,7 +88,7 @@ static int __maybe_unused tidss_resume(struct device *dev)
return drm_mode_config_helper_resume(&tidss->ddev); return drm_mode_config_helper_resume(&tidss->ddev);
} }
static const struct dev_pm_ops tidss_pm_ops = { static __maybe_unused const struct dev_pm_ops tidss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume) SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL) SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL)
}; };


@ -3,7 +3,6 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller" tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM depends on DRM && OF && ARM
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_BRIDGE select DRM_BRIDGE
select DRM_PANEL_BRIDGE select DRM_PANEL_BRIDGE


@ -3,7 +3,7 @@
config DRM_ARCPGU config DRM_ARCPGU
tristate "ARC PGU" tristate "ARC PGU"
depends on DRM && OF depends on DRM && OF
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER select DRM_KMS_HELPER
help help
Choose this option if you have an ARC PGU controller. Choose this option if you have an ARC PGU controller.
@ -71,7 +71,7 @@ config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels" tristate "DRM support for HX8357D display panels"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
help help
@ -84,7 +84,7 @@ config TINYDRM_ILI9163
tristate "DRM support for ILI9163 display panels" tristate "DRM support for ILI9163 display panels"
depends on DRM && SPI depends on DRM && SPI
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
help help
@ -97,7 +97,7 @@ config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels" tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
help help
DRM driver for the following Ilitek ILI9225 panels: DRM driver for the following Ilitek ILI9225 panels:
@ -109,7 +109,7 @@ config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels" tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
help help
@ -122,7 +122,7 @@ config TINYDRM_ILI9486
tristate "DRM support for ILI9486 display panels" tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
help help
@ -136,7 +136,7 @@ config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT" tristate "DRM support for MI0283QT"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
help help
@ -147,7 +147,7 @@ config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)" tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
help help
DRM driver for the following Pervasive Displays panels: DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021) 1.44" TFT EPD Panel (E1144CS021)
@ -161,7 +161,7 @@ config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels" tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
help help
DRM driver for the following Sitronix ST7586 panels: DRM driver for the following Sitronix ST7586 panels:
@ -173,7 +173,7 @@ config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels" tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_MIPI_DBI select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE select BACKLIGHT_CLASS_DEVICE
help help


@ -1086,7 +1086,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
if (timeout == 0) if (timeout == 0)
return -EBUSY; return -EBUSY;
dma_resv_add_excl_fence(bo->base.resv, NULL);
return 0; return 0;
} }
EXPORT_SYMBOL(ttm_bo_wait); EXPORT_SYMBOL(ttm_bo_wait);
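With dma_resv_add_excl_fence() no longer accepting a NULL fence, the pruning call removed above has no replacement: the exclusive slot is only ever updated with a real fence, under the reservation lock. A minimal sketch of that remaining pattern, assuming a hypothetical foo_attach_excl_fence() helper and a caller-supplied non-NULL fence:

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hedged sketch: attach a real exclusive fence; passing NULL is no longer valid. */
static void foo_attach_excl_fence(struct ttm_buffer_object *bo,
				  struct dma_fence *fence)
{
	dma_resv_lock(bo->base.resv, NULL);		/* no ww_acquire_ctx needed here */
	dma_resv_add_excl_fence(bo->base.resv, fence);	/* fence must not be NULL */
	dma_resv_unlock(bo->base.resv);
}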


@ -8,7 +8,6 @@ config DRM_TVE200
select DRM_BRIDGE select DRM_BRIDGE
select DRM_PANEL_BRIDGE select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help help


@ -70,11 +70,11 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
struct drm_gem_object *obj; struct drm_gem_object *obj;
if (size == 0) if (size == 0)
return NULL; return ERR_PTR(-EINVAL);
bo = kzalloc(sizeof(*bo), GFP_KERNEL); bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo) if (!bo)
return NULL; return ERR_PTR(-ENOMEM);
obj = &bo->base.base; obj = &bo->base.base;
obj->funcs = &v3d_gem_funcs; obj->funcs = &v3d_gem_funcs;


@ -6,7 +6,6 @@ config DRM_VC4
depends on SND && SND_SOC depends on SND && SND_SOC
depends on COMMON_CLK depends on COMMON_CLK
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_PANEL_BRIDGE select DRM_PANEL_BRIDGE
select SND_PCM select SND_PCM


@ -177,7 +177,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL; bo->validated_shader = NULL;
} }
drm_gem_cma_free_object(obj); drm_gem_cma_free(&bo->base);
} }
static void vc4_bo_remove_from_cache(struct vc4_bo *bo) static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@ -720,7 +720,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL; return -EINVAL;
} }
return drm_gem_cma_mmap(obj, vma); return drm_gem_cma_mmap(&bo->base, vma);
} }
static const struct vm_operations_struct vc4_vm_ops = { static const struct vm_operations_struct vc4_vm_ops = {
@ -732,8 +732,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = { static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object, .free = vc4_free_object,
.export = vc4_prime_export, .export = vc4_prime_export,
.get_sg_table = drm_gem_cma_get_sg_table, .get_sg_table = drm_gem_cma_object_get_sg_table,
.vmap = drm_gem_cma_vmap, .vmap = drm_gem_cma_object_vmap,
.mmap = vc4_gem_object_mmap, .mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops, .vm_ops = &vc4_vm_ops,
}; };


@ -97,7 +97,7 @@ static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, siz
obj = kzalloc(sizeof(*obj), GFP_KERNEL); obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj) if (!obj)
return NULL; return ERR_PTR(-ENOMEM);
/* /*
* vgem doesn't have any begin/end cpu access ioctls, therefore must use * vgem doesn't have any begin/end cpu access ioctls, therefore must use


@ -139,7 +139,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem) if (!shmem)
return NULL; return ERR_PTR(-ENOMEM);
dshmem = &shmem->base.base; dshmem = &shmem->base.base;
dshmem->base.funcs = &virtio_gpu_shmem_funcs; dshmem->base.funcs = &virtio_gpu_shmem_funcs;


@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx_system_manager.o
vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o


@ -34,7 +34,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/swap.h>
#include <drm/drm_device.h> #include <drm/drm_device.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
@ -173,69 +172,7 @@ static struct kobj_type ttm_mem_zone_kobj_type = {
.sysfs_ops = &ttm_mem_zone_ops, .sysfs_ops = &ttm_mem_zone_ops,
.default_attrs = ttm_mem_zone_attrs, .default_attrs = ttm_mem_zone_attrs,
}; };
static struct kobj_type ttm_mem_glob_kobj_type = {0};
static struct attribute ttm_mem_global_lower_mem_limit = {
.name = "lower_mem_limit",
.mode = S_IRUGO | S_IWUSR
};
static ssize_t ttm_mem_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_mem_global *glob =
container_of(kobj, struct ttm_mem_global, kobj);
uint64_t val = 0;
spin_lock(&glob->lock);
val = glob->lower_mem_limit;
spin_unlock(&glob->lock);
/* convert from number of pages to KB */
val <<= (PAGE_SHIFT - 10);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(unsigned long long) val);
}
static ssize_t ttm_mem_global_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer,
size_t size)
{
int chars;
uint64_t val64;
unsigned long val;
struct ttm_mem_global *glob =
container_of(kobj, struct ttm_mem_global, kobj);
chars = sscanf(buffer, "%lu", &val);
if (chars == 0)
return size;
val64 = val;
/* convert from KB to number of pages */
val64 >>= (PAGE_SHIFT - 10);
spin_lock(&glob->lock);
glob->lower_mem_limit = val64;
spin_unlock(&glob->lock);
return size;
}
static struct attribute *ttm_mem_global_attrs[] = {
&ttm_mem_global_lower_mem_limit,
NULL
};
static const struct sysfs_ops ttm_mem_global_ops = {
.show = &ttm_mem_global_show,
.store = &ttm_mem_global_store,
};
static struct kobj_type ttm_mem_glob_kobj_type = {
.sysfs_ops = &ttm_mem_global_ops,
.default_attrs = ttm_mem_global_attrs,
};
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
bool from_wq, uint64_t extra) bool from_wq, uint64_t extra)
@ -435,11 +372,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
si_meminfo(&si); si_meminfo(&si);
spin_lock(&glob->lock);
/* set it as 0 by default to keep original behavior of OOM */
glob->lower_mem_limit = 0;
spin_unlock(&glob->lock);
ret = ttm_mem_init_kernel_zone(glob, &si); ret = ttm_mem_init_kernel_zone(glob, &si);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_zone; goto out_no_zone;
@ -526,35 +458,6 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
} }
EXPORT_SYMBOL(ttm_mem_global_free); EXPORT_SYMBOL(ttm_mem_global_free);
/*
* check if the available mem is under lower memory limit
*
* a. if no swap disk at all or free swap space is under swap_mem_limit
* but available system mem is bigger than sys_mem_limit, allow TTM
* allocation;
*
* b. if the available system mem is less than sys_mem_limit but free
* swap disk is bigger than swap_mem_limit, allow TTM allocation.
*/
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
uint64_t num_pages,
struct ttm_operation_ctx *ctx)
{
int64_t available;
/* We allow over commit during suspend */
if (ctx->force_alloc)
return false;
available = get_nr_swap_pages() + si_mem_available();
available -= num_pages;
if (available < glob->lower_mem_limit)
return true;
return false;
}
static int ttm_mem_global_reserve(struct ttm_mem_global *glob, static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone, struct ttm_mem_zone *single_zone,
uint64_t amount, bool reserve) uint64_t amount, bool reserve)


@ -50,8 +50,6 @@
* @work: The workqueue callback for the shrink queue. * @work: The workqueue callback for the shrink queue.
* @lock: Lock to protect the @shrink - and the memory accounting members, * @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions. * that is, essentially the whole structure with some exceptions.
* @lower_mem_limit: include lower limit of swap space and lower limit of
* system memory.
* @zones: Array of pointers to accounting zones. * @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array. * @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone. * @zone_kernel: Pointer to the kernel zone.
@ -69,7 +67,6 @@ extern struct ttm_mem_global {
struct workqueue_struct *swap_queue; struct workqueue_struct *swap_queue;
struct work_struct work; struct work_struct work;
spinlock_t lock; spinlock_t lock;
uint64_t lower_mem_limit;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones; unsigned int num_zones;
struct ttm_mem_zone *zone_kernel; struct ttm_mem_zone *zone_kernel;
@ -91,6 +88,5 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
void ttm_mem_global_free_page(struct ttm_mem_global *glob, void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page, uint64_t size); struct page *page, uint64_t size);
size_t ttm_round_pot(size_t size); size_t ttm_round_pot(size_t size);
bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
struct ttm_operation_ctx *ctx);
#endif #endif


@ -73,7 +73,7 @@ struct ttm_object_file {
struct ttm_object_device *tdev; struct ttm_object_device *tdev;
spinlock_t lock; spinlock_t lock;
struct list_head ref_list; struct list_head ref_list;
struct drm_open_hash ref_hash[TTM_REF_NUM]; struct vmwgfx_open_hash ref_hash[TTM_REF_NUM];
struct kref refcount; struct kref refcount;
}; };
@ -91,7 +91,7 @@ struct ttm_object_file {
struct ttm_object_device { struct ttm_object_device {
spinlock_t object_lock; spinlock_t object_lock;
struct drm_open_hash object_hash; struct vmwgfx_open_hash object_hash;
atomic_t object_count; atomic_t object_count;
struct ttm_mem_global *mem_glob; struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops; struct dma_buf_ops ops;
@ -123,7 +123,7 @@ struct ttm_object_device {
struct ttm_ref_object { struct ttm_ref_object {
struct rcu_head rcu_head; struct rcu_head rcu_head;
struct drm_hash_item hash; struct vmwgfx_hash_item hash;
struct list_head head; struct list_head head;
struct kref kref; struct kref kref;
enum ttm_ref_type ref_type; enum ttm_ref_type ref_type;
@ -247,12 +247,12 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
struct ttm_base_object * struct ttm_base_object *
ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
{ {
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret; int ret;
rcu_read_lock(); rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, key, &hash); ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (ret) { if (ret) {
rcu_read_unlock(); rcu_read_unlock();
return NULL; return NULL;
@ -267,12 +267,12 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key) uint32_t key)
{ {
struct ttm_base_object *base = NULL; struct ttm_base_object *base = NULL;
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret; int ret;
rcu_read_lock(); rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, key, &hash); ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
if (likely(ret == 0)) { if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
@ -312,12 +312,12 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
bool ttm_ref_object_exists(struct ttm_object_file *tfile, bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base) struct ttm_base_object *base)
{ {
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
struct ttm_ref_object *ref; struct ttm_ref_object *ref;
rcu_read_lock(); rcu_read_lock();
if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0)) if (unlikely(vmwgfx_ht_find_item_rcu(ht, base->handle, &hash) != 0))
goto out_false; goto out_false;
/* /*
@ -349,9 +349,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
enum ttm_ref_type ref_type, bool *existed, enum ttm_ref_type ref_type, bool *existed,
bool require_existed) bool require_existed)
{ {
struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref; struct ttm_ref_object *ref;
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
.interruptible = false, .interruptible = false,
@ -367,7 +367,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) { while (ret == -EINVAL) {
rcu_read_lock(); rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, base->handle, &hash); ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) { if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@ -398,7 +398,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
kref_init(&ref->kref); kref_init(&ref->kref);
spin_lock(&tfile->lock); spin_lock(&tfile->lock);
ret = drm_ht_insert_item_rcu(ht, &ref->hash); ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
if (likely(ret == 0)) { if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list); list_add_tail(&ref->head, &tfile->ref_list);
@ -426,11 +426,11 @@ ttm_ref_object_release(struct kref *kref)
container_of(kref, struct ttm_ref_object, kref); container_of(kref, struct ttm_ref_object, kref);
struct ttm_base_object *base = ref->obj; struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile; struct ttm_object_file *tfile = ref->tfile;
struct drm_open_hash *ht; struct vmwgfx_open_hash *ht;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
ht = &tfile->ref_hash[ref->ref_type]; ht = &tfile->ref_hash[ref->ref_type];
(void)drm_ht_remove_item_rcu(ht, &ref->hash); (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head); list_del(&ref->head);
spin_unlock(&tfile->lock); spin_unlock(&tfile->lock);
@ -446,13 +446,13 @@ ttm_ref_object_release(struct kref *kref)
int ttm_ref_object_base_unref(struct ttm_object_file *tfile, int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
unsigned long key, enum ttm_ref_type ref_type) unsigned long key, enum ttm_ref_type ref_type)
{ {
struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref; struct ttm_ref_object *ref;
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
int ret; int ret;
spin_lock(&tfile->lock); spin_lock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash); ret = vmwgfx_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
spin_unlock(&tfile->lock); spin_unlock(&tfile->lock);
return -EINVAL; return -EINVAL;
@ -486,7 +486,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
spin_unlock(&tfile->lock); spin_unlock(&tfile->lock);
for (i = 0; i < TTM_REF_NUM; ++i) for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]); vmwgfx_ht_remove(&tfile->ref_hash[i]);
ttm_object_file_unref(&tfile); ttm_object_file_unref(&tfile);
} }
@ -508,7 +508,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
INIT_LIST_HEAD(&tfile->ref_list); INIT_LIST_HEAD(&tfile->ref_list);
for (i = 0; i < TTM_REF_NUM; ++i) { for (i = 0; i < TTM_REF_NUM; ++i) {
ret = drm_ht_create(&tfile->ref_hash[i], hash_order); ret = vmwgfx_ht_create(&tfile->ref_hash[i], hash_order);
if (ret) { if (ret) {
j = i; j = i;
goto out_err; goto out_err;
@ -518,7 +518,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
return tfile; return tfile;
out_err: out_err:
for (i = 0; i < j; ++i) for (i = 0; i < j; ++i)
drm_ht_remove(&tfile->ref_hash[i]); vmwgfx_ht_remove(&tfile->ref_hash[i]);
kfree(tfile); kfree(tfile);
@ -539,7 +539,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
tdev->mem_glob = mem_glob; tdev->mem_glob = mem_glob;
spin_lock_init(&tdev->object_lock); spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0); atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order); ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
if (ret != 0) if (ret != 0)
goto out_no_object_hash; goto out_no_object_hash;
@ -564,7 +564,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
idr_destroy(&tdev->idr); idr_destroy(&tdev->idr);
drm_ht_remove(&tdev->object_hash); vmwgfx_ht_remove(&tdev->object_hash);
kfree(tdev); kfree(tdev);
} }


@ -42,9 +42,8 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <drm/drm_hashtab.h>
#include "ttm_memory.h" #include "ttm_memory.h"
#include "vmwgfx_hashtab.h"
/** /**
* enum ttm_ref_type * enum ttm_ref_type


@ -494,7 +494,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
drm_vma_node_reset(&bo->base.vma_node); drm_vma_node_reset(&bo->base.vma_node);
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
ttm_bo_type_device, placement, 0, ttm_bo_type_kernel, placement, 0,
&ctx, NULL, NULL, NULL); &ctx, NULL, NULL, NULL);
if (unlikely(ret)) if (unlikely(ret))
goto error_account; goto error_account;


@ -145,6 +145,13 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
(unsigned int) max, (unsigned int) max,
(unsigned int) min, (unsigned int) min,
(unsigned int) fifo->capabilities); (unsigned int) fifo->capabilities);
if (unlikely(min >= max)) {
drm_warn(&dev_priv->drm,
"FIFO memory is not usable. Driver failed to initialize.");
return ERR_PTR(-ENXIO);
}
return fifo; return fifo;
} }


@ -42,7 +42,7 @@
*/ */
struct vmw_cmdbuf_res { struct vmw_cmdbuf_res {
struct vmw_resource *res; struct vmw_resource *res;
struct drm_hash_item hash; struct vmwgfx_hash_item hash;
struct list_head head; struct list_head head;
enum vmw_cmdbuf_res_state state; enum vmw_cmdbuf_res_state state;
struct vmw_cmdbuf_res_manager *man; struct vmw_cmdbuf_res_manager *man;
@ -59,7 +59,7 @@ struct vmw_cmdbuf_res {
* @resources and @list are protected by the cmdbuf mutex for now. * @resources and @list are protected by the cmdbuf mutex for now.
*/ */
struct vmw_cmdbuf_res_manager { struct vmw_cmdbuf_res_manager {
struct drm_open_hash resources; struct vmwgfx_open_hash resources;
struct list_head list; struct list_head list;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
}; };
@ -81,11 +81,11 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type, enum vmw_cmdbuf_res_type res_type,
u32 user_key) u32 user_key)
{ {
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
int ret; int ret;
unsigned long key = user_key | (res_type << 24); unsigned long key = user_key | (res_type << 24);
ret = drm_ht_find_item(&man->resources, key, &hash); ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ERR_PTR(ret); return ERR_PTR(ret);
@ -105,7 +105,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry) struct vmw_cmdbuf_res *entry)
{ {
list_del(&entry->head); list_del(&entry->head);
WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
vmw_resource_unreference(&entry->res); vmw_resource_unreference(&entry->res);
kfree(entry); kfree(entry);
} }
@ -167,7 +167,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
vmw_cmdbuf_res_free(entry->man, entry); vmw_cmdbuf_res_free(entry->man, entry);
break; break;
case VMW_CMDBUF_RES_DEL: case VMW_CMDBUF_RES_DEL:
ret = drm_ht_insert_item(&entry->man->resources, &entry->hash); ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
BUG_ON(ret); BUG_ON(ret);
list_move_tail(&entry->head, &entry->man->list); list_move_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED; entry->state = VMW_CMDBUF_RES_COMMITTED;
@ -206,7 +206,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
return -ENOMEM; return -ENOMEM;
cres->hash.key = user_key | (res_type << 24); cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash); ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
kfree(cres); kfree(cres);
goto out_invalid_key; goto out_invalid_key;
@ -244,10 +244,10 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p) struct vmw_resource **res_p)
{ {
struct vmw_cmdbuf_res *entry; struct vmw_cmdbuf_res *entry;
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
int ret; int ret;
ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24), ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
&hash); &hash);
if (likely(ret != 0)) if (likely(ret != 0))
return -EINVAL; return -EINVAL;
@ -260,7 +260,7 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
*res_p = NULL; *res_p = NULL;
break; break;
case VMW_CMDBUF_RES_COMMITTED: case VMW_CMDBUF_RES_COMMITTED:
(void) drm_ht_remove_item(&man->resources, &entry->hash); (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head); list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL; entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list); list_add_tail(&entry->head, list);
@ -295,7 +295,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
man->dev_priv = dev_priv; man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list); INIT_LIST_HEAD(&man->list);
ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
if (ret == 0) if (ret == 0)
return man; return man;
@ -320,7 +320,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head) list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry); vmw_cmdbuf_res_free(man, entry);
drm_ht_remove(&man->resources); vmwgfx_ht_remove(&man->resources);
kfree(man); kfree(man);
} }


@ -1070,6 +1070,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"3D will be disabled.\n"); "3D will be disabled.\n");
dev_priv->has_mob = false; dev_priv->has_mob = false;
} }
if (vmw_sys_man_init(dev_priv) != 0) {
drm_info(&dev_priv->drm,
"No MOB page table memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
} }
if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
@ -1120,8 +1126,10 @@ out_no_fifo:
vmw_overlay_close(dev_priv); vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv); vmw_kms_close(dev_priv);
out_no_kms: out_no_kms:
if (dev_priv->has_mob) if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
vmw_sys_man_fini(dev_priv);
}
if (dev_priv->has_gmr) if (dev_priv->has_gmr)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_devcaps_destroy(dev_priv); vmw_devcaps_destroy(dev_priv);
@ -1155,7 +1163,7 @@ static void vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb); unregister_pm_notifier(&dev_priv->pm_nb);
if (dev_priv->ctx.res_ht_initialized) if (dev_priv->ctx.res_ht_initialized)
drm_ht_remove(&dev_priv->ctx.res_ht); vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce); vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) { if (dev_priv->enable_fb) {
vmw_fb_off(dev_priv); vmw_fb_off(dev_priv);
@ -1171,8 +1179,10 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_release_device_early(dev_priv); vmw_release_device_early(dev_priv);
if (dev_priv->has_mob) if (dev_priv->has_mob) {
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
vmw_sys_man_fini(dev_priv);
}
vmw_devcaps_destroy(dev_priv); vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv); vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev); ttm_device_fini(&dev_priv->bdev);
@ -1616,34 +1626,40 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
if (ret) if (ret)
return ret; goto out_error;
ret = pcim_enable_device(pdev); ret = pcim_enable_device(pdev);
if (ret) if (ret)
return ret; goto out_error;
vmw = devm_drm_dev_alloc(&pdev->dev, &driver, vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
struct vmw_private, drm); struct vmw_private, drm);
if (IS_ERR(vmw)) if (IS_ERR(vmw)) {
return PTR_ERR(vmw); ret = PTR_ERR(vmw);
goto out_error;
}
pci_set_drvdata(pdev, &vmw->drm); pci_set_drvdata(pdev, &vmw->drm);
ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
if (ret) if (ret)
return ret; goto out_error;
ret = vmw_driver_load(vmw, ent->device); ret = vmw_driver_load(vmw, ent->device);
if (ret) if (ret)
return ret; goto out_release;
ret = drm_dev_register(&vmw->drm, 0); ret = drm_dev_register(&vmw->drm, 0);
if (ret) { if (ret)
vmw_driver_unload(&vmw->drm); goto out_unload;
return ret;
}
return 0; return 0;
out_unload:
vmw_driver_unload(&vmw->drm);
out_release:
ttm_mem_global_release(&ttm_mem_glob);
out_error:
return ret;
} }
static int __init vmwgfx_init(void) static int __init vmwgfx_init(void)


@ -34,7 +34,6 @@
#include <drm/drm_auth.h> #include <drm/drm_auth.h>
#include <drm/drm_device.h> #include <drm/drm_device.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h> #include <drm/drm_rect.h>
#include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_bo_driver.h>
@ -43,6 +42,7 @@
#include "ttm_object.h" #include "ttm_object.h"
#include "vmwgfx_fence.h" #include "vmwgfx_fence.h"
#include "vmwgfx_hashtab.h"
#include "vmwgfx_reg.h" #include "vmwgfx_reg.h"
#include "vmwgfx_validation.h" #include "vmwgfx_validation.h"
@ -82,8 +82,9 @@
VMWGFX_NUM_GB_SURFACE +\ VMWGFX_NUM_GB_SURFACE +\
VMWGFX_NUM_GB_SCREEN_TARGET) VMWGFX_NUM_GB_SCREEN_TARGET)
#define VMW_PL_GMR (TTM_PL_PRIV + 0) #define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1) #define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)
#define VMW_RES_CONTEXT ttm_driver_type0 #define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1 #define VMW_RES_SURFACE ttm_driver_type1
@ -133,7 +134,7 @@ struct vmw_buffer_object {
*/ */
struct vmw_validate_buffer { struct vmw_validate_buffer {
struct ttm_validate_buffer base; struct ttm_validate_buffer base;
struct drm_hash_item hash; struct vmwgfx_hash_item hash;
bool validate_as_mob; bool validate_as_mob;
}; };
@ -406,7 +407,7 @@ struct vmw_ctx_validation_info;
* @ctx: The validation context * @ctx: The validation context
*/ */
struct vmw_sw_context{ struct vmw_sw_context{
struct drm_open_hash res_ht; struct vmwgfx_open_hash res_ht;
bool res_ht_initialized; bool res_ht_initialized;
bool kernel; bool kernel;
struct vmw_fpriv *fp; struct vmw_fpriv *fp;
@ -1039,7 +1040,6 @@ extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement; extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement; extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement; extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement; extern struct ttm_placement vmw_nonfixed_placement;
@ -1251,6 +1251,12 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type); int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type); void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
* System memory manager
*/
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);
/** /**
* Prime - vmwgfx_prime.c * Prime - vmwgfx_prime.c
*/ */


@ -4117,7 +4117,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_binding_state_reset(sw_context->staged_bindings); vmw_binding_state_reset(sw_context->staged_bindings);
if (!sw_context->res_ht_initialized) { if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_unlock; goto out_unlock;


@ -0,0 +1,199 @@
/*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <linux/export.h>
#include <linux/hash.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <drm/drm_print.h>
#include "vmwgfx_hashtab.h"
int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
ht->order = order;
ht->table = NULL;
if (size <= PAGE_SIZE / sizeof(*ht->table))
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
else
ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
return 0;
}
void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
{
struct vmwgfx_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
{
struct vmwgfx_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
{
struct vmwgfx_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry_rcu(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
{
struct vmwgfx_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
parent = &entry->head;
}
if (parent)
hlist_add_behind_rcu(&item->head, parent);
else
hlist_add_head_rcu(&item->head, h_list);
return 0;
}
/*
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
int ret;
unsigned long mask = (1UL << bits) - 1;
unsigned long first, unshifted_key;
unshifted_key = hash_long(seed, bits);
first = unshifted_key;
do {
item->key = (unshifted_key << shift) + add;
ret = vmwgfx_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
} while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
return -EINVAL;
}
return 0;
}
int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
struct vmwgfx_hash_item **item)
{
struct hlist_node *list;
list = vmwgfx_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
*item = hlist_entry(list, struct vmwgfx_hash_item, head);
return 0;
}
int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
list = vmwgfx_ht_find_key(ht, key);
if (list) {
hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
}
int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
{
hlist_del_init_rcu(&item->head);
return 0;
}
void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
{
if (ht->table) {
kvfree(ht->table);
ht->table = NULL;
}
}
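The file above is a verbatim copy of the legacy DRM open hashtable, renamed into the driver. As orientation, a minimal usage sketch under the same API, mirroring how the cmdbuf-resource and validation code below embed a vmwgfx_hash_item in their own structs; the foo_* names and the payload field are hypothetical:

#include <linux/kernel.h>	/* container_of() */
#include "vmwgfx_hashtab.h"

struct foo_entry {
	struct vmwgfx_hash_item hash;	/* hash.key doubles as the lookup key */
	void *payload;
};

static int foo_table_init(struct vmwgfx_open_hash *ht)
{
	return vmwgfx_ht_create(ht, 12);	/* 1 << 12 buckets, arbitrary order */
}

static int foo_add(struct vmwgfx_open_hash *ht, struct foo_entry *e,
		   unsigned long key)
{
	e->hash.key = key;
	return vmwgfx_ht_insert_item(ht, &e->hash);	/* -EINVAL if the key exists */
}

static struct foo_entry *foo_find(struct vmwgfx_open_hash *ht, unsigned long key)
{
	struct vmwgfx_hash_item *hash;

	if (vmwgfx_ht_find_item(ht, key, &hash) != 0)
		return NULL;
	return container_of(hash, struct foo_entry, hash);
}

Teardown is simply vmwgfx_ht_remove_item() per entry followed by vmwgfx_ht_remove() on the table, as vmw_cmdbuf_res_man_destroy() does further down in this diff.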


@ -1,5 +1,4 @@
/************************************************************************** /*
*
* Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
* All Rights Reserved. * All Rights Reserved.
* *
@ -22,9 +21,8 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE. * USE OR OTHER DEALINGS IN THE SOFTWARE.
* */
*
**************************************************************************/
/* /*
* Simple open hash tab implementation. * Simple open hash tab implementation.
* *
@ -32,48 +30,54 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com> * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/ */
#ifndef DRM_HASHTAB_H /*
#define DRM_HASHTAB_H * TODO: Replace this hashtable with Linux' generic implementation
* from <linux/hashtable.h>.
*/
#ifndef VMWGFX_HASHTAB_H
#define VMWGFX_HASHTAB_H
#include <linux/list.h> #include <linux/list.h>
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
struct drm_hash_item { struct vmwgfx_hash_item {
struct hlist_node head; struct hlist_node head;
unsigned long key; unsigned long key;
}; };
struct drm_open_hash { struct vmwgfx_open_hash {
struct hlist_head *table; struct hlist_head *table;
u8 order; u8 order;
}; };
int drm_ht_create(struct drm_open_hash *ht, unsigned int order); int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
unsigned long seed, int bits, int shift, unsigned long seed, int bits, int shift,
unsigned long add); unsigned long add);
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
struct vmwgfx_hash_item **item);
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
void drm_ht_remove(struct drm_open_hash *ht); void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);
/* /*
* RCU-safe interface * RCU-safe interface
* *
* The user of this API needs to make sure that two or more instances of the * The user of this API needs to make sure that two or more instances of the
* hash table manipulation functions are never run simultaneously. * hash table manipulation functions are never run simultaneously.
* The lookup function drm_ht_find_item_rcu may, however, run simultaneously * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously
* with any of the manipulation functions as long as it's called from within * with any of the manipulation functions as long as it's called from within
* an RCU read-locked section. * an RCU read-locked section.
*/ */
#define drm_ht_insert_item_rcu drm_ht_insert_item #define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please #define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
#define drm_ht_remove_key_rcu drm_ht_remove_key #define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
#define drm_ht_remove_item_rcu drm_ht_remove_item #define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
#define drm_ht_find_item_rcu drm_ht_find_item #define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item
#endif #endif
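The *_rcu aliases at the bottom only matter on the lookup side: writers must still be serialized externally, while readers may call vmwgfx_ht_find_item_rcu() under rcu_read_lock(), which is exactly what the ttm_object.c hunks above do. A hedged sketch reusing the hypothetical foo_entry type from the earlier sketch; note the caller must secure the entry's lifetime (for example by taking a reference) before leaving the read-side section:

#include <linux/rcupdate.h>

static struct foo_entry *foo_find_rcu(struct vmwgfx_open_hash *ht,
				      unsigned long key)
{
	struct vmwgfx_hash_item *hash;
	struct foo_entry *entry = NULL;

	rcu_read_lock();
	if (vmwgfx_ht_find_item_rcu(ht, key, &hash) == 0) {
		entry = container_of(hash, struct foo_entry, hash);
		/* grab a reference here, before rcu_read_unlock() */
	}
	rcu_read_unlock();

	return entry;
}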


@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2021 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/slab.h>
static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
*res = kzalloc(sizeof(**res), GFP_KERNEL);
if (!*res)
return -ENOMEM;
ttm_resource_init(bo, place, *res);
return 0;
}
static void vmw_sys_man_free(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
kfree(res);
}
static const struct ttm_resource_manager_func vmw_sys_manager_func = {
.alloc = vmw_sys_man_alloc,
.free = vmw_sys_man_free,
};
int vmw_sys_man_init(struct vmw_private *dev_priv)
{
struct ttm_device *bdev = &dev_priv->bdev;
struct ttm_resource_manager *man =
kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return -ENOMEM;
man->use_tt = true;
man->func = &vmw_sys_manager_func;
ttm_resource_manager_init(man, 0);
ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
ttm_resource_manager_set_used(man, true);
return 0;
}
void vmw_sys_man_fini(struct vmw_private *dev_priv)
{
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
VMW_PL_SYSTEM);
ttm_resource_manager_evict_all(&dev_priv->bdev, man);
ttm_resource_manager_set_used(man, false);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
kfree(man);
}


@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
} }
}; };
static const struct ttm_place vmw_sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_SYSTEM,
.flags = 0
};
struct ttm_placement vmw_vram_gmr_placement = { struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2, .num_placement = 2,
.placement = vram_gmr_placement_flags, .placement = vram_gmr_placement_flags,
@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags .busy_placement = &sys_placement_flags
}; };
static const struct ttm_place evictable_placement_flags[] = { struct ttm_placement vmw_pt_sys_placement = {
{ .num_placement = 1,
.fpfn = 0, .placement = &vmw_sys_placement_flags,
.lpfn = 0, .num_busy_placement = 1,
.mem_type = TTM_PL_SYSTEM, .busy_placement = &vmw_sys_placement_flags
.flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
.flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
.flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
.flags = 0
}
}; };
static const struct ttm_place nonfixed_placement_flags[] = { static const struct ttm_place nonfixed_placement_flags[] = {
@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = {
} }
}; };
struct ttm_placement vmw_evictable_placement = {
.num_placement = 4,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
struct ttm_placement vmw_srf_placement = { struct ttm_placement vmw_srf_placement = {
.num_placement = 1, .num_placement = 1,
.num_busy_placement = 2, .num_busy_placement = 2,
@ -484,6 +467,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev,
&vmw_be->vsgt, ttm->num_pages, &vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id); vmw_be->gmr_id);
break; break;
case VMW_PL_SYSTEM:
/* Nothing to be done for a system bind */
break;
default: default:
BUG(); BUG();
} }
@ -507,6 +493,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev,
case VMW_PL_MOB: case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break; break;
case VMW_PL_SYSTEM:
break;
default: default:
BUG(); BUG();
} }
@ -624,6 +612,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
switch (mem->mem_type) { switch (mem->mem_type) {
case TTM_PL_SYSTEM: case TTM_PL_SYSTEM:
case VMW_PL_SYSTEM:
case VMW_PL_GMR: case VMW_PL_GMR:
case VMW_PL_MOB: case VMW_PL_MOB:
return 0; return 0;
@ -670,6 +659,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
(void) ttm_bo_wait(bo, false, false); (void) ttm_bo_wait(bo, false, false);
} }
static bool vmw_memtype_is_system(uint32_t mem_type)
{
return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}
static int vmw_move(struct ttm_buffer_object *bo, static int vmw_move(struct ttm_buffer_object *bo,
bool evict, bool evict,
struct ttm_operation_ctx *ctx, struct ttm_operation_ctx *ctx,
@ -680,7 +674,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret; int ret;
if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) { if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem); ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
if (ret) if (ret)
return ret; return ret;
@ -689,7 +683,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
vmw_move_notify(bo, bo->resource, new_mem); vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) { if (old_man->use_tt && new_man->use_tt) {
if (bo->resource->mem_type == TTM_PL_SYSTEM) { if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem); ttm_bo_move_null(bo, new_mem);
return 0; return 0;
} }
@ -736,7 +730,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
int ret; int ret;
ret = vmw_bo_create_kernel(dev_priv, bo_size, ret = vmw_bo_create_kernel(dev_priv, bo_size,
&vmw_sys_placement, &vmw_pt_sys_placement,
&bo); &bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;


@ -43,7 +43,7 @@
*/ */
struct vmw_validation_bo_node { struct vmw_validation_bo_node {
struct ttm_validate_buffer base; struct ttm_validate_buffer base;
struct drm_hash_item hash; struct vmwgfx_hash_item hash;
unsigned int coherent_count; unsigned int coherent_count;
u32 as_mob : 1; u32 as_mob : 1;
u32 cpu_blit : 1; u32 cpu_blit : 1;
@ -72,7 +72,7 @@ struct vmw_validation_bo_node {
*/ */
struct vmw_validation_res_node { struct vmw_validation_res_node {
struct list_head head; struct list_head head;
struct drm_hash_item hash; struct vmwgfx_hash_item hash;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_buffer_object *new_backup; struct vmw_buffer_object *new_backup;
unsigned long new_backup_offset; unsigned long new_backup_offset;
@ -184,9 +184,9 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
return NULL; return NULL;
if (ctx->ht) { if (ctx->ht) {
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
bo_node = container_of(hash, typeof(*bo_node), hash); bo_node = container_of(hash, typeof(*bo_node), hash);
} else { } else {
struct vmw_validation_bo_node *entry; struct vmw_validation_bo_node *entry;
@ -221,9 +221,9 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
return NULL; return NULL;
if (ctx->ht) { if (ctx->ht) {
struct drm_hash_item *hash; struct vmwgfx_hash_item *hash;
if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash)) if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
res_node = container_of(hash, typeof(*res_node), hash); res_node = container_of(hash, typeof(*res_node), hash);
} else { } else {
struct vmw_validation_res_node *entry; struct vmw_validation_res_node *entry;
@ -280,7 +280,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
if (ctx->ht) { if (ctx->ht) {
bo_node->hash.key = (unsigned long) vbo; bo_node->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(ctx->ht, &bo_node->hash); ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
if (ret) { if (ret) {
DRM_ERROR("Failed to initialize a buffer " DRM_ERROR("Failed to initialize a buffer "
"validation entry.\n"); "validation entry.\n");
@ -335,7 +335,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
if (ctx->ht) { if (ctx->ht) {
node->hash.key = (unsigned long) res; node->hash.key = (unsigned long) res;
ret = drm_ht_insert_item(ctx->ht, &node->hash); ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
if (ret) { if (ret) {
DRM_ERROR("Failed to initialize a resource validation " DRM_ERROR("Failed to initialize a resource validation "
"entry.\n"); "entry.\n");
@ -688,13 +688,13 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
return; return;
list_for_each_entry(entry, &ctx->bo_list, base.head) list_for_each_entry(entry, &ctx->bo_list, base.head)
(void) drm_ht_remove_item(ctx->ht, &entry->hash); (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
list_for_each_entry(val, &ctx->resource_list, head) list_for_each_entry(val, &ctx->resource_list, head)
(void) drm_ht_remove_item(ctx->ht, &val->hash); (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
list_for_each_entry(val, &ctx->resource_ctx_list, head) list_for_each_entry(val, &ctx->resource_ctx_list, head)
(void) drm_ht_remove_item(ctx->ht, &val->hash); (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
ctx->ht = NULL; ctx->ht = NULL;
} }


@ -31,9 +31,10 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/ww_mutex.h> #include <linux/ww_mutex.h>
#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_execbuf_util.h> #include <drm/ttm/ttm_execbuf_util.h>
#include "vmwgfx_hashtab.h"
#define VMW_RES_DIRTY_NONE 0 #define VMW_RES_DIRTY_NONE 0
#define VMW_RES_DIRTY_SET BIT(0) #define VMW_RES_DIRTY_SET BIT(0)
#define VMW_RES_DIRTY_CLEAR BIT(1) #define VMW_RES_DIRTY_CLEAR BIT(1)
@ -73,7 +74,7 @@ struct vmw_validation_mem {
* @total_mem: Amount of reserved memory. * @total_mem: Amount of reserved memory.
*/ */
struct vmw_validation_context { struct vmw_validation_context {
struct drm_open_hash *ht; struct vmwgfx_open_hash *ht;
struct list_head resource_list; struct list_head resource_list;
struct list_head resource_ctx_list; struct list_head resource_ctx_list;
struct list_head bo_list; struct list_head bo_list;
@ -151,7 +152,7 @@ vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
* available at validation context declaration time * available at validation context declaration time
*/ */
static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx, static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
struct drm_open_hash *ht) struct vmwgfx_open_hash *ht)
{ {
ctx->ht = ht; ctx->ht = ht;
} }


@ -7,7 +7,6 @@ config DRM_ZYNQMP_DPSUB
depends on XILINX_ZYNQMP_DPDMA depends on XILINX_ZYNQMP_DPDMA
select DMA_ENGINE select DMA_ENGINE
select DRM_GEM_CMA_HELPER select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
select DRM_KMS_HELPER select DRM_KMS_HELPER
select GENERIC_PHY select GENERIC_PHY
help help


@ -6,16 +6,13 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <drm/drm_hashtab.h> #include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h> #include <drm/drm_mode_config.h>
struct drm_driver; struct drm_driver;
struct drm_minor; struct drm_minor;
struct drm_master; struct drm_master;
struct drm_device_dma;
struct drm_vblank_crtc; struct drm_vblank_crtc;
struct drm_sg_mem;
struct drm_local_map;
struct drm_vma_offset_manager; struct drm_vma_offset_manager;
struct drm_vram_mm; struct drm_vram_mm;
struct drm_fb_helper; struct drm_fb_helper;


@ -291,8 +291,9 @@ struct drm_driver {
/** /**
* @gem_create_object: constructor for gem objects * @gem_create_object: constructor for gem objects
* *
* Hook for allocating the GEM object struct, for use by the CMA and * Hook for allocating the GEM object struct, for use by the CMA
* SHMEM GEM helpers. * and SHMEM GEM helpers. Returns a GEM object on success, or an
* ERR_PTR()-encoded error code otherwise.
*/ */
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size); size_t size);
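Together with the v3d, vgem and virtio-gpu hunks earlier in this diff, the updated kerneldoc settles the contract: a gem_create_object hook reports failure with an ERR_PTR()-encoded code rather than NULL. A minimal sketch of the expected shape; the foo_* names are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_gem.h>

struct foo_bo {
	struct drm_gem_object base;
};

static struct drm_gem_object *foo_gem_create_object(struct drm_device *dev,
						    size_t size)
{
	struct foo_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);	/* previously: return NULL */

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);	/* previously: return NULL */

	/* set bo->base.funcs and the rest exactly as before; only the error paths change */
	return &bo->base;
}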


@@ -32,42 +32,108 @@ struct drm_gem_cma_object {
 #define to_drm_gem_cma_obj(gem_obj) \
 	container_of(gem_obj, struct drm_gem_cma_object, base)
 
-#ifndef CONFIG_MMU
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
-	.get_unmapped_area	= drm_gem_cma_get_unmapped_area,
-#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
-#endif
-
-/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for CMA based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
-	static const struct file_operations name = {\
-		.owner		= THIS_MODULE,\
-		.open		= drm_open,\
-		.release	= drm_release,\
-		.unlocked_ioctl	= drm_ioctl,\
-		.compat_ioctl	= drm_compat_ioctl,\
-		.poll		= drm_poll,\
-		.read		= drm_read,\
-		.llseek		= noop_llseek,\
-		.mmap		= drm_gem_mmap,\
-		DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
-	}
-
-/* free GEM object */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+					      size_t size);
+void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj);
+void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+			    struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj);
+int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map);
+int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_cma_free_object(). Drivers that employ the CMA helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	drm_gem_cma_free(cma_obj);
+}
+
+/**
+ * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers
+ * should use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent,
+						 const struct drm_gem_object *obj)
+{
+	const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	drm_gem_cma_print_info(cma_obj, p, indent);
+}
+
+/**
+ * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return drm_gem_cma_get_sg_table(cma_obj);
+}
+
+/*
+ * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the CMA GEM object's backing store.
+ *
+ * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return drm_gem_cma_vmap(cma_obj, map);
+}
+
+/**
+ * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_cma_mmap(). Drivers that employ the cma helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return drm_gem_cma_mmap(cma_obj, vma);
+}
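To show where these wrappers end up (illustration, not from the patch): a CMA-based driver can point its &drm_gem_object_funcs at them directly; in practice the DRM_GEM_CMA_DRIVER_OPS* macros further down in this header generate an equivalent table. The mydrv_gem_object_funcs name is made up.

static const struct drm_gem_object_funcs mydrv_gem_object_funcs = {
	.free		= drm_gem_cma_object_free,
	.print_info	= drm_gem_cma_object_print_info,
	.get_sg_table	= drm_gem_cma_object_get_sg_table,
	.vmap		= drm_gem_cma_object_vmap,
	.mmap		= drm_gem_cma_object_mmap,
	.vm_ops		= &drm_gem_cma_vm_ops,	/* default CMA VM operations */
};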
+
+/*
+ * Driver ops
+ */
+
 /* create memory region for DRM framebuffer */
 int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
@@ -79,30 +145,10 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
 			    struct drm_device *drm,
 			    struct drm_mode_create_dumb *args);
 
-/* allocate physical memory */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
-					      size_t size);
-
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
-
-#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
-					    unsigned long addr,
-					    unsigned long len,
-					    unsigned long pgoff,
-					    unsigned long flags);
-#endif
-
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
-			    const struct drm_gem_object *obj);
-
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
-
 struct drm_gem_object *
 drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
 				  struct dma_buf_attachment *attach,
 				  struct sg_table *sgt);
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
 /**
  * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
@@ -185,4 +231,47 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
 				       struct dma_buf_attachment *attach,
 				       struct sg_table *sgt);
 
+/*
+ * File ops
+ */
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+					    unsigned long addr,
+					    unsigned long len,
+					    unsigned long pgoff,
+					    unsigned long flags);
+#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+	.get_unmapped_area	= drm_gem_cma_get_unmapped_area,
+#else
+#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for CMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_CMA_FOPS(name) \
+	static const struct file_operations name = {\
+		.owner		= THIS_MODULE,\
+		.open		= drm_open,\
+		.release	= drm_release,\
+		.unlocked_ioctl	= drm_ioctl,\
+		.compat_ioctl	= drm_compat_ioctl,\
+		.poll		= drm_poll,\
+		.read		= drm_read,\
+		.llseek		= noop_llseek,\
+		.mmap		= drm_gem_mmap,\
+		DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+	}
+
 #endif /* __DRM_GEM_CMA_HELPER_H__ */
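A usage illustration for the macro documented above (hypothetical mydrv_* names, not taken from the patch): each driver instantiates its own file_operations and wires it into its &struct drm_driver.

DEFINE_DRM_GEM_CMA_FOPS(mydrv_fops);

static const struct drm_driver mydrv_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_CMA_DRIVER_OPS,		/* dumb_create, PRIME import, GEM object funcs */
	.fops			= &mydrv_fops,
	.name			= "mydrv",
};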


@@ -37,7 +37,6 @@
 #include <drm/drm.h>
 #include <drm/drm_auth.h>
-#include <drm/drm_hashtab.h>
 
 struct drm_device;
 struct drm_driver;
@@ -51,6 +50,20 @@ struct pci_driver;
  * you're doing it terribly wrong.
  */
 
+/*
+ * Hash-table Support
+ */
+
+struct drm_hash_item {
+	struct hlist_node head;
+	unsigned long key;
+};
+
+struct drm_open_hash {
+	struct hlist_head *table;
+	u8 order;
+};
+
 /**
  * DMA buffer.
  */
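For orientation (not part of the patch): legacy users embed &struct drm_hash_item in their own objects and let the table, an array of 1 << order hlist heads, index them by key. The mydrv_* names are invented and drm_ht_find_item() is the existing legacy helper, quoted from memory.

struct mydrv_object {
	struct drm_hash_item hash;	/* hash.key identifies this object */
	void *payload;
};

static struct mydrv_object *mydrv_lookup(struct drm_open_hash *ht,
					 unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))	/* non-zero means not found */
		return NULL;

	return container_of(item, struct mydrv_object, hash);
}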


@@ -32,7 +32,6 @@
 #define _TTM_BO_API_H_
 
 #include <drm/drm_gem.h>
-#include <drm/drm_hashtab.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/kref.h>
 #include <linux/list.h>


@@ -35,6 +35,17 @@
 
 /*
  * Memory regions for data placement.
+ *
+ * Buffers placed in TTM_PL_SYSTEM are considered under TTMs control and can
+ * be swapped out whenever TTMs thinks it is a good idea.
+ * In cases where drivers would like to use TTM_PL_SYSTEM as a valid
+ * placement they need to be able to handle the issues that arise due to the
+ * above manually.
+ *
+ * For BO's which reside in system memory but for which the accelerator
+ * requires direct access (i.e. their usage needs to be synchronized
+ * between the CPU and accelerator via fences) a new, driver private
+ * placement that can handle such scenarios is a good idea.
 */
 
 #define TTM_PL_SYSTEM		0
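Following the advice in the comment above, a driver-private placement would look roughly like the sketch below. This is illustrative only: the MYDRV_* names are invented, and the driver would still have to register a resource manager for that memory type and synchronize access via fences itself.

#include <drm/ttm/ttm_placement.h>

/* Fence-synchronized system memory, handled by the driver itself. */
#define MYDRV_PL_SYSTEM_COHERENT	(TTM_PL_PRIV + 0)

static const struct ttm_place mydrv_sys_coherent_place = {
	.fpfn		= 0,	/* no page-range restriction */
	.lpfn		= 0,
	.mem_type	= MYDRV_PL_SYSTEM_COHERENT,
	.flags		= 0,
};

static const struct ttm_placement mydrv_sys_coherent_placement = {
	.num_placement		= 1,
	.placement		= &mydrv_sys_coherent_place,
	.num_busy_placement	= 1,
	.busy_placement		= &mydrv_sys_coherent_place,
};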