

Merge tag 'drm-misc-next-fixes-2021-11-10' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

Removal of the TTM huge page functionality to address a crash, a timeout
fix for udl, CONFIG_FB dependency improvements, a fix for a circular
locking dependency in imx, a NULL pointer dereference fix for virtio, and
a naming collision fix for drm/locking.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20211110082114.vfpkpnecwdfg27lk@gilmour
Dave Airlie 2021-11-11 08:14:02 +10:00
commit f8ca7b7419
14 changed files with 23 additions and 193 deletions
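
For driver authors, the common thread in the TTM hunks below is that
ttm_bo_vm_fault_reserved() loses its fault_page_size argument now that TTM
no longer sets up huge-page faults. A reader's sketch of a fault handler
against the new signature, modelled on the nouveau and radeon callers
below (example_gem_fault() itself is hypothetical, not part of the merge):

/* Sketch only: the helper names come from include/drm/ttm/ttm_bo_api.h
 * as changed below; this function is illustrative. */
static vm_fault_t example_gem_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        /* The prefault count is now the only tuning knob; the old
         * fourth argument (fault_page_size) is gone. */
        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                       TTM_BO_VM_NUM_PREFAULT);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

        dma_resv_unlock(bo->base.resv);
        return ret;
}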


@@ -6154,8 +6154,7 @@ T: git git://anongit.freedesktop.org/drm/drm
 F: Documentation/devicetree/bindings/display/
 F: Documentation/devicetree/bindings/gpu/
 F: Documentation/gpu/
-F: drivers/gpu/drm/
-F: drivers/gpu/vga/
+F: drivers/gpu/
 F: include/drm/
 F: include/linux/vga*
 F: include/uapi/drm/


@@ -117,9 +117,8 @@ config DRM_DEBUG_MODESET_LOCK

 config DRM_FBDEV_EMULATION
         bool "Enable legacy fbdev support for your modesetting driver"
-        depends on DRM
-        depends on FB=y || FB=DRM
-        select DRM_KMS_HELPER
+        depends on DRM_KMS_HELPER
+        depends on FB=y || FB=DRM_KMS_HELPER
         select FB_CFB_FILLRECT
         select FB_CFB_COPYAREA
         select FB_CFB_IMAGEBLIT


@@ -60,9 +60,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
                         goto unlock;
                 }

-                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                                               TTM_BO_VM_NUM_PREFAULT, 1);
-                drm_dev_exit(idx);
+                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                                               TTM_BO_VM_NUM_PREFAULT);
+
+                drm_dev_exit(idx);
         } else {
                 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
         }


@@ -79,7 +79,7 @@
 static DEFINE_WW_CLASS(crtc_ww_class);

 #if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
-static noinline depot_stack_handle_t __stack_depot_save(void)
+static noinline depot_stack_handle_t __drm_stack_depot_save(void)
 {
         unsigned long entries[8];
         unsigned int n;
@@ -89,7 +89,7 @@ static noinline depot_stack_handle_t __stack_depot_save(void)
         return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
 }

-static void __stack_depot_print(depot_stack_handle_t stack_depot)
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
 {
         struct drm_printer p = drm_debug_printer("drm_modeset_lock");
         unsigned long *entries;
@@ -108,11 +108,11 @@ static void __stack_depot_print(depot_stack_handle_t stack_depot)
         kfree(buf);
 }
 #else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
-static depot_stack_handle_t __stack_depot_save(void)
+static depot_stack_handle_t __drm_stack_depot_save(void)
 {
         return 0;
 }
-static void __stack_depot_print(depot_stack_handle_t stack_depot)
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
 {
 }
 #endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(drm_modeset_acquire_fini);
 void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
 {
         if (WARN_ON(ctx->contended))
-                __stack_depot_print(ctx->stack_depot);
+                __drm_stack_depot_print(ctx->stack_depot);

         while (!list_empty(&ctx->locked)) {
                 struct drm_modeset_lock *lock;
@@ -286,7 +286,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
         int ret;

         if (WARN_ON(ctx->contended))
-                __stack_depot_print(ctx->stack_depot);
+                __drm_stack_depot_print(ctx->stack_depot);

         if (ctx->trylock_only) {
                 lockdep_assert_held(&ctx->ww_ctx);
@@ -317,7 +317,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
                 ret = 0;
         } else if (ret == -EDEADLK) {
                 ctx->contended = lock;
-                ctx->stack_depot = __stack_depot_save();
+                ctx->stack_depot = __drm_stack_depot_save();
         }

         return ret;
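
For context on the drm/locking rename: these helpers wrap the generic
stack depot so a contended acquire context can remember, and later print,
where it was taken. The drm_ prefix sidesteps a clash with lib/stackdepot,
which now has a __stack_depot_save() of its own — the "naming collision"
from the tag message. A reader's sketch of the save side, assuming the
generic <linux/stackdepot.h> API used in the hunk above:

/* Sketch only: capture the current stack once and keep a compact handle.
 * stack_trace_save() fills entries[], skipping one frame; the depot
 * deduplicates the trace and returns a small reusable handle. */
static depot_stack_handle_t example_save_stack(void)
{
        unsigned long entries[8];
        unsigned int n;

        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}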


@@ -81,7 +81,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
         struct drm_plane_state *old_plane_state, *new_plane_state;
         bool plane_disabling = false;
         int i;
-        bool fence_cookie = dma_fence_begin_signalling();

         drm_atomic_helper_commit_modeset_disables(dev, state);

@@ -112,7 +111,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
         }

         drm_atomic_helper_commit_hw_done(state);
-        dma_fence_end_signalling(fence_cookie);
 }

 static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {


@@ -56,7 +56,7 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)

         nouveau_bo_del_io_reserve_lru(bo);
         prot = vm_get_page_prot(vma->vm_flags);
-        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
         nouveau_bo_add_io_reserve_lru(bo);
         if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                 return ret;


@@ -61,7 +61,7 @@ static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
                 goto unlock_resv;

         ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                                       TTM_BO_VM_NUM_PREFAULT, 1);
+                                       TTM_BO_VM_NUM_PREFAULT);
         if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                 goto unlock_mclk;


@@ -173,89 +173,6 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/**
- * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
- * @vmf: Fault data
- * @bo: The buffer object
- * @page_offset: Page offset from bo start
- * @fault_page_size: The size of the fault in pages.
- * @pgprot: The page protections.
- * Does additional checking whether it's possible to insert a PUD or PMD
- * pfn and performs the insertion.
- *
- * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
- * a huge fault was not possible, or on insertion error.
- */
-static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
-                                        struct ttm_buffer_object *bo,
-                                        pgoff_t page_offset,
-                                        pgoff_t fault_page_size,
-                                        pgprot_t pgprot)
-{
-        pgoff_t i;
-        vm_fault_t ret;
-        unsigned long pfn;
-        pfn_t pfnt;
-        struct ttm_tt *ttm = bo->ttm;
-        bool write = vmf->flags & FAULT_FLAG_WRITE;
-
-        /* Fault should not cross bo boundary. */
-        page_offset &= ~(fault_page_size - 1);
-        if (page_offset + fault_page_size > bo->resource->num_pages)
-                goto out_fallback;
-
-        if (bo->resource->bus.is_iomem)
-                pfn = ttm_bo_io_mem_pfn(bo, page_offset);
-        else
-                pfn = page_to_pfn(ttm->pages[page_offset]);
-
-        /* pfn must be fault_page_size aligned. */
-        if ((pfn & (fault_page_size - 1)) != 0)
-                goto out_fallback;
-
-        /* Check that memory is contiguous. */
-        if (!bo->resource->bus.is_iomem) {
-                for (i = 1; i < fault_page_size; ++i) {
-                        if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
-                                goto out_fallback;
-                }
-        } else if (bo->bdev->funcs->io_mem_pfn) {
-                for (i = 1; i < fault_page_size; ++i) {
-                        if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
-                                goto out_fallback;
-                }
-        }
-
-        pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
-        if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
-                ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-        else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
-                ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
-#endif
-        else
-                WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
-
-        if (ret != VM_FAULT_NOPAGE)
-                goto out_fallback;
-
-        return VM_FAULT_NOPAGE;
-out_fallback:
-        count_vm_event(THP_FAULT_FALLBACK);
-        return VM_FAULT_FALLBACK;
-}
-#else
-static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
-                                        struct ttm_buffer_object *bo,
-                                        pgoff_t page_offset,
-                                        pgoff_t fault_page_size,
-                                        pgprot_t pgprot)
-{
-        return VM_FAULT_FALLBACK;
-}
-#endif
-
 /**
  * ttm_bo_vm_fault_reserved - TTM fault helper
  * @vmf: The struct vm_fault given as argument to the fault callback
@@ -263,7 +180,6 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
  * @num_prefault: Maximum number of prefault pages. The caller may want to
  * specify this based on madvice settings and the size of the GPU object
  * backed by the memory.
- * @fault_page_size: The size of the fault in pages.
  *
  * This function inserts one or more page table entries pointing to the
  * memory backing the buffer object, and then returns a return code
@@ -277,8 +193,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
  */
 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                                     pgprot_t prot,
-                                    pgoff_t num_prefault,
-                                    pgoff_t fault_page_size)
+                                    pgoff_t num_prefault)
 {
         struct vm_area_struct *vma = vmf->vma;
         struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -329,11 +244,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                 prot = pgprot_decrypted(prot);
         }

-        /* We don't prefault on huge faults. Yet. */
-        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
-                return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
-                                             fault_page_size, prot);
-
         /*
          * Speculatively prefault a number of pages. Only error on
          * first page.
@@ -429,7 +339,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)

         prot = vma->vm_page_prot;
         if (drm_dev_enter(ddev, &idx)) {
-                ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+                ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
                 drm_dev_exit(idx);
         } else {
                 ret = ttm_bo_vm_dummy_page(vmf, prot);
         }


@@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
                 int bval = (i + block * EDID_LENGTH) << 8;
                 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                       0x02, (0x80 | (0x02 << 5)), bval,
-                                      0xA1, read_buff, 2, HZ);
+                                      0xA1, read_buff, 2, 1000);
                 if (ret < 1) {
                         DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
                         kfree(read_buff);
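
The udl change is a units fix: usb_control_msg() takes its timeout in
milliseconds, not jiffies, so passing HZ meant "HZ milliseconds" — a
quarter of a second on a CONFIG_HZ=250 kernel — rather than the intended
one second. The corrected call shape, with bval and read_buff as in the
hunk above:

        /* The final argument is a timeout in milliseconds; 1000 = 1 s.
         * The old HZ value was only right on CONFIG_HZ=1000 kernels. */
        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              0x02, (0x80 | (0x02 << 5)), bval,
                              0xA1, read_buff, 2, 1000);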


@@ -163,10 +163,11 @@ static __poll_t virtio_gpu_poll(struct file *filp,
         struct drm_file *drm_file = filp->private_data;
         struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
         struct drm_device *dev = drm_file->minor->dev;
+        struct virtio_gpu_device *vgdev = dev->dev_private;
         struct drm_pending_event *e = NULL;
         __poll_t mask = 0;

-        if (!vfpriv->ring_idx_mask)
+        if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
                 return drm_poll(filp, wait);

         poll_wait(filp, &drm_file->event_wait, wait);
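
The virtio fix leans on short-circuit evaluation: vfpriv->ring_idx_mask
is only dereferenced after both the virgl-3D check and the NULL check
have passed, which is what prevents the reported NULL pointer dereference
for files opened without 3D context state. A minimal standalone
illustration (hypothetical type and function, not driver code):

#include <stdbool.h>
#include <stddef.h>

struct fpriv {
        unsigned long ring_idx_mask;
};

/* Mirrors the guard in virtio_gpu_poll(): with &&, f->ring_idx_mask is
 * never read when f is NULL, so polling falls back to the generic path. */
static bool wants_driver_poll(bool has_virgl_3d, const struct fpriv *f)
{
        return has_virgl_3d && f && f->ring_idx_mask;
}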


@@ -1550,10 +1550,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
                         pgoff_t start, pgoff_t end);
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
-                                enum page_entry_size pe_size);
-#endif

 /* Transparent hugepage support - vmwgfx_thp.c */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE


@@ -477,7 +477,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
         else
                 prot = vm_get_page_prot(vma->vm_flags);

-        ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
+        ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
         if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                 return ret;

@@ -486,73 +486,3 @@ out_unlock:
         return ret;
 }

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
-                                enum page_entry_size pe_size)
-{
-        struct vm_area_struct *vma = vmf->vma;
-        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
-            vma->vm_private_data;
-        struct vmw_buffer_object *vbo =
-                container_of(bo, struct vmw_buffer_object, base);
-        pgprot_t prot;
-        vm_fault_t ret;
-        pgoff_t fault_page_size;
-        bool write = vmf->flags & FAULT_FLAG_WRITE;
-
-        switch (pe_size) {
-        case PE_SIZE_PMD:
-                fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
-                break;
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-        case PE_SIZE_PUD:
-                fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
-                break;
-#endif
-        default:
-                WARN_ON_ONCE(1);
-                return VM_FAULT_FALLBACK;
-        }
-
-        /* Always do write dirty-tracking and COW on PTE level. */
-        if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags)))
-                return VM_FAULT_FALLBACK;
-
-        ret = ttm_bo_vm_reserve(bo, vmf);
-        if (ret)
-                return ret;
-
-        if (vbo->dirty) {
-                pgoff_t allowed_prefault;
-                unsigned long page_offset;
-
-                page_offset = vmf->pgoff -
-                        drm_vma_node_start(&bo->base.vma_node);
-                if (page_offset >= bo->resource->num_pages ||
-                    vmw_resources_clean(vbo, page_offset,
-                                        page_offset + PAGE_SIZE,
-                                        &allowed_prefault)) {
-                        ret = VM_FAULT_SIGBUS;
-                        goto out_unlock;
-                }
-
-                /*
-                 * Write protect, so we get a new fault on write, and can
-                 * split.
-                 */
-                prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
-        } else {
-                prot = vm_get_page_prot(vma->vm_flags);
-        }
-
-        ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
-        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-                return ret;
-
-out_unlock:
-        dma_resv_unlock(bo->base.resv);
-
-        return ret;
-}
-#endif


@@ -61,9 +61,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
                 .fault = vmw_bo_vm_fault,
                 .open = ttm_bo_vm_open,
                 .close = ttm_bo_vm_close,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                .huge_fault = vmw_bo_vm_huge_fault,
-#endif
         };
         struct drm_file *file_priv = filp->private_data;
         struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);


@@ -584,8 +584,7 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,

 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                                     pgprot_t prot,
-                                    pgoff_t num_prefault,
-                                    pgoff_t fault_page_size);
+                                    pgoff_t num_prefault);

 vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);