
drm/ttm: wire up the new pool as default one v2

Provide the necessary parameters from all drivers and use the new pool
allocator when no driver-specific function is provided.

v2: fix the GEM VRAM helpers

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com>
Tested-by: Huang Rui <ray.huang@amd.com>
Link: https://patchwork.freedesktop.org/patch/397081/?series=83051&rev=1
Author: Christian König
Date:   2020-10-24 13:10:28 +02:00
commit ee5d2a8e54
parent d099fc8f54

10 changed files with 36 additions and 26 deletions
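For reference, a minimal sketch of how a driver calls the reworked ttm_bo_device_init() after this patch, following the new prototype in the ttm_bo_driver.h hunk below; the foo_* names are placeholders for illustration only and are not part of the patch:

        /* Hypothetical driver init: pass the struct device plus the two new flags. */
        ret = ttm_bo_device_init(&foo->bdev, &foo_bo_driver,
                                 foo->dev,                      /* device used by the pool for DMA allocations */
                                 drm->anon_inode->i_mapping,
                                 drm->vma_offset_manager,
                                 foo_uses_coherent_dma,         /* use_dma_alloc */
                                 foo_needs_dma32);              /* use_dma32 */
        if (ret)
                return ret;

When a driver provides no ttm_tt_populate()/ttm_tt_unpopulate() callbacks, TTM now falls back to ttm_pool_alloc()/ttm_pool_free() on the per-device pool initialized here, as the ttm_tt.c hunk below shows.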

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1914,10 +1914,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         mutex_init(&adev->mman.gtt_window_lock);
 
         /* No others user of address space so set it to 0 */
-        r = ttm_bo_device_init(&adev->mman.bdev,
-                               &amdgpu_bo_driver,
+        r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
                                adev_to_drm(adev)->anon_inode->i_mapping,
                                adev_to_drm(adev)->vma_offset_manager,
+                               adev->need_swiotlb,
                                dma_addressing_limited(adev->dev));
         if (r) {
                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);

drivers/gpu/drm/drm_gem_vram_helper.c

@@ -1045,10 +1045,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
         vmm->vram_base = vram_base;
         vmm->vram_size = vram_size;
 
-        ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+        ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
                                  dev->anon_inode->i_mapping,
                                  dev->vma_offset_manager,
-                                 true);
+                                 false, true);
         if (ret)
                 return ret;

drivers/gpu/drm/nouveau/nouveau_ttm.c

@@ -281,6 +281,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
         struct nvkm_pci *pci = device->pci;
         struct nvif_mmu *mmu = &drm->client.mmu;
         struct drm_device *dev = drm->dev;
+        bool need_swiotlb = false;
         int typei, ret;
 
         ret = nouveau_ttm_init_host(drm, 0);
@@ -315,11 +316,14 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                 drm->agp.cma = pci->agp.cma;
         }
 
-        ret = ttm_bo_device_init(&drm->ttm.bdev,
-                                 &nouveau_bo_driver,
-                                 dev->anon_inode->i_mapping,
-                                 dev->vma_offset_manager,
-                                 drm->client.mmu.dmabits <= 32 ? true : false);
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
+        need_swiotlb = !!swiotlb_nr_tbl();
+#endif
+
+        ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
+                                 drm->dev->dev, dev->anon_inode->i_mapping,
+                                 dev->vma_offset_manager, need_swiotlb,
+                                 drm->client.mmu.dmabits <= 32);
         if (ret) {
                 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                 return ret;

drivers/gpu/drm/qxl/qxl_ttm.c

@@ -194,11 +194,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
         int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 
         /* No others user of address space so set it to 0 */
-        r = ttm_bo_device_init(&qdev->mman.bdev,
-                               &qxl_bo_driver,
+        r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
                                qdev->ddev.anon_inode->i_mapping,
                                qdev->ddev.vma_offset_manager,
-                               false);
+                               false, false);
         if (r) {
                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                 return r;

drivers/gpu/drm/radeon/radeon_ttm.c

@@ -846,10 +846,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
         int r;
 
         /* No others user of address space so set it to 0 */
-        r = ttm_bo_device_init(&rdev->mman.bdev,
-                               &radeon_bo_driver,
+        r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
                                rdev->ddev->anon_inode->i_mapping,
                                rdev->ddev->vma_offset_manager,
+                               rdev->need_swiotlb,
                                dma_addressing_limited(&rdev->pdev->dev));
         if (r) {
                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);

drivers/gpu/drm/ttm/ttm_bo.c

@@ -1283,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                         pr_debug("Swap list %d was clean\n", i);
         spin_unlock(&glob->lru_lock);
 
+        ttm_pool_fini(&bdev->pool);
+
         if (!ret)
                 ttm_bo_global_release();
@@ -1307,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                        struct ttm_bo_driver *driver,
+                       struct device *dev,
                        struct address_space *mapping,
                        struct drm_vma_offset_manager *vma_manager,
-                       bool need_dma32)
+                       bool use_dma_alloc, bool use_dma32)
 {
         struct ttm_bo_global *glob = &ttm_bo_glob;
         int ret;
@@ -1324,12 +1327,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         bdev->driver = driver;
 
         ttm_bo_init_sysman(bdev);
+        ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
 
         bdev->vma_manager = vma_manager;
         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
         INIT_LIST_HEAD(&bdev->ddestroy);
         bdev->dev_mapping = mapping;
-        bdev->need_dma32 = need_dma32;
+        bdev->need_dma32 = use_dma32;
         mutex_lock(&ttm_global_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
         mutex_unlock(&ttm_global_mutex);

drivers/gpu/drm/ttm/ttm_memory.c

@@ -454,7 +454,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
         }
         ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
         ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
-        ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+        ttm_pool_mgr_init(glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
         return 0;
 out_no_zone:
         ttm_mem_global_release(glob);

drivers/gpu/drm/ttm/ttm_tt.c

@@ -37,7 +37,6 @@
 #include <linux/file.h>
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -321,7 +320,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
         if (bdev->driver->ttm_tt_populate)
                 ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
         else
-                ret = ttm_pool_populate(ttm, ctx);
+                ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
         if (ret)
                 return ret;
@@ -363,6 +362,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
         if (bdev->driver->ttm_tt_unpopulate)
                 bdev->driver->ttm_tt_unpopulate(bdev, ttm);
         else
-                ttm_pool_unpopulate(ttm);
+                ttm_pool_free(&bdev->pool, ttm);
         ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
 }

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -878,10 +878,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         drm_vma_offset_manager_init(&dev_priv->vma_manager,
                                     DRM_FILE_PAGE_OFFSET_START,
                                     DRM_FILE_PAGE_OFFSET_SIZE);
-        ret = ttm_bo_device_init(&dev_priv->bdev,
-                                 &vmw_bo_driver,
+        ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
+                                 dev_priv->dev->dev,
                                  dev->anon_inode->i_mapping,
                                  &dev_priv->vma_manager,
+                                 dev_priv->map_mode == vmw_dma_alloc_coherent,
                                  false);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Failed initializing TTM buffer object driver.\n");

include/drm/ttm/ttm_bo_driver.h

@@ -42,6 +42,7 @@
 #include "ttm_module.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
+#include "ttm_pool.h"
 
 /**
  * struct ttm_bo_driver
@@ -295,6 +296,7 @@ struct ttm_bo_device {
          * Protected by internal locks.
          */
         struct drm_vma_offset_manager *vma_manager;
+        struct ttm_pool pool;
 
         /*
          * Protected by the global:lru lock.
@@ -395,11 +397,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
  * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
  * @mapping: The address space to use for this bo.
  * @vma_manager: A pointer to a vma manager.
- * @file_page_offset: Offset into the device address space that is available
- * for buffer data. This ensures compatibility with other users of the
- * address space.
+ * @use_dma_alloc: If coherent DMA allocation API should be used.
+ * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
  *
  * Initializes a struct ttm_bo_device:
  * Returns:
@@ -407,9 +409,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  */
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                        struct ttm_bo_driver *driver,
+                       struct device *dev,
                        struct address_space *mapping,
                        struct drm_vma_offset_manager *vma_manager,
-                       bool need_dma32);
+                       bool use_dma_alloc, bool use_dma32);
 
 /**
  * ttm_bo_unmap_virtual