drm/i915/region: support contiguous allocations
Some kernel internal objects may need to be allocated as a contiguous block. Thinking ahead, the various kernel io_mapping interfaces also seem to expect it, although this is purely a limitation in the kernel API... so perhaps something to be improved.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Michael J Ruhl <michael.j.ruhl@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191008160116.18379-3-matthew.auld@intel.com
parent 232a6ebae4
commit 2f0b97ca02
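For context, a minimal sketch of how a caller inside the driver might use the new flag. This is a hypothetical helper, not part of the patch; only i915_gem_object_create_region(), i915_gem_object_is_contiguous() and GEM_BUG_ON() come from the code below, the surrounding plumbing is assumed:

#include "gem/i915_gem_region.h"

/* Hypothetical helper: allocate a physically contiguous chunk from a region. */
static struct drm_i915_gem_object *
example_alloc_contiguous(struct intel_memory_region *mem, resource_size_t size)
{
	struct drm_i915_gem_object *obj;

	/* I915_BO_ALLOC_CONTIGUOUS asks the buddy allocator for one block. */
	obj = i915_gem_object_create_region(mem, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return obj;

	/* The new predicate lets later code verify the allocation mode. */
	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
	return obj;
}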
@@ -139,6 +139,12 @@ i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
 	return obj->base.vma_node.readonly;
 }
 
+static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
 static inline bool
 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
 			 unsigned long flags)
@@ -119,6 +119,10 @@ struct drm_i915_gem_object {
 
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
+	unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
 	/*
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 {
 	struct intel_memory_region *mem = obj->mm.region;
 	struct list_head *blocks = &obj->mm.blocks;
-	unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
 	resource_size_t size = obj->base.size;
 	resource_size_t prev_end;
 	struct i915_buddy_block *block;
+	unsigned int flags;
 	struct sg_table *st;
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
@@ -41,6 +41,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 		return -ENOMEM;
 	}
 
+	flags = I915_ALLOC_MIN_PAGE_SIZE;
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags |= I915_ALLOC_CONTIGUOUS;
+
 	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
 	if (ret)
 		goto err_free_sg;
@@ -55,7 +59,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	list_for_each_entry(block, blocks, link) {
 		u64 block_size, offset;
 
-		block_size = i915_buddy_block_size(&mem->mm, block);
+		block_size = min_t(u64, size,
+				   i915_buddy_block_size(&mem->mm, block));
 		offset = i915_buddy_block_offset(block);
 
 		GEM_BUG_ON(overflows_type(block_size, sg->length));
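Why the clamp above matters (a reading of the change, not text from the commit): a contiguous request rounds the backing allocation up to a power of two, so the single buddy block handed back can be larger than obj->base.size. Clamping with min_t() keeps the scatterlist entry within the object, e.g. a 24K contiguous object backed by one 32K block still reports a 24K sg entry.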
@@ -96,10 +101,12 @@ err_free_sg:
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem)
+					struct intel_memory_region *mem,
+					unsigned long flags)
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
 	obj->mm.region = intel_memory_region_get(mem);
+	obj->flags |= flags;
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
@@ -120,6 +127,8 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	 * future.
 	 */
 
+	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
 	if (!mem)
 		return ERR_PTR(-ENODEV);
 
@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem);
+					struct intel_memory_region *mem,
+					unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
@@ -456,6 +456,7 @@ out_device:
 
 static int igt_mock_memory_region_huge_pages(void *arg)
 {
+	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
 	struct i915_ppgtt *ppgtt = arg;
 	struct drm_i915_private *i915 = ppgtt->vm.i915;
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -474,46 +475,52 @@ static int igt_mock_memory_region_huge_pages(void *arg)
 	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
 		unsigned int page_size = BIT(bit);
 		resource_size_t phys;
+		int i;
 
-		obj = i915_gem_object_create_region(mem, page_size, 0);
-		if (IS_ERR(obj)) {
-			err = PTR_ERR(obj);
-			goto out_region;
+		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+			obj = i915_gem_object_create_region(mem, page_size,
+							    flags[i]);
+			if (IS_ERR(obj)) {
+				err = PTR_ERR(obj);
+				goto out_region;
+			}
+
+			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto out_put;
+			}
+
+			err = i915_vma_pin(vma, 0, 0, PIN_USER);
+			if (err)
+				goto out_close;
+
+			err = igt_check_page_sizes(vma);
+			if (err)
+				goto out_unpin;
+
+			phys = i915_gem_object_get_dma_address(obj, 0);
+			if (!IS_ALIGNED(phys, page_size)) {
+				pr_err("%s addr misaligned(%pa) page_size=%u\n",
+				       __func__, &phys, page_size);
+				err = -EINVAL;
+				goto out_unpin;
+			}
+
+			if (vma->page_sizes.gtt != page_size) {
+				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+				       __func__, vma->page_sizes.gtt,
+				       page_size);
+				err = -EINVAL;
+				goto out_unpin;
+			}
+
+			i915_vma_unpin(vma);
+			i915_vma_close(vma);
+
+			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			i915_gem_object_put(obj);
 		}
-
-		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto out_put;
-		}
-
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
-		if (err)
-			goto out_close;
-
-		err = igt_check_page_sizes(vma);
-		if (err)
-			goto out_unpin;
-
-		phys = i915_gem_object_get_dma_address(obj, 0);
-		if (!IS_ALIGNED(phys, page_size)) {
-			pr_err("%s addr misaligned(%pa) page_size=%u\n",
-			       __func__, &phys, page_size);
-			err = -EINVAL;
-			goto out_unpin;
-		}
-
-		if (vma->page_sizes.gtt != page_size) {
-			pr_err("%s page_sizes.gtt=%u, expected=%u\n",
-			       __func__, vma->page_sizes.gtt, page_size);
-			err = -EINVAL;
-			goto out_unpin;
-		}
-
-		i915_vma_unpin(vma);
-		i915_vma_close(vma);
-
-		i915_gem_object_put(obj);
 	}
 
 	goto out_region;
@@ -47,8 +47,8 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 				      unsigned int flags,
 				      struct list_head *blocks)
 {
-	unsigned long n_pages = size >> ilog2(mem->mm.chunk_size);
 	unsigned int min_order = 0;
+	unsigned long n_pages;
 
 	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
 	GEM_BUG_ON(!list_empty(blocks));
@@ -58,6 +58,13 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 			    ilog2(mem->mm.chunk_size);
 	}
 
+	if (flags & I915_ALLOC_CONTIGUOUS) {
+		size = roundup_pow_of_two(size);
+		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+	}
+
+	n_pages = size >> ilog2(mem->mm.chunk_size);
+
 	mutex_lock(&mem->mm_lock);
 
 	do {
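To make the min_order trick concrete: forcing min_order to cover the whole (rounded) size means the buddy allocator can only satisfy the request with a single block, which is what guarantees contiguity. A small freestanding sketch of the arithmetic (plain userspace C, not driver code; the 4096-byte chunk_size is an assumed value for illustration, and the builtins stand in for roundup_pow_of_two() and ilog2()):

#include <stdio.h>

/* Round up to the next power of two, as the kernel's roundup_pow_of_two() does. */
static unsigned long long roundup_pow2(unsigned long long x)
{
	unsigned long long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long long chunk_size = 4096;	/* assumed mm.chunk_size */
	unsigned long long size = 24 << 10;	/* a 24K contiguous request */
	unsigned long long rounded = roundup_pow2(size);
	/* ctz of a power of two is its log2, so this mirrors the ilog2() subtraction. */
	unsigned int min_order = __builtin_ctzll(rounded) - __builtin_ctzll(chunk_size);

	/* 24K rounds to 32K; min_order = 15 - 12 = 3, i.e. one order-3 block of 8 pages. */
	printf("rounded=%lluK min_order=%u n_pages=%llu\n",
	       rounded >> 10, min_order, rounded / chunk_size);
	return 0;
}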
@@ -18,7 +18,8 @@ struct drm_i915_gem_object;
 struct intel_memory_region;
 struct sg_table;
 
-#define I915_ALLOC_MIN_PAGE_SIZE  BIT(0)
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS    BIT(1)
 
 struct intel_memory_region_ops {
 	unsigned int flags;
@@ -13,6 +13,7 @@
 
 #include "gem/i915_gem_region.h"
 #include "gem/selftests/mock_context.h"
+#include "selftests/i915_random.h"
 
 static void close_objects(struct intel_memory_region *mem,
 			  struct list_head *objects)
@@ -86,10 +87,174 @@ static int igt_mock_fill(void *arg)
 	return err;
 }
 
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+		  struct list_head *objects,
+		  u64 size,
+		  unsigned int flags)
+{
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	obj = i915_gem_object_create_region(mem, size, flags);
+	if (IS_ERR(obj))
+		return obj;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto put;
+
+	list_add(&obj->st_link, objects);
+	return obj;
+
+put:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+	list_del(&obj->st_link);
+	i915_gem_object_put(obj);
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	unsigned long n_objects;
+	LIST_HEAD(objects);
+	LIST_HEAD(holes);
+	I915_RND_STATE(prng);
+	resource_size_t target;
+	resource_size_t total;
+	resource_size_t min;
+	int err = 0;
+
+	total = resource_size(&mem->region);
+
+	/* Min size */
+	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+				I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("%s min object spans multiple sg entries\n", __func__);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/* Max size */
+	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("%s max object spans multiple sg entries\n", __func__);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/* Internal fragmentation should not bleed into the object size */
+	target = round_up(prandom_u32_state(&prng) % total, PAGE_SIZE);
+	target = max_t(u64, PAGE_SIZE, target);
+
+	obj = igt_object_create(mem, &objects, target,
+				I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (obj->base.size != target) {
+		pr_err("%s obj->base.size(%llx) != target(%llx)\n", __func__,
+		       (u64)obj->base.size, (u64)target);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("%s object spans multiple sg entries\n", __func__);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/*
+	 * Try to fragment the address space, such that half of it is free, but
+	 * the max contiguous block size is SZ_64K.
+	 */
+
+	target = SZ_64K;
+	n_objects = div64_u64(total, target);
+
+	while (n_objects--) {
+		struct list_head *list;
+
+		if (n_objects % 2)
+			list = &holes;
+		else
+			list = &objects;
+
+		obj = igt_object_create(mem, list, target,
+					I915_BO_ALLOC_CONTIGUOUS);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_close_objects;
+		}
+	}
+
+	close_objects(mem, &holes);
+
+	min = target;
+	target = total >> 1;
+
+	/* Make sure we can still allocate all the fragmented space */
+	obj = igt_object_create(mem, &objects, target, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/*
+	 * Even though we have enough free space, we don't have a big enough
+	 * contiguous block. Make sure that holds true.
+	 */
+
+	do {
+		bool should_fail = target > min;
+
+		obj = igt_object_create(mem, &objects, target,
+					I915_BO_ALLOC_CONTIGUOUS);
+		if (should_fail != IS_ERR(obj)) {
+			pr_err("%s target allocation(%llx) mismatch\n",
+			       __func__, (u64)target);
+			err = -EINVAL;
+			goto err_close_objects;
+		}
+
+		target >>= 1;
+	} while (target >= mem->mm.chunk_size);
+
+err_close_objects:
+	list_splice_tail(&holes, &objects);
+	close_objects(mem, &objects);
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_mock_fill),
+		SUBTEST(igt_mock_contiguous),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
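The fragmentation scheme in igt_mock_contiguous, in brief (my summary, not commit text): the test carves the whole region into 64K contiguous objects, alternating them between the objects and holes lists, then frees only the holes list. Half the region is free afterwards, yet the largest contiguous run is 64K, so the halving loop expects every contiguous allocation with target > min (64K) to fail and everything at or below 64K to succeed, which is exactly the should_fail = target > min check.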
@@ -36,7 +36,7 @@ mock_object_create(struct intel_memory_region *mem,
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 
-	i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_init_memory_region(obj, mem, flags);
 
 	return obj;
 }