drm/i915/gen8: Abstract PDP usage
Up until now, ppgtt->pdp has always been the root of our page tables.
Legacy 32b addresses acted like it had 1 PDP with 4 PDPEs.

In preparation for 4 level page tables, we need to stop using ppgtt->pdp
directly unless we know it's what we want. The future structure will use
ppgtt->pml4 for the top level, and the pdp is just one of the entries
being pointed to by a pml4e. The temporary pdp local variable will be
removed once the rest of the 4-level code lands.

Also, start passing the vm pointer to the alloc functions, instead of
ppgtt.

v2: Updated after dynamic page allocation changes.
v3: Rebase after s/page_tables/page_table/.
v4: Rebase after changes in "Dynamic page table allocations" patch.
v5: Rebase after Mika's ppgtt cleanup / scratch merge patch series.
v6: Rebase after final merged version of Mika's ppgtt/scratch patches.
v7: Keep pagetable map in-line (and avoid unnecessary for_each_pde loops),
    remove redundant ppgtt pointer in _alloc_pagetabs (Akash).
v8: Fix text indentation in _alloc_pagetabs/page_directories (Chris).
v9: Defer gen8_alloc_va_range_4lvl definition until 4lvl is implemented,
    clean-up gen8_ppgtt_cleanup [pun intended] (Akash).
v10: Clean-up commit message (Akash).

Cc: Akash Goel <akash.goel@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Akash Goel <akash.goel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 6ac1850220
commit d4ec9da0e1
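
For orientation, here is a minimal sketch of the paging hierarchy the commit message refers to. It is illustrative only and not taken from the patch: struct layouts are heavily abbreviated, EXAMPLE_PDES and EXAMPLE_PDPES are hypothetical stand-ins for the driver's real constants, and example_lookup() exists purely to show why the helpers now take the pdp (or the vm) as an argument.

/*
 * Illustrative sketch only, not code from this patch.  Struct layouts
 * are abbreviated (the real structs also carry dma addresses, usage
 * bitmaps, etc.), and the two constants below are assumed stand-ins.
 */
#include <stddef.h>

#define EXAMPLE_PDES	512	/* PDEs per page directory (assumed) */
#define EXAMPLE_PDPES	4	/* PDPEs in a legacy 32b PDP, per the message above */

struct i915_page_table {		/* leaf level: PTEs -> 4K pages */
	void *ptes;
};

struct i915_page_directory {
	struct i915_page_table *page_table[EXAMPLE_PDES];
};

struct i915_page_directory_pointer {
	struct i915_page_directory *page_directory[EXAMPLE_PDPES];
};

/*
 * Today ppgtt->pdp is always the root.  With 4-level tables a pml4
 * becomes the root and each pml4e points at a pdp, so a helper must be
 * handed the pdp (or the vm) explicitly instead of reaching for
 * ppgtt->pdp itself, which is what the hunks below start doing.
 */
static struct i915_page_table *
example_lookup(struct i915_page_directory_pointer *pdp,
	       unsigned int pdpe, unsigned int pde)
{
	struct i915_page_directory *pd = pdp->page_directory[pdpe];

	return pd ? pd->page_table[pde] : NULL;
}

Passing the pdp explicitly keeps the same helpers usable once ppgtt->pml4 becomes the root and the pdp is merely the target of a pml4e.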
@@ -607,6 +607,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_pte_t *pt_vaddr, scratch_pte;
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
@@ -621,10 +622,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 		struct i915_page_directory *pd;
 		struct i915_page_table *pt;

-		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+		if (WARN_ON(!pdp->page_directory[pdpe]))
 			break;

-		pd = ppgtt->pdp.page_directory[pdpe];
+		pd = pdp->page_directory[pdpe];

 		if (WARN_ON(!pd->page_table[pde]))
 			break;
@@ -662,6 +663,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_pte_t *pt_vaddr;
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
@@ -675,7 +677,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 			break;

 		if (pt_vaddr == NULL) {
-			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
+			struct i915_page_directory *pd = pdp->page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
 			pt_vaddr = kmap_px(pt);
 		}
@@ -755,28 +757,29 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
+	struct drm_device *dev = ppgtt->base.dev;
 	int i;

-	for_each_set_bit(i, ppgtt->pdp.used_pdpes,
-			 I915_PDPES_PER_PDP(ppgtt->base.dev)) {
-		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+		if (WARN_ON(!pdp->page_directory[i]))
 			continue;

-		gen8_free_page_tables(ppgtt->base.dev,
-				      ppgtt->pdp.page_directory[i]);
-		free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
+		gen8_free_page_tables(dev, pdp->page_directory[i]);
+		free_pd(dev, pdp->page_directory[i]);
 	}

-	free_pdp(ppgtt->base.dev, &ppgtt->pdp);
+	free_pdp(dev, pdp);

 	gen8_free_scratch(vm);
 }

 /**
  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @ppgtt: Master ppgtt structure.
- * @pd: Page directory for this address range.
+ * @vm: Master vm structure.
+ * @pd: Page directory for this address range.
  * @start: Starting virtual address to begin allocations.
- * @length Size of the allocations.
+ * @length: Size of the allocations.
  * @new_pts: Bitmap set by function with new allocations. Likely used by the
  *           caller to free on error.
  *
@@ -789,13 +792,13 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
 {
-	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_device *dev = vm->dev;
 	struct i915_page_table *pt;
 	uint64_t temp;
 	uint32_t pde;
@@ -804,7 +807,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 		/* Don't reallocate page tables */
 		if (test_bit(pde, pd->used_pdes)) {
 			/* Scratch is never allocated this way */
-			WARN_ON(pt == ppgtt->base.scratch_pt);
+			WARN_ON(pt == vm->scratch_pt);
 			continue;
 		}

@@ -812,7 +815,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 		if (IS_ERR(pt))
 			goto unwind_out;

-		gen8_initialize_pt(&ppgtt->base, pt);
+		gen8_initialize_pt(vm, pt);
 		pd->page_table[pde] = pt;
 		__set_bit(pde, new_pts);
 	}
@@ -828,11 +831,11 @@ unwind_out:

 /**
  * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
- * @ppgtt: Master ppgtt structure.
+ * @vm: Master vm structure.
  * @pdp: Page directory pointer for this address range.
  * @start: Starting virtual address to begin allocations.
- * @length Size of the allocations.
- * @new_pds Bitmap set by function with new allocations. Likely used by the
+ * @length: Size of the allocations.
+ * @new_pds: Bitmap set by function with new allocations. Likely used by the
  *           caller to free on error.
  *
  * Allocate the required number of page directories starting at the pde index of
@@ -849,13 +852,14 @@ unwind_out:
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
-					     struct i915_page_directory_pointer *pdp,
-					     uint64_t start,
-					     uint64_t length,
-					     unsigned long *new_pds)
+static int
+gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
 {
-	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_device *dev = vm->dev;
 	struct i915_page_directory *pd;
 	uint64_t temp;
 	uint32_t pdpe;
@@ -871,7 +875,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
 		if (IS_ERR(pd))
 			goto unwind_out;

-		gen8_initialize_pd(&ppgtt->base, pd);
+		gen8_initialize_pd(vm, pd);
 		pdp->page_directory[pdpe] = pd;
 		__set_bit(pdpe, new_pds);
 	}
@@ -947,18 +951,19 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 }

 static int gen8_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start,
-			       uint64_t length)
+			       uint64_t start, uint64_t length)
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 	unsigned long *new_page_dirs, **new_page_tables;
+	struct drm_device *dev = vm->dev;
+	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	struct i915_page_directory *pd;
 	const uint64_t orig_start = start;
 	const uint64_t orig_length = length;
 	uint64_t temp;
 	uint32_t pdpe;
-	uint32_t pdpes = I915_PDPES_PER_PDP(ppgtt->base.dev);
+	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 	int ret;

 	/* Wrap is never okay since we can only represent 48b, and we don't
@@ -967,7 +972,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	if (WARN_ON(start + length < start))
 		return -ENODEV;

-	if (WARN_ON(start + length > ppgtt->base.total))
+	if (WARN_ON(start + length > vm->total))
 		return -ENODEV;

 	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
@@ -975,16 +980,16 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 		return ret;

 	/* Do the allocations first so we can easily bail out */
-	ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
-					new_page_dirs);
+	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
+						new_page_dirs);
 	if (ret) {
 		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 		return ret;
 	}

 	/* For every page directory referenced, allocate page tables */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables[pdpe]);
 		if (ret)
 			goto err_out;
@@ -995,7 +1000,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,

 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
@@ -1028,8 +1033,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 		}

 		kunmap_px(ppgtt, page_directory);
-
-		__set_bit(pdpe, ppgtt->pdp.used_pdpes);
+		__set_bit(pdpe, pdp->used_pdpes);
 	}

 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
@@ -1039,11 +1043,11 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
-			free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
+			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
 	}

 	for_each_set_bit(pdpe, new_page_dirs, pdpes)
-		free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
+		free_pd(dev, pdp->page_directory[pdpe]);

 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 	mark_tlbs_dirty(ppgtt);
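
The net effect on the allocation helpers is easiest to see from their call sites in gen8_alloc_va_range. The before/after pair below is condensed from the hunks above rather than new code:

	/* before: helpers took the ppgtt and hard-coded &ppgtt->pdp as the root */
	ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
						new_page_dirs);
	ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
					new_page_tables[pdpe]);

	/* after: the vm and an explicit pdp are passed in instead */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
					new_page_tables[pdpe]);

Per the intent stated in the commit message, a future gen8_alloc_va_range_4lvl path can then reuse these helpers with a pdp reached through a pml4e rather than &ppgtt->pdp.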