drm/i915: simplify shmem pwrite/pread slowpath handling
The shmem paths for pwrite/pread used a clever trick to hold onto a
single page when dropping the big dev->struct_mutex for the slowpath.
But this ran the risk of reinstating (or not completely purging) the
backing storage when dropping purgeable objects. Hence the code needed
to keep track of whether it ever dropped the lock, and if it did,
manually check whether it needs to re-purge the backing storage.

But thanks to the pages pin count introduced in

commit a5570178c0
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Sep 4 21:02:54 2012 +0100

    drm/i915: Pin backing pages whilst exporting through a dmabuf vmap

which allowed us to pin the backing storage and remove that page
reference trick from shmem_pwrite/read in

commit f60d7f0c1d
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Sep 4 21:02:56 2012 +0100

    drm/i915: Pin backing pages for pread

and

commit 755d22184f
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Sep 4 21:02:55 2012 +0100

    drm/i915: Pin backing pages for pwrite

we can now abolish this check. The slowpath cleanup completely
disappears from pread, and for pwrite we're only left with the domain
fixup in case someone moved the object out of the cpu domain from under
us. A follow-on patch will optimize that a notch more.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
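As context for the invariant this patch relies on, here is a minimal
standalone sketch of the pages-pin-count idea: while the pin count is
nonzero, a racing purge must not drop the object's backing pages, so
pread/pwrite no longer needs any re-purge fixup after dropping the
lock. This is illustrative userspace C, not i915 code; every name in
it is made up.

/*
 * Sketch only (hypothetical names): models how a nonzero pin count
 * turns a racing purge into a no-op on the backing pages.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	int pages_pin_count;
	bool purged;	/* stands in for obj->madv == __I915_MADV_PURGED */
	bool has_pages;	/* stands in for the shmem backing storage */
};

static void pin_pages(struct obj *o)
{
	o->pages_pin_count++;
}

static void unpin_pages(struct obj *o)
{
	assert(o->pages_pin_count > 0);
	o->pages_pin_count--;
}

/* Drop backing storage, but only when nobody holds a pin. */
static void try_purge(struct obj *o)
{
	if (o->pages_pin_count == 0) {
		o->has_pages = false;
		o->purged = true;
	}
}

int main(void)
{
	struct obj o = { .pages_pin_count = 0, .purged = false, .has_pages = true };

	pin_pages(&o);		/* pread/pwrite pins up front */
	try_purge(&o);		/* racing purge is a no-op while pinned */
	assert(o.has_pages);	/* hence no re-purge fixup after relocking */
	unpin_pages(&o);

	try_purge(&o);		/* after unpin, the purge goes through */
	assert(!o.has_pages && o.purged);
	printf("pin count protected the backing pages\n");
	return 0;
}

The point is simply that a nonzero pin count makes a concurrent purge
a no-op, which is what lets the slowpath drop dev->struct_mutex
without any hit_slowpath bookkeeping.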
parent 62810e5a9d
commit a39a68054f
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -407,7 +407,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
 	struct scatterlist *sg;
@@ -469,7 +468,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if (ret == 0)
 			goto next_page;
 
-		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -502,12 +500,6 @@ next_page:
 out:
 	i915_gem_object_unpin_pages(obj);
 
-	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-	}
-
 	return ret;
 }
 
@@ -838,11 +830,8 @@ out:
 	i915_gem_object_unpin_pages(obj);
 
 	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-		/* and flush dirty cachelines in case the object isn't in the cpu write
-		 * domain anymore. */
+		/* Fixup: Flush dirty cachelines in case the object isn't in the
+		 * cpu write domain anymore. */
 		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
 			i915_gem_chipset_flush(dev);