mm/hmm: replace hmm_update with mmu_notifier_range
The hmm_mirror_ops callback function sync_cpu_device_pagetables() passes
a struct hmm_update, which is merely a simplified version of struct
mmu_notifier_range. This indirection is unnecessary, so replace
hmm_update with mmu_notifier_range directly.

Link: https://lore.kernel.org/r/20190726005650.2566-2-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
[jgg: white space tuning]
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e709accc76
commit 1f96180792
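To make the conversion concrete, here is a minimal sketch of a driver-side
callback under the new signature. This is illustrative only, not code from
this patch: my_sync_pagetables, my_invalidate_device_range, and
my_mirror_ops are hypothetical names.

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical device-side invalidation, assumed to be defined elsewhere. */
void my_invalidate_device_range(struct hmm_mirror *mirror,
				unsigned long start, unsigned long end);

static int my_sync_pagetables(struct hmm_mirror *mirror,
			      const struct mmu_notifier_range *update)
{
	/* The bounds come straight from the mmu_notifier_range... */
	unsigned long start = update->start;
	unsigned long end = update->end;

	/*
	 * ...and blockability is queried through the helper, replacing the
	 * removed update->blockable field of struct hmm_update.
	 */
	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;	/* cannot sleep; caller must retry blockable */

	my_invalidate_device_range(mirror, start, end);
	return 0;
}

static const struct hmm_mirror_ops my_mirror_ops = {
	.sync_cpu_device_pagetables = my_sync_pagetables,
};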
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

@@ -195,13 +195,14 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
-			const struct hmm_update *update)
+static int
+amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
+			      const struct mmu_notifier_range *update)
 {
 	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 	unsigned long start = update->start;
 	unsigned long end = update->end;
-	bool blockable = update->blockable;
+	bool blockable = mmu_notifier_range_blockable(update);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
@@ -243,13 +244,14 @@ static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
-static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
-			const struct hmm_update *update)
+static int
+amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
+			      const struct mmu_notifier_range *update)
 {
 	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 	unsigned long start = update->start;
 	unsigned long end = update->end;
-	bool blockable = update->blockable;
+	bool blockable = mmu_notifier_range_blockable(update);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */

drivers/gpu/drm/nouveau/nouveau_svm.c

@@ -252,13 +252,13 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 
 static int
 nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
-					const struct hmm_update *update)
+					const struct mmu_notifier_range *update)
 {
 	struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
 	unsigned long start = update->start;
 	unsigned long limit = update->end;
 
-	if (!update->blockable)
+	if (!mmu_notifier_range_blockable(update))
 		return -EAGAIN;
 
 	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

include/linux/hmm.h

@@ -340,29 +340,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
 
 struct hmm_mirror;
 
-/*
- * enum hmm_update_event - type of update
- * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
- */
-enum hmm_update_event {
-	HMM_UPDATE_INVALIDATE,
-};
-
-/*
- * struct hmm_update - HMM update information for callback
- *
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
- * @event: event triggering the update (what is happening)
- * @blockable: can the callback block/sleep ?
- */
-struct hmm_update {
-	unsigned long start;
-	unsigned long end;
-	enum hmm_update_event event;
-	bool blockable;
-};
-
 /*
  * struct hmm_mirror_ops - HMM mirror device operations callback
  *
@@ -383,9 +360,9 @@ struct hmm_mirror_ops {
 	/* sync_cpu_device_pagetables() - synchronize page tables
 	 *
 	 * @mirror: pointer to struct hmm_mirror
-	 * @update: update information (see struct hmm_update)
-	 * Return: -EAGAIN if update.blockable false and callback need to
-	 *         block, 0 otherwise.
+	 * @update: update information (see struct mmu_notifier_range)
+	 * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
+	 *         and callback needs to block, 0 otherwise.
 	 *
 	 * This callback ultimately originates from mmu_notifiers when the CPU
 	 * page table is updated. The device driver must update its page table
@@ -396,8 +373,9 @@ struct hmm_mirror_ops {
 	 * page tables are completely updated (TLBs flushed, etc); this is a
 	 * synchronous call.
 	 */
-	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
-					  const struct hmm_update *update);
+	int (*sync_cpu_device_pagetables)(
+				struct hmm_mirror *mirror,
+				const struct mmu_notifier_range *update);
 };
 
 /*

mm/hmm.c

@@ -165,7 +165,6 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
-	struct hmm_update update;
 	struct hmm_range *range;
 	unsigned long flags;
 	int ret = 0;
@@ -173,15 +172,10 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	if (!kref_get_unless_zero(&hmm->kref))
 		return 0;
 
-	update.start = nrange->start;
-	update.end = nrange->end;
-	update.event = HMM_UPDATE_INVALIDATE;
-	update.blockable = mmu_notifier_range_blockable(nrange);
-
 	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	hmm->notifiers++;
 	list_for_each_entry(range, &hmm->ranges, list) {
-		if (update.end < range->start || update.start >= range->end)
+		if (nrange->end < range->start || nrange->start >= range->end)
 			continue;
 
 		range->valid = false;
@@ -198,9 +192,10 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
 		int rc;
 
-		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
 		if (rc) {
-			if (WARN_ON(update.blockable || rc != -EAGAIN))
+			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
+				    rc != -EAGAIN))
 				continue;
 			ret = -EAGAIN;
 			break;
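For context, the helper these hunks switch to is a one-line flag test. It is
quoted here approximately as it appears in include/linux/mmu_notifier.h
around this kernel version; it is background, not part of this patch:

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	/* True when the invalidation context is allowed to sleep. */
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}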