s390/mm: use a single lock for the fields in mm_context_t

The three locks 'lock', 'pgtable_lock' and 'gmap_lock' in the
mm_context_t can be reduced to a single lock.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

parent 60f07c8ec5
commit f28a4b4ddf
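For orientation before the hunks: a sketch of mm_context_t as it looks after this change, reconstructed from the diff below. Only fields visible in the hunk context are shown; the position of 'lock' itself and anything outside that context are assumptions, not a verbatim copy of arch/s390/include/asm/mmu.h.

/* Sketch only: reconstructed from the hunks below, not verbatim. */
typedef struct {
	spinlock_t lock;		/* the one remaining lock; now guards
					 * both pgtable_list and gmap_list */
	cpumask_t cpu_attach_mask;
	atomic_t flush_count;
	unsigned int flush_mm;
	struct list_head pgtable_list;	/* was under pgtable_lock */
	struct list_head gmap_list;	/* was under gmap_lock */
	unsigned long gmap_asce;
	unsigned long asce;
	/* ... remaining fields unchanged ... */
} mm_context_t;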
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -9,9 +9,7 @@ typedef struct {
 	cpumask_t cpu_attach_mask;
 	atomic_t flush_count;
 	unsigned int flush_mm;
-	spinlock_t pgtable_lock;
 	struct list_head pgtable_list;
-	spinlock_t gmap_lock;
 	struct list_head gmap_list;
 	unsigned long gmap_asce;
 	unsigned long asce;
@@ -29,10 +27,7 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)						   \
 	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	   \
-	.context.pgtable_lock =						   \
-			__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),   \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
-	.context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 static inline int tprot(unsigned long addr)
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -18,9 +18,7 @@ static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.lock);
-	spin_lock_init(&mm->context.pgtable_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	spin_lock_init(&mm->context.gmap_lock);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -100,14 +100,14 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 	if (!gmap)
 		return NULL;
 	gmap->mm = mm;
-	spin_lock(&mm->context.gmap_lock);
+	spin_lock(&mm->context.lock);
 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
 	if (list_is_singular(&mm->context.gmap_list))
 		gmap_asce = gmap->asce;
 	else
 		gmap_asce = -1UL;
 	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
-	spin_unlock(&mm->context.gmap_lock);
+	spin_unlock(&mm->context.lock);
 	return gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_create);
@@ -248,7 +248,7 @@ void gmap_remove(struct gmap *gmap)
 		spin_unlock(&gmap->shadow_lock);
 	}
 	/* Remove gmap from the pre-mm list */
-	spin_lock(&gmap->mm->context.gmap_lock);
+	spin_lock(&gmap->mm->context.lock);
 	list_del_rcu(&gmap->list);
 	if (list_empty(&gmap->mm->context.gmap_list))
 		gmap_asce = 0;
@@ -258,7 +258,7 @@ void gmap_remove(struct gmap *gmap)
 	else
 		gmap_asce = -1UL;
 	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
-	spin_unlock(&gmap->mm->context.gmap_lock);
+	spin_unlock(&gmap->mm->context.lock);
 	synchronize_rcu();
 	/* Put reference */
 	gmap_put(gmap);
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -188,7 +188,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	/* Try to get a fragment of a 4K page as a 2K page table */
 	if (!mm_alloc_pgste(mm)) {
 		table = NULL;
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
@@ -203,7 +203,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				list_del(&page->lru);
 			}
 		}
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 		if (table)
 			return table;
 	}
@@ -227,9 +227,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 	}
 	return table;
 }
@@ -243,13 +243,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	if (!mm_alloc_pgste(mm)) {
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 		if (mask != 0)
 			return;
 	}
@@ -275,13 +275,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 		return;
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-	spin_lock_bh(&mm->context.pgtable_lock);
+	spin_lock_bh(&mm->context.lock);
 	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
 		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.pgtable_lock);
+	spin_unlock_bh(&mm->context.lock);
 	table = (unsigned long *) (__pa(table) | (1U << bit));
 	tlb_remove_table(tlb, table);
 }