Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm fixes from Dan Williams:

 - A collection of crash and deadlock fixes for DAX that are also
   tagged for -stable. We will look to re-enable DAX pmd mappings in
   4.5, but for now 4.4 and -stable should disable it by default.

 - A fixup to ext2 and ext4 to mirror the same warning emitted by XFS
   when mounting with "-o dax"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  block: protect rw_page against device teardown
  mm, dax: fix DAX deadlocks (COW fault)
  dax: disable pmd mappings
  ext2, ext4: warn when mounting with dax enabled
commit 95803066c6
block/blk.h
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
                 unsigned int nr_bytes, unsigned int bidi_bytes);
-int blk_queue_enter(struct request_queue *q, gfp_t gfp);
-void blk_queue_exit(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);

 static inline void blk_queue_enter_live(struct request_queue *q)
fs/Kconfig
@@ -46,6 +46,12 @@ config FS_DAX
           or if unsure, say N. Saying Y will increase the size of the kernel
           by about 5kB.

+config FS_DAX_PMD
+        bool
+        default FS_DAX
+        depends on FS_DAX
+        depends on BROKEN
+
 endif # BLOCK

 # Posix ACL utility routines
fs/block_dev.c
@@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
                         struct page *page)
 {
         const struct block_device_operations *ops = bdev->bd_disk->fops;
+        int result = -EOPNOTSUPP;
+
         if (!ops->rw_page || bdev_get_integrity(bdev))
-                return -EOPNOTSUPP;
-        return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+                return result;
+
+        result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
+        if (result)
+                return result;
+        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+        blk_queue_exit(bdev->bd_queue);
+        return result;
 }
 EXPORT_SYMBOL_GPL(bdev_read_page);

@@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
         int result;
         int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
         const struct block_device_operations *ops = bdev->bd_disk->fops;
+
         if (!ops->rw_page || bdev_get_integrity(bdev))
                 return -EOPNOTSUPP;
+        result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
+        if (result)
+                return result;
+
         set_page_writeback(page);
         result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
         if (result)
                 end_page_writeback(page);
         else
                 unlock_page(page);
+        blk_queue_exit(bdev->bd_queue);
         return result;
 }
 EXPORT_SYMBOL_GPL(bdev_write_page);
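The pattern both bdev_read_page() and bdev_write_page() adopt above is the point of the fix: every ->rw_page() call is now bracketed by blk_queue_enter()/blk_queue_exit(), so the request queue cannot be torn down underneath an in-flight page I/O. A minimal sketch of that guard, assuming the 4.4-era signatures declared in the include/linux/blkdev.h hunk further down; the helper name rw_page_guarded() is invented for illustration and is not in the tree:

/* Sketch only, not kernel code: bracket ->rw_page() with queue enter/exit. */
static int rw_page_guarded(struct block_device *bdev, sector_t sector,
                           struct page *page, int rw)
{
        const struct block_device_operations *ops = bdev->bd_disk->fops;
        int result;

        /* Fails once the queue is shutting down, i.e. the device is going away. */
        result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
        if (result)
                return result;

        /* A queue usage reference is held here, so teardown waits for us. */
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);

        blk_queue_exit(bdev->bd_queue);         /* drop the reference */
        return result;
}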
fs/dax.c
@@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
         unsigned long pfn;
         int result = 0;

+        /* dax pmd mappings are broken wrt gup and fork */
+        if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
+                return VM_FAULT_FALLBACK;
+
         /* Fall back to PTEs if we're going to COW */
         if (write && !(vma->vm_flags & VM_SHARED))
                 return VM_FAULT_FALLBACK;

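Because FS_DAX_PMD depends on BROKEN, IS_ENABLED(CONFIG_FS_DAX_PMD) is normally 0, so __dax_pmd_fault() now bails out before touching any huge-page state. Returning VM_FAULT_FALLBACK is enough to disable pmd mappings because of how the core fault path treats that value; roughly, as a simplified approximation and not verbatim 4.4 code:

        /* Simplified approximation of the core mm's huge-pmd fault handling. */
        ret = vma->vm_ops->pmd_fault(vma, address, pmd, flags);
        if (!(ret & VM_FAULT_FALLBACK))
                return ret;     /* huge mapping installed (or a hard error) */
        /* otherwise fall through and service the fault with regular 4k PTEs */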
fs/ext2/super.c
@@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb)
                         /* Fall through */
                 case Opt_dax:
 #ifdef CONFIG_FS_DAX
+                        ext2_msg(sb, KERN_WARNING,
+                "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
                         set_opt(sbi->s_mount_opt, DAX);
 #else
                         ext2_msg(sb, KERN_INFO, "dax option not supported");
fs/ext4/super.c
@@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                 }
                 sbi->s_jquota_fmt = m->mount_opt;
 #endif
-#ifndef CONFIG_FS_DAX
         } else if (token == Opt_dax) {
+#ifdef CONFIG_FS_DAX
+                ext4_msg(sb, KERN_WARNING,
+                "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+                sbi->s_mount_opt |= m->mount_opt;
+#else
                 ext4_msg(sb, KERN_INFO, "dax option not supported");
                 return -1;
 #endif
include/linux/blkdev.h
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          struct scsi_ioctl_command __user *);

+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
mm/memory.c
@@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 } else {
                         /*
                          * The fault handler has no page to lock, so it holds
-                         * i_mmap_lock for write to protect against truncate.
+                         * i_mmap_lock for read to protect against truncate.
                          */
-                        i_mmap_unlock_write(vma->vm_file->f_mapping);
+                        i_mmap_unlock_read(vma->vm_file->f_mapping);
                 }
                 goto uncharge_out;
         }
@@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 } else {
                         /*
                          * The fault handler has no page to lock, so it holds
-                         * i_mmap_lock for write to protect against truncate.
+                         * i_mmap_lock for read to protect against truncate.
                          */
-                        i_mmap_unlock_write(vma->vm_file->f_mapping);
+                        i_mmap_unlock_read(vma->vm_file->f_mapping);
                 }
                 return ret;
 uncharge_out:
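Both mm/memory.c hunks are the same one-line correction applied to the two exit paths of do_cow_fault(): per the updated comment, the fault handler now holds i_mmap_lock for read rather than for write, so the unlock on the way out must use the matching mode or the rwsem is left in an inconsistent state and later lockers deadlock. The invariant in sketch form, as an illustration only and not code from the tree:

        struct address_space *mapping = vma->vm_file->f_mapping;

        i_mmap_lock_read(mapping);      /* mode taken by the fault handler */
        /* ... fault handling that must keep truncate at bay ... */
        i_mmap_unlock_read(mapping);    /* must pair with the _read lock above */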