iommufd for 6.3 rc
Three bugs found by syzkaller:

 - An invalid VA range can be put in a pages and eventually trigger
   WARN_ON, reject it early

 - Use of the wrong start index value when doing the complex batch
   carry scheme

 - Wrong store ordering resulting in corrupting data used in a later
   calculation that corrupted the batch structure during carry

-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCZC7G7AAKCRCFwuHvBreF
YQsjAQDiA56UTfVHwuMWEdZJ7clHbOeZk7xWMLTewVNBxktxhwD/fUVRqeC9uZKT
TAWvcHUN4f6dzzfBecKLZaSHrft5lws=
=u8cW
-----END PGP SIGNATURE-----

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd fixes from Jason Gunthorpe:

 - An invalid VA range can be put in a pages and eventually trigger
   WARN_ON, reject it early

 - Use of the wrong start index value when doing the complex batch
   carry scheme

 - Wrong store ordering resulting in corrupting data used in a later
   calculation that corrupted the batch structure during carry

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
  iommufd: Do not corrupt the pfn list when doing batch carry
  iommufd: Fix unpinning of pages when an access is present
  iommufd: Check for uptr overflow
commit 105b64c838
@@ -294,9 +294,9 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
 			batch->npfns[batch->end - 1] < keep_pfns);
 
 	batch->total_pfns = keep_pfns;
-	batch->npfns[0] = keep_pfns;
 	batch->pfns[0] = batch->pfns[batch->end - 1] +
			 (batch->npfns[batch->end - 1] - keep_pfns);
+	batch->npfns[0] = keep_pfns;
 	batch->end = 0;
 }
 
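The hunk above only swaps two stores, but the order matters when batch->end == 1: npfns[0] and npfns[batch->end - 1] are then the same slot, so the old code overwrote the count it was about to read and the carried pfns[0] came out wrong. A minimal standalone sketch of the difference (a simplified toy_batch struct and made-up values, not the kernel's struct pfn_batch):

/*
 * Standalone sketch (not kernel code) of the ordering bug fixed above.
 * Assumes a one-entry batch (end == 1), so npfns[0] and npfns[end - 1]
 * alias the same slot; the values are made up for illustration.
 */
#include <stdio.h>

struct toy_batch {
	unsigned long pfns[8];
	unsigned int npfns[8];
	unsigned int end;
};

/* Old order: npfns[0] is overwritten before it is read. */
static unsigned long carry_old(struct toy_batch *b, unsigned int keep_pfns)
{
	b->npfns[0] = keep_pfns;	/* clobbers npfns[end - 1] when end == 1 */
	return b->pfns[b->end - 1] +
	       (b->npfns[b->end - 1] - keep_pfns);	/* reads keep_pfns - keep_pfns == 0 */
}

/* Fixed order: compute the carried pfn first, then store the new count. */
static unsigned long carry_new(struct toy_batch *b, unsigned int keep_pfns)
{
	unsigned long first = b->pfns[b->end - 1] +
			      (b->npfns[b->end - 1] - keep_pfns);
	b->npfns[0] = keep_pfns;
	return first;
}

int main(void)
{
	struct toy_batch a = { .pfns = { 100 }, .npfns = { 5 }, .end = 1 };
	struct toy_batch b = a;

	/* Keep the last 2 pfns of a 5-pfn run starting at pfn 100. */
	printf("old order: pfns[0] = %lu (wrong)\n", carry_old(&a, 2));        /* 100 */
	printf("new order: pfns[0] = %lu (expected 103)\n", carry_new(&b, 2)); /* 103 */
	return 0;
}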
@@ -1142,6 +1142,7 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
 				    bool writable)
 {
 	struct iopt_pages *pages;
+	unsigned long end;
 
 	/*
 	 * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP
@@ -1150,6 +1151,9 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
 	if (length > SIZE_MAX - PAGE_SIZE || length == 0)
 		return ERR_PTR(-EINVAL);
 
+	if (check_add_overflow((unsigned long)uptr, length, &end))
+		return ERR_PTR(-EOVERFLOW);
+
 	pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
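The added check rejects a user pointer whose end address would wrap: without it, uptr + length can overflow to a small value that slips past later range checks. A rough userspace illustration, using __builtin_add_overflow() to stand in for the kernel's check_add_overflow() helper; the uptr/length values are made up:

/*
 * Userspace sketch (not kernel code) of the overflow rejection added above.
 * __builtin_add_overflow() reports wraparound the same way the helper in
 * the hunk does; the values below are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long uptr = (unsigned long)-1 - 0x1000; /* near the top of the address space */
	unsigned long length = 0x3000;
	unsigned long end;

	if (__builtin_add_overflow(uptr, length, &end))
		printf("rejected: uptr + length wraps (would return -EOVERFLOW)\n");
	else
		printf("end = 0x%lx\n", end);
	return 0;
}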
@@ -1203,13 +1207,21 @@ iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
 			unsigned long start =
 				max(start_index, *unmapped_end_index);
 
+			if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+			    batch->total_pfns)
+				WARN_ON(*unmapped_end_index -
+						batch->total_pfns !=
+					start_index);
 			batch_from_domain(batch, domain, area, start,
					  last_index);
-			batch_last_index = start + batch->total_pfns - 1;
+			batch_last_index = start_index + batch->total_pfns - 1;
 		} else {
 			batch_last_index = last_index;
 		}
 
+		if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+			WARN_ON(batch_last_index > real_last_index);
+
 		/*
 		 * unmaps must always 'cut' at a place where the pfns are not
 		 * contiguous to pair with the maps that always install
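The change from start to start_index matters whenever the batch already carries pfns for the front of the range: per the invariant asserted by the new WARN_ON, a non-empty batch covers [start_index, *unmapped_end_index), and batch_from_domain() then appends from start (== *unmapped_end_index) onward, so the last index the batch covers must be counted from start_index. A small arithmetic sketch with made-up index values, assuming that invariant:

/*
 * Sketch (not kernel code) of the index arithmetic fixed above, assuming
 * a non-empty batch covers [start_index, *unmapped_end_index). All values
 * are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long start_index = 10;        /* first page index the batch covers */
	unsigned long unmapped_end_index = 14; /* batch already carries indexes 10..13 */
	unsigned long carried = unmapped_end_index - start_index;  /* 4 pfns */
	unsigned long appended = 6;            /* pfns appended starting at index 14 */
	unsigned long total_pfns = carried + appended;              /* 10 */
	unsigned long start = unmapped_end_index;                   /* max(start_index, *unmapped_end_index) */

	/* The batch now covers indexes 10..19. */
	printf("correct last index: %lu\n", start_index + total_pfns - 1); /* 19 */
	printf("old (buggy) value:  %lu\n", start + total_pfns - 1);       /* 23, overshoots */
	return 0;
}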