dma-direct: fix zone selection after an unaddressable CMA allocation
The new dma_alloc_contiguous hides whether we allocate CMA or regular
pages, and thus fails to retry a ZONE_NORMAL allocation if the CMA
allocation succeeds but isn't addressable. That means we either fail
outright or dip into a small zone that might not succeed either.
Thanks to Hillf Danton for debugging this issue.
Fixes: b1d2dc009d ("dma-contiguous: add dma_{alloc,free}_contiguous() helpers")
Reported-by: Tobias Klausmann <tobias.johannes.klausmann@mni.thm.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Tobias Klausmann <tobias.johannes.klausmann@mni.thm.de>
commit 90ae409f9e
parent 936376f88f
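
For readers following along, the control flow after this fix is: try the
contiguous (CMA) allocator first, check the result against the device's
coherent DMA mask, and only then fall back to the page allocator, which can
be retried in a lower zone. Below is a minimal userspace C sketch of that
flow; the pool address, mask value, and helper names are invented for
illustration, and only the control flow mirrors the kernel change.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_CMA_BASE 0x100000000ULL  /* pretend CMA lives above 4 GiB */

struct fake_dev {
	uint64_t coherent_dma_mask;   /* e.g. a 32-bit-only device */
};

/* Model of dma_coherent_ok(): does the buffer fit under the DMA mask? */
static bool coherent_ok(const struct fake_dev *dev, uint64_t phys, size_t size)
{
	return phys + size - 1 <= dev->coherent_dma_mask;
}

/* Model of dma_alloc_contiguous(): always hands out high CMA memory. */
static uint64_t alloc_contiguous(size_t size)
{
	(void)size;
	return FAKE_CMA_BASE;         /* succeeds, but unaddressable at 32 bit */
}

/* Model of alloc_pages_node() retried with a zone-limited GFP mask. */
static uint64_t alloc_low_pages(size_t size)
{
	(void)size;
	return 0x1000000ULL;          /* comfortably below 4 GiB */
}

/* The fixed flow of __dma_direct_alloc_pages(): CMA first, then verify,
 * then fall back to the page allocator only when the CMA page is unusable. */
static uint64_t direct_alloc(const struct fake_dev *dev, size_t size)
{
	uint64_t phys = alloc_contiguous(size);

	if (phys && !coherent_ok(dev, phys, size))
		phys = 0;             /* dma_free_contiguous() in the kernel */
	if (!phys)
		phys = alloc_low_pages(size);
	return phys;
}

int main(void)
{
	struct fake_dev dev = { .coherent_dma_mask = 0xffffffffULL };
	uint64_t phys = direct_alloc(&dev, 4096);

	printf("allocated at 0x%llx (%s)\n", (unsigned long long)phys,
	       coherent_ok(&dev, phys, 4096) ? "addressable" : "BUG");
	return 0;
}

With the sketch's 32-bit device this prints an address below 4 GiB: the
unaddressable CMA page is dropped and the fallback path is taken instead of
failing outright.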
drivers/iommu/dma-iommu.c
@@ -965,10 +965,13 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	size_t alloc_size = PAGE_ALIGN(size);
+	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	void *cpu_addr;
 
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	if (!page)
+		page = alloc_pages_node(node, gfp, get_order(alloc_size));
 	if (!page)
 		return NULL;
 
include/linux/dma-contiguous.h
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
 		gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t align = get_order(PAGE_ALIGN(size));
-
-	return alloc_pages_node(node, gfp, align);
+	return NULL;
 }
 
 static inline void dma_free_contiguous(struct device *dev, struct page *page,
kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
  */
 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	size_t align = get_order(PAGE_ALIGN(size));
+	size_t count = size >> PAGE_SHIFT;
 	struct page *page = NULL;
 	struct cma *cma = NULL;
 
@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 
 	/* CMA can be used only in the context which permits sleeping */
 	if (cma && gfpflags_allow_blocking(gfp)) {
+		size_t align = get_order(size);
 		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
 
 		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
 	}
 
-	/* Fallback allocation of normal pages */
-	if (!page)
-		page = alloc_pages_node(node, gfp, align);
 	return page;
 }
 
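Note the design choice in the hunk above: dma_alloc_contiguous() no longer
falls back to alloc_pages_node() itself; it returns NULL whenever CMA cannot
be used, and each caller open-codes the fallback. The shared caller pattern,
as it appears in the dma-iommu.c hunk above and the dma/direct.c hunk below:

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)	/* no CMA area, non-blocking context, or CMA exhausted */
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(alloc_size));

Keeping the fallback in the callers lets __dma_direct_alloc_pages() check
dma_coherent_ok() in between and restrict the retry to a zone the device can
actually address.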
kernel/dma/direct.c
@@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	size_t alloc_size = PAGE_ALIGN(size);
+	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	u64 phys_mask;
 
@@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp &= ~__GFP_ZERO;
 	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 			&phys_mask);
+	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		dma_free_contiguous(dev, page, alloc_size);
+		page = NULL;
+	}
 again:
-	page = dma_alloc_contiguous(dev, size, gfp);
+	if (!page)
+		page = alloc_pages_node(node, gfp, get_order(alloc_size));
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;