
s390/mm: make virt_to_pfn() a static inline

Making virt_to_pfn() a static inline taking a strongly typed
(const void *) makes the contract of passing a pointer of that
type to the function explicit, and exposes any misuse of the
virt_to_pfn() macro, which acted polymorphically and accepted many
types such as (void *), (uintptr_t) or (unsigned long) as arguments
without warnings.
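
For illustration, here is a minimal user-space sketch of the difference
(the helper names and the hard-coded 12-bit page shift are stand-ins,
not the kernel definitions):

/* Macro version: any integer-sized argument converts silently. */
#define virt_to_pfn_macro(kaddr) ((unsigned long)(kaddr) >> 12)

/* Static inline version: the (const void *) parameter is type-checked. */
static inline unsigned long virt_to_pfn_inline(const void *kaddr)
{
	return (unsigned long)kaddr >> 12;
}

int main(void)
{
	unsigned long addr = 0x1000;

	(void)virt_to_pfn_macro(addr);          /* accepted without a peep */
	(void)virt_to_pfn_inline(addr);         /* warning (or error): makes
	                                           pointer from integer
	                                           without a cast */
	(void)virt_to_pfn_inline((void *)addr); /* explicit cast, clean */
	return 0;
}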

For symmetry, do the same with pfn_to_virt(), reflecting the
current layout in asm-generic/page.h.

Doing this reveals a number of offenders in the arch code and
the S390-specific drivers, so just bite the bullet and fix up
all of those as well.
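
The call-site fixes then follow two recurring patterns, sketched below
with a hypothetical stub standing in for virt_to_page():

struct page;

/* Stub with the new, stricter (const void *) contract; only the
 * argument types matter for this illustration. */
static inline struct page *virt_to_page_stub(const void *kaddr)
{
	(void)kaddr;		/* placeholder body, not real logic */
	return (struct page *)0;
}

static void fixup_patterns(unsigned long addr, char *buf)
{
	/* Pattern 1: the address lives in an unsigned long, so a
	 * (void *) cast is added at the call site. */
	struct page *a = virt_to_page_stub((void *)addr);

	/* Pattern 2: the argument is already a pointer, so the old
	 * (unsigned long) cast the macro used to tolerate is dropped. */
	struct page *b = virt_to_page_stub(buf);

	(void)a;
	(void)b;
}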

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Link: https://lore.kernel.org/r/20230812-virt-to-phys-s390-v2-1-6c40f31fe36f@linaro.org
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Linus Walleij, 2023-08-12 17:12:54 +02:00, committed by Heiko Carstens
parent 5cfdff02e9
commit 2d1494fb31
6 changed files with 15 additions and 7 deletions

arch/s390/include/asm/kfence.h

@@ -35,7 +35,7 @@ static __always_inline void kfence_split_mapping(void)
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
-	__kernel_map_pages(virt_to_page(addr), 1, !protect);
+	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
 	return true;
 }

arch/s390/include/asm/page.h

@@ -191,8 +191,16 @@ int arch_make_page_accessible(struct page *page);
 #define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
 #define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))
-#define pfn_to_virt(pfn)	__va(pfn_to_phys(pfn))
-#define virt_to_pfn(kaddr)	(phys_to_pfn(__pa(kaddr)))
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+	return __va(pfn_to_phys(pfn));
+}
+
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return phys_to_pfn(__pa(kaddr));
+}
+
 #define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)
 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

arch/s390/mm/cmm.c

@@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
 			} else
 				free_page((unsigned long) npa);
 		}
-		diag10_range(virt_to_pfn(addr), 1);
+		diag10_range(virt_to_pfn((void *)addr), 1);
 		pa->pages[pa->index++] = addr;
 		(*counter)++;
 		spin_unlock(&cmm_lock);

arch/s390/mm/vmem.c

@@ -36,7 +36,7 @@ static void vmem_free_pages(unsigned long addr, int order)
 {
 	/* We don't expect boot memory to be removed ever. */
 	if (!slab_is_available() ||
-	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
+	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
 		return;
 	free_pages(addr, order);
 }

drivers/s390/block/scm_blk.c

@@ -134,7 +134,7 @@ static void scm_request_done(struct scm_request *scmrq)
 		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
 		    IS_ALIGNED(aidaw, PAGE_SIZE))
-			mempool_free(virt_to_page(aidaw), aidaw_pool);
+			mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
 	}
 	spin_lock_irqsave(&list_lock, flags);

drivers/s390/char/vmcp.c

@@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
 	order = get_order(session->bufsize);
 	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
 	if (session->cma_alloc) {
-		page = virt_to_page((unsigned long)session->response);
+		page = virt_to_page(session->response);
 		cma_release(vmcp_cma, page, nr_pages);
 		session->cma_alloc = 0;
 	} else {