Merge tag 'x86-mm-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm cleanup from Thomas Gleixner:
 "Use PAGE_ALIGNED() instead of open coding it in the x86/mm code"

* tag 'x86-mm-2022-06-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Use PAGE_ALIGNED(x) instead of IS_ALIGNED(x, PAGE_SIZE)
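
For reference (not part of the pull message itself): PAGE_ALIGNED() is the
generic helper from include/linux/mm.h, itself built on IS_ALIGNED() from
include/linux/align.h, so the conversion is a pure readability cleanup with
no functional change. Paraphrased from the mainline headers of that era, not
copied verbatim from this tree:

  /* include/linux/align.h */
  #define IS_ALIGNED(x, a)     (((x) & ((typeof(x))(a) - 1)) == 0)

  /* include/linux/mm.h: test whether an address (unsigned long or pointer)
   * is aligned to PAGE_SIZE
   */
  #define PAGE_ALIGNED(addr)   IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
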
commit 0b7da15c21
Author: Linus Torvalds
Date:   2022-06-05 10:57:35 -07:00

@@ -1240,8 +1240,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
 void __ref vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap)
 {
-	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
-	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+	VM_BUG_ON(!PAGE_ALIGNED(start));
+	VM_BUG_ON(!PAGE_ALIGNED(end));
 
 	remove_pagetable(start, end, false, altmap);
 }
@@ -1605,8 +1605,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
 	int err;
 
-	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
-	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+	VM_BUG_ON(!PAGE_ALIGNED(start));
+	VM_BUG_ON(!PAGE_ALIGNED(end));
 
 	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
 		err = vmemmap_populate_basepages(start, end, node, NULL);
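
As a standalone sanity check of the equivalence, here is a minimal userspace
sketch (a hypothetical demo.c, assuming 4 KiB pages and local copies of the
two macros; illustrative only, not kernel code):

  /* build with: gcc -Wall demo.c -o demo */
  #include <assert.h>
  #include <stdio.h>

  /* Local stand-ins for the kernel macros, assuming PAGE_SIZE == 4096. */
  #define PAGE_SIZE          4096UL
  #define IS_ALIGNED(x, a)   (((x) & ((typeof(x))(a) - 1)) == 0)
  #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

  int main(void)
  {
          unsigned long start = 10 * PAGE_SIZE;   /* page aligned */
          unsigned long end   = start + 123;      /* not page aligned */

          /* Both spellings evaluate identically; PAGE_ALIGNED() is simply
           * the shorter form that the patch switches to. */
          assert(PAGE_ALIGNED(start) == IS_ALIGNED(start, PAGE_SIZE));
          assert(PAGE_ALIGNED(end)   == IS_ALIGNED(end, PAGE_SIZE));

          printf("start aligned: %d, end aligned: %d\n",
                 PAGE_ALIGNED(start), PAGE_ALIGNED(end));
          return 0;
  }

Compiled with gcc (typeof is a GNU extension), this prints
"start aligned: 1, end aligned: 0" -- the same predicate that the
VM_BUG_ON() checks in the hunks above evaluate.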