// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);

static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "trace\n");
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	switch (nd_pfn->mode) {
	case PFN_MODE_RAM:
		return sprintf(buf, "ram\n");
	case PFN_MODE_PMEM:
		return sprintf(buf, "pmem\n");
	default:
		return sprintf(buf, "none\n");
	}
}

static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		size_t n = len - 1;

		if (strncmp(buf, "pmem\n", n) == 0
				|| strncmp(buf, "pmem", n) == 0) {
			nd_pfn->mode = PFN_MODE_PMEM;
		} else if (strncmp(buf, "ram\n", n) == 0
				|| strncmp(buf, "ram", n) == 0)
			nd_pfn->mode = PFN_MODE_RAM;
		else if (strncmp(buf, "none\n", n) == 0
				|| strncmp(buf, "none", n) == 0)
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
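
/*
 * Illustrative sysfs interaction with the "mode" attribute above
 * (hypothetical device name); the store is rejected with -EBUSY while
 * a driver is attached to the device:
 *
 *   # echo pmem > /sys/bus/nd/devices/pfn0.0/mode
 *   # cat /sys/bus/nd/devices/pfn0.0/mode
 *   pmem
 */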

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
	alignments[0] = PAGE_SIZE;

	if (has_transparent_hugepage()) {
		alignments[1] = HPAGE_PMD_SIZE;
		if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
			alignments[2] = HPAGE_PUD_SIZE;
	}

	return alignments;
}
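
/*
 * For reference, on a typical x86_64 configuration the array filled in
 * by nd_pfn_supported_alignments() above ends up as { 4K, 2M, 1G }:
 * PAGE_SIZE, HPAGE_PMD_SIZE, and, with
 * CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD, HPAGE_PUD_SIZE. Unused
 * slots stay zero and terminate the list for the callers below.
 */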

/*
 * Use PMD mapping, if supported, as the default alignment.
 */
static unsigned long nd_pfn_default_alignment(void)
{
	if (has_transparent_hugepage())
		return HPAGE_PMD_SIZE;
	return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_size_select_store(dev, buf, &nd_pfn->align,
			nd_pfn_supported_alignments(aligns));
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
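
/*
 * Illustrative interaction (hypothetical device name, x86_64 values):
 * the align attribute only accepts one of the sizes reported by
 * nd_pfn_supported_alignments():
 *
 *   # cat /sys/bus/nd/devices/pfn0.0/supported_alignments
 *   4096 2097152 1073741824
 *   # echo 2097152 > /sys/bus/nd/devices/pfn0.0/align
 */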

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

	if (nd_pfn->uuid)
		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
	return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%s\n", nd_pfn->ndns
			? dev_name(&nd_pfn->ndns->dev) : "");
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
				+ start_pad + offset);
	} else {
		/* no address to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver) {
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
		struct nd_namespace_common *ndns = nd_pfn->ndns;
		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

		rc = sprintf(buf, "%llu\n", (unsigned long long)
				resource_size(&nsio->res) - start_pad
				- end_trunc - offset);
	} else {
		/* no size to convey if the pfn instance is disabled */
		rc = -ENXIO;
	}
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(size);
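
/*
 * In other words, the capacity reported by the size attribute is:
 *
 *   size = resource_size(&nsio->res) - start_pad - end_trunc - dataoff
 *
 * i.e. the raw namespace size minus the front padding, the tail
 * truncation, and the metadata (info block plus page map) preceding
 * the data offset.
 */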

static ssize_t supported_alignments_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	return nd_size_select_show(0,
			nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);

static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_supported_alignments.attr,
	NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
	.groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
	return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

static struct lock_class_key nvdimm_pfn_key;

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns)
{
	struct device *dev;

	if (!nd_pfn)
		return NULL;

	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = nd_pfn_default_alignment();
	dev = &nd_pfn->dev;
	device_initialize(&nd_pfn->dev);
	lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
				dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->type = &nd_pfn_device_type;
	dev->parent = &nd_region->dev;

	return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nd_pfn = nd_pfn_alloc(nd_region);
	dev = nd_pfn_devinit(nd_pfn, NULL);

	nd_device_register(dev);
	return dev;
}

/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	void *zero_page = page_address(ZERO_PAGE(0));
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	int num_bad, meta_num, rc, bb_present;
	sector_t first_bad, meta_start;
	struct nd_namespace_io *nsio;

	if (nd_pfn->mode != PFN_MODE_PMEM)
		return 0;

	nsio = to_nd_namespace_io(&ndns->dev);
	meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
	meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

	/*
	 * re-enable the namespace with correct size so that we can access
	 * the device memmap area.
	 */
	devm_namespace_disable(&nd_pfn->dev, ndns);
	rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
	if (rc)
		return rc;

	do {
		unsigned long zero_len;
		u64 nsoff;

		bb_present = badblocks_check(&nd_region->bb, meta_start,
				meta_num, &first_bad, &num_bad);
		if (bb_present) {
			dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
					num_bad, first_bad);
			nsoff = ALIGN_DOWN((nd_region->ndr_start
					+ (first_bad << 9)) - nsio->res.start,
					PAGE_SIZE);
			zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
			while (zero_len) {
				unsigned long chunk = min(zero_len, PAGE_SIZE);

				rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
						chunk, 0);
				if (rc)
					break;

				zero_len -= chunk;
				nsoff += chunk;
			}
			if (rc) {
				dev_err(&nd_pfn->dev,
					"error clearing %x badblocks at %llx\n",
					num_bad, first_bad);
				return rc;
			}
		}
	} while (bb_present);

	return 0;
}
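
/*
 * Note on the arithmetic in nd_pfn_clear_memmap_errors(): badblocks
 * are tracked in 512-byte sectors, hence the "<< 9" / ">> 9"
 * conversions. The scrubbed window starts just past the info block
 * (4K plus the superblock) and extends to pfn_sb->dataoff, i.e.
 * exactly the on-media page-map reservation.
 */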

static bool nd_supported_alignment(unsigned long align)
{
	int i;
	unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

	if (align == 0)
		return false;

	nd_pfn_supported_alignments(supported);
	for (i = 0; supported[i]; i++)
		if (align == supported[i])
			return true;
	return false;
}
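
/*
 * For orientation, the on-media layout implied by the validation and
 * initialization below (offsets from the namespace base; a rough
 * sketch, not a format specification):
 *
 *   0          4K             4K + sizeof(nd_pfn_sb)     dataoff
 *   | reserved | info block   | page map (struct page    | data...
 *   |          | (nd_pfn_sb)  | array in PFN_MODE_PMEM)  |
 */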

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	u64 checksum, offset;
	struct resource *res;
	enum nd_pfn_mode mode;
	struct nd_namespace_io *nsio;
	unsigned long align, start_pad;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

	if (!pfn_sb || !ndns)
		return -ENODEV;

	if (!is_memory(nd_pfn->dev.parent))
		return -ENODEV;

	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
		return -ENXIO;

	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
		return -ENODEV;

	checksum = le64_to_cpu(pfn_sb->checksum);
	pfn_sb->checksum = 0;
	if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
		return -ENODEV;
	pfn_sb->checksum = cpu_to_le64(checksum);
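
	/*
	 * Note: nd_sb_checksum() is defined over the info block with its
	 * checksum field zeroed, which is why the field is cleared above
	 * before the comparison and restored afterwards.
	 */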

	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
		return -ENODEV;

	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
		pfn_sb->start_pad = 0;
		pfn_sb->end_trunc = 0;
	}

	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
		pfn_sb->align = 0;

	if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
		pfn_sb->page_struct_size = cpu_to_le16(64);
		pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	}

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
	case PFN_MODE_PMEM:
		break;
	default:
		return -ENXIO;
	}

	align = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	start_pad = le32_to_cpu(pfn_sb->start_pad);
	if (align == 0)
		align = 1UL << ilog2(offset);
	mode = le32_to_cpu(pfn_sb->mode);

	if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, page size mismatch %d\n",
				le32_to_cpu(pfn_sb->page_size));
		return -EOPNOTSUPP;
	}

	if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
			(mode == PFN_MODE_PMEM)) {
		dev_err(&nd_pfn->dev,
				"init failed, struct page size mismatch %d\n",
				le16_to_cpu(pfn_sb->page_struct_size));
		return -EOPNOTSUPP;
	}

	/*
	 * Check whether we support the alignment. For DAX, if the
	 * superblock alignment does not match, we won't initialize
	 * the device.
	 */
	if (!nd_supported_alignment(align) &&
			!memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
		dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
				"%ld:%ld\n", nd_pfn->align, align);
		return -EOPNOTSUPP;
	}

	if (!nd_pfn->uuid) {
		/*
		 * When probing a namespace via nd_pfn_probe() the uuid
		 * is NULL (see: nd_pfn_devinit()), so we initialize
		 * settings from the pfn_sb.
		 */
		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
		if (!nd_pfn->uuid)
			return -ENOMEM;
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else {
		/*
		 * When probing a pfn / dax instance we validate the
		 * live settings against the pfn_sb.
		 */
		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
			return -ENODEV;

		/*
		 * If the uuid validates, but other settings mismatch,
		 * fail with EOPNOTSUPP because userspace has managed to
		 * change the configuration without specifying new
		 * identification.
		 */
		if (nd_pfn->align != align || nd_pfn->mode != mode) {
			dev_err(&nd_pfn->dev,
					"init failed, settings mismatch\n");
			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
					nd_pfn->align, align, nd_pfn->mode,
					mode);
			return -EOPNOTSUPP;
		}
	}

	if (align > nvdimm_namespace_capacity(ndns)) {
		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
				align, nvdimm_namespace_capacity(ndns));
		return -EOPNOTSUPP;
	}

	/*
	 * These warnings are verbose because they can only trigger in
	 * the case where the physical address alignment of the
	 * namespace has changed since the pfn superblock was
	 * established.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	res = &nsio->res;
	if (offset >= resource_size(res)) {
		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
				dev_name(&ndns->dev));
		return -EOPNOTSUPP;
	}

	if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
				"bad offset: %#llx dax disabled align: %#lx\n",
				offset, align);
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
				memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource start misaligned\n");
		return -EOPNOTSUPP;
	}

	if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
				memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "resource end misaligned\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_pfn *nd_pfn;
	struct device *pfn_dev;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_PFN:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_pfn = nd_pfn_alloc(nd_region);
	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!pfn_dev)
		return -ENOMEM;
	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn = to_nd_pfn(pfn_dev);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, PFN_SIG);
	dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
	if (rc < 0) {
		nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
		put_device(pfn_dev);
	} else
		nd_device_register(pfn_dev);

	return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);

/*
 * We hotplug memory at sub-section granularity, pad the reserved area
 * from the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
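
/*
 * Worked example (illustrative numbers, assuming 4K pages and a 2M
 * sub-section): for a namespace base 1M into a sub-section, the altmap
 * base pfn is rounded down by 256 pages and those 256 pages are added
 * to the info-block reserve, so the struct page allocation effectively
 * begins at the namespace proper.
 */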

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	struct range *range = &pgmap->range;
	struct vmem_altmap *altmap = &pgmap->altmap;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	u32 reserve = nd_info_block_reserve();
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	resource_size_t end = nsio->res.end - end_trunc;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
		.end_pfn = PHYS_PFN(end),
	};

	*range = (struct range) {
		.start = nsio->res.start + start_pad,
		.end = nsio->res.end - end_trunc,
	};
	pgmap->nr_range = 1;
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < reserve)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		altmap->free = PHYS_PFN(offset - reserve);
		altmap->alloc = 0;
		pgmap->flags |= PGMAP_ALTMAP_VALID;
	} else
		return -ENXIO;

	return 0;
}

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t start, size;
	struct nd_region *nd_region;
	unsigned long npfns, align;
	u32 end_trunc;
	struct nd_pfn_sb *pfn_sb;
	phys_addr_t offset;
	const char *sig;
	u64 checksum;
	int rc;

	pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	if (is_nd_dax(&nd_pfn->dev))
		sig = DAX_SIG;
	else
		sig = PFN_SIG;

	rc = nd_pfn_validate(nd_pfn, sig);
	if (rc == 0)
		return nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc != -ENODEV)
		return rc;

	/* no info block, do init */
	memset(pfn_sb, 0, sizeof(*pfn_sb));

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	start = nsio->res.start;
	size = resource_size(&nsio->res);
	npfns = PHYS_PFN(size - SZ_8K);
	align = max(nd_pfn->align, memremap_compat_align());

	/*
	 * When @start is misaligned fail namespace creation. See
	 * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
	 * an option.
	 */
	if (!IS_ALIGNED(start, memremap_compat_align())) {
		dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
				dev_name(&ndns->dev), &start,
				memremap_compat_align());
		return -EINVAL;
	}
	end_trunc = start + size - ALIGN_DOWN(start + size, align);
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;

		/*
		 * The altmap should be padded out to the block size used
		 * when populating the vmemmap. This *should* be equal to
		 * PMD_SIZE for most architectures.
		 *
		 * Also make sure size of struct page is less than
		 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
		 * face of production kernel configurations that reduce the
		 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
		 * kernel configurations that increase the 'struct page' size
		 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
		 * for continuing with the capacity that will be wasted when
		 * reverting to a production kernel configuration. Otherwise,
		 * those configurations are blocked by default.
		 */
		if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
			if (page_struct_override)
				page_map_size = sizeof(struct page) * npfns;
			else {
				dev_err(&nd_pfn->dev,
					"Memory debug options prevent using pmem for the page map\n");
				return -EINVAL;
			}
		}
		offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, align) - start;
	else
		return -ENXIO;
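
	/*
	 * Worked example (illustrative numbers): for a 16G PFN_MODE_PMEM
	 * namespace with 4K pages (~4M pfns), a 64-byte per-page map
	 * budget (256M page map), and a 2M alignment, the computation
	 * above yields offset = ALIGN(start + 8K + 256M, 2M) - start,
	 * i.e. 258M reserved ahead of the data area.
	 */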

	if (offset >= size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	npfns = PHYS_PFN(size - offset - end_trunc);
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(4);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	pfn_sb->align = cpu_to_le32(nd_pfn->align);
	if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
		pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
	else
		pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
	pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nd_pfn_clear_memmap_errors(nd_pfn);
	if (rc)
		return rc;

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	/* we need a valid pfn_sb before we can init a dev_pagemap */
	return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);