mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-11 21:14:07 +08:00
7e3e888dfc
At namespace creation time there is the potential for the "expected to
be zero" fields of a 'pfn' info-block to be filled with indeterminate
data. While the kernel buffer is zeroed on allocation it is immediately
overwritten by nd_pfn_validate() filling it with the current contents of
the on-media info-block location. For fields like 'flags' and the
'padding' it potentially means that future implementations can not rely on
those fields being zero.
In preparation to stop using the 'start_pad' and 'end_trunc' fields for
section alignment, arrange for fields that are not explicitly
initialized to be guaranteed zero. Bump the minor version to indicate
it is safe to assume the 'padding' and 'flags' are zero. Otherwise,
this corruption is expected to be benign since all other critical fields
are explicitly initialized.
Note: The cc: stable is about spreading this new policy to as many
kernels as possible not fixing an issue in those kernels. It is not
until the change titled "libnvdimm/pfn: Stop padding pmem namespaces to
section alignment" where this improper initialization becomes a problem.
So if someone decides to backport "libnvdimm/pfn: Stop padding pmem
namespaces to section alignment" (which is not tagged for stable), make
sure this pre-requisite is flagged.
Link: http://lkml.kernel.org/r/156092356065.979959.6681003754765958296.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: 32ab0a3f51
("libnvdimm, pmem: 'struct page' for pmem")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> [ppc64]
Cc: <stable@vger.kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
134 lines
3.0 KiB
C
134 lines
3.0 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
|
|
*/
|
|
#include <linux/device.h>
|
|
#include <linux/sizes.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/mm.h>
|
|
#include "nd-core.h"
|
|
#include "pfn.h"
|
|
#include "nd.h"
|
|
|
|
static void nd_dax_release(struct device *dev)
|
|
{
|
|
struct nd_region *nd_region = to_nd_region(dev->parent);
|
|
struct nd_dax *nd_dax = to_nd_dax(dev);
|
|
struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
|
|
|
|
dev_dbg(dev, "trace\n");
|
|
nd_detach_ndns(dev, &nd_pfn->ndns);
|
|
ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
|
|
kfree(nd_pfn->uuid);
|
|
kfree(nd_dax);
|
|
}
|
|
|
|
/* device_type for dax personality devices; release undoes nd_dax_alloc() */
static struct device_type nd_dax_device_type = {
	.name = "nd_dax",
	.release = nd_dax_release,
};
|
|
|
|
bool is_nd_dax(struct device *dev)
|
|
{
|
|
return dev ? dev->type == &nd_dax_device_type : false;
|
|
}
|
|
EXPORT_SYMBOL(is_nd_dax);
|
|
|
|
struct nd_dax *to_nd_dax(struct device *dev)
|
|
{
|
|
struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
|
|
|
|
WARN_ON(!is_nd_dax(dev));
|
|
return nd_dax;
|
|
}
|
|
EXPORT_SYMBOL(to_nd_dax);
|
|
|
|
/* sysfs groups: pfn personality attributes plus the shared core/numa sets */
static const struct attribute_group *nd_dax_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
|
|
|
|
static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
|
|
{
|
|
struct nd_pfn *nd_pfn;
|
|
struct nd_dax *nd_dax;
|
|
struct device *dev;
|
|
|
|
nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
|
|
if (!nd_dax)
|
|
return NULL;
|
|
|
|
nd_pfn = &nd_dax->nd_pfn;
|
|
nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
|
|
if (nd_pfn->id < 0) {
|
|
kfree(nd_dax);
|
|
return NULL;
|
|
}
|
|
|
|
dev = &nd_pfn->dev;
|
|
dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
|
|
dev->groups = nd_dax_attribute_groups;
|
|
dev->type = &nd_dax_device_type;
|
|
dev->parent = &nd_region->dev;
|
|
|
|
return nd_dax;
|
|
}
|
|
|
|
struct device *nd_dax_create(struct nd_region *nd_region)
|
|
{
|
|
struct device *dev = NULL;
|
|
struct nd_dax *nd_dax;
|
|
|
|
if (!is_memory(&nd_region->dev))
|
|
return NULL;
|
|
|
|
nd_dax = nd_dax_alloc(nd_region);
|
|
if (nd_dax)
|
|
dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
|
|
__nd_device_register(dev);
|
|
return dev;
|
|
}
|
|
|
|
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
|
|
{
|
|
int rc;
|
|
struct nd_dax *nd_dax;
|
|
struct device *dax_dev;
|
|
struct nd_pfn *nd_pfn;
|
|
struct nd_pfn_sb *pfn_sb;
|
|
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
|
|
|
|
if (ndns->force_raw)
|
|
return -ENODEV;
|
|
|
|
switch (ndns->claim_class) {
|
|
case NVDIMM_CCLASS_NONE:
|
|
case NVDIMM_CCLASS_DAX:
|
|
break;
|
|
default:
|
|
return -ENODEV;
|
|
}
|
|
|
|
nvdimm_bus_lock(&ndns->dev);
|
|
nd_dax = nd_dax_alloc(nd_region);
|
|
nd_pfn = &nd_dax->nd_pfn;
|
|
dax_dev = nd_pfn_devinit(nd_pfn, ndns);
|
|
nvdimm_bus_unlock(&ndns->dev);
|
|
if (!dax_dev)
|
|
return -ENOMEM;
|
|
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
|
|
nd_pfn->pfn_sb = pfn_sb;
|
|
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
|
|
dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
|
|
if (rc < 0) {
|
|
nd_detach_ndns(dax_dev, &nd_pfn->ndns);
|
|
put_device(dax_dev);
|
|
} else
|
|
__nd_device_register(dax_dev);
|
|
|
|
return rc;
|
|
}
|
|
EXPORT_SYMBOL(nd_dax_probe);
|