Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm fixes from Dan Williams:

 1/ Two regression fixes since v4.6: one for the byte order of a sysfs
    attribute (bz121161) and another for QEMU 2.6's NVDIMM _DSM (ACPI
    Device Specific Method) implementation that gets tripped up by new
    auto-probing behavior in the NFIT driver.

 2/ A fix tagged for -stable that stops the kernel from
    clobbering/ignoring changes to the configuration of a 'pfn' instance
    ("struct page" driver). For example, changing the alignment from 2M
    to 1G may silently revert to 2M if that value is currently stored on
    media.

 3/ A fix from Eric for an xfstests failure in dax. It is not currently
    tagged for -stable since it requires an 8-exabyte file system to
    trigger, and there appear to be no user visible side effects.

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  nfit: fix format interface code byte order
  dax: fix offset overflow in dax_io
  acpi, nfit: fix acpi_check_dsm() vs zero functions implemented
  libnvdimm, pfn, dax: fix initialization vs autodetect for mode + alignment
commit f3683ccd12
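As an aside on the first fix, here is a small user-space sketch (illustrative only, not part of the patch): the NFIT control region stores the format interface code least-significant-byte first, so decoding the raw bytes big-endian, as the old format_show() did via be16_to_cpu(), reports a byte-swapped value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* NFIT_FIC_BLK (0x0201) as it sits in the table: LSB first */
	uint8_t raw[2] = { 0x01, 0x02 };

	uint16_t le_read = (uint16_t)(raw[0] | (raw[1] << 8));	/* le16_to_cpu view */
	uint16_t be_read = (uint16_t)((raw[0] << 8) | raw[1]);	/* be16_to_cpu view */

	printf("little-endian read: 0x%04x\n", le_read);	/* 0x0201, the real code */
	printf("big-endian read:    0x%04x\n", be_read);	/* 0x0102, what the old sysfs attribute showed */
	return 0;
}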
drivers/acpi/nfit.c

@@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev,
 {
 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
+	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
 }
 static DEVICE_ATTR_RO(format);
 
@@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev,
 			continue;
 		if (nfit_dcr->dcr->code == dcr->code)
 			continue;
-		rc = sprintf(buf, "%#x\n",
-				be16_to_cpu(nfit_dcr->dcr->code));
+		rc = sprintf(buf, "0x%04x\n",
+				le16_to_cpu(nfit_dcr->dcr->code));
 		break;
 	}
 	if (rc != ENXIO)
@@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 
 	/*
 	 * Until standardization materializes we need to consider up to 3
-	 * different command sets. Note, that checking for function0 (bit0)
-	 * tells us if any commands are reachable through this uuid.
+	 * different command sets. Note, that checking for zero functions
+	 * tells us if any commands might be reachable through this uuid.
 	 */
 	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
-		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
 			break;
 
 	/* limit the supported commands to those that are publicly documented */
drivers/acpi/nfit.h

@@ -53,12 +53,12 @@ enum nfit_uuids {
 };
 
 /*
- * Region format interface codes are stored as an array of bytes in the
- * NFIT DIMM Control Region structure
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
  */
-#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
-#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
-#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
 
 enum {
 	NFIT_BLK_READ_FLUSH = 1,
drivers/acpi/utils.c

@@ -680,9 +680,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 	u64 mask = 0;
 	union acpi_object *obj;
 
-	if (funcs == 0)
-		return false;
-
 	obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
 	if (!obj)
 		return false;
@@ -695,6 +692,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 		mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
 	ACPI_FREE(obj);
 
+	if (funcs == 0)
+		return true;
+
 	/*
 	 * Bit 0 indicates whether there's support for any functions other than
 	 * function 0 for the specified UUID and revision.
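A hypothetical user-space sketch (names are illustrative, not the driver's) of the decision acpi_check_dsm() makes once the function-0 bitmap has been read: QEMU 2.6 is spec compliant but implements no functions beyond function 0, so its bitmap is all zeros; the old nfit probe asked for function 1 and was refused, while the new funcs == 0 call only asks whether a _DSM answered for the uuid at all.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the post-patch flow: evaluate _DSM function 0, build a mask,
 * then decide based on the caller's 'funcs' request */
static bool check_dsm(bool evaluated, uint64_t mask, uint64_t funcs)
{
	if (!evaluated)
		return false;		/* no _DSM for this uuid/revision */
	if (funcs == 0)
		return true;		/* "might anything be reachable here?" */
	/* bit 0 plus every requested function bit must be set */
	return (mask & 0x1) && (mask & funcs) == funcs;
}

int main(void)
{
	uint64_t qemu26_mask = 0x0;	/* function 0 answers, nothing else implemented */

	printf("old probe (funcs=1): %d\n", check_dsm(true, qemu26_mask, 1));	/* 0: rejected */
	printf("new probe (funcs=0): %d\n", check_dsm(true, qemu26_mask, 0));	/* 1: accepted */
	return 0;
}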
drivers/nvdimm/pfn_devs.c

@@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
+	unsigned long align;
+	enum nd_pfn_mode mode;
 	struct nd_namespace_io *nsio;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
@@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -ENXIO;
 	}
 
+	align = le32_to_cpu(pfn_sb->align);
+	offset = le64_to_cpu(pfn_sb->dataoff);
+	if (align == 0)
+		align = 1UL << ilog2(offset);
+	mode = le32_to_cpu(pfn_sb->mode);
+
 	if (!nd_pfn->uuid) {
-		/* from probe we allocate */
+		/*
+		 * When probing a namepace via nd_pfn_probe() the uuid
+		 * is NULL (see: nd_pfn_devinit()) we init settings from
+		 * pfn_sb
+		 */
 		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
 		if (!nd_pfn->uuid)
 			return -ENOMEM;
+		nd_pfn->align = align;
+		nd_pfn->mode = mode;
 	} else {
-		/* from init we validate */
+		/*
+		 * When probing a pfn / dax instance we validate the
+		 * live settings against the pfn_sb
+		 */
 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
 			return -ENODEV;
+
+		/*
+		 * If the uuid validates, but other settings mismatch
+		 * return EINVAL because userspace has managed to change
+		 * the configuration without specifying new
+		 * identification.
+		 */
+		if (nd_pfn->align != align || nd_pfn->mode != mode) {
+			dev_err(&nd_pfn->dev,
+					"init failed, settings mismatch\n");
+			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+					nd_pfn->align, align, nd_pfn->mode,
+					mode);
+			return -EINVAL;
+		}
 	}
 
-	if (nd_pfn->align == 0)
-		nd_pfn->align = le32_to_cpu(pfn_sb->align);
-	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+	if (align > nvdimm_namespace_capacity(ndns)) {
 		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
-				nd_pfn->align, nvdimm_namespace_capacity(ndns));
+				align, nvdimm_namespace_capacity(ndns));
 		return -EINVAL;
 	}
 
@@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 	 * namespace has changed since the pfn superblock was
 	 * established.
 	 */
-	offset = le64_to_cpu(pfn_sb->dataoff);
 	nsio = to_nd_namespace_io(&ndns->dev);
 	if (offset >= resource_size(&nsio->res)) {
 		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -EBUSY;
 	}
 
-	if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+	if ((align && !IS_ALIGNED(offset, align))
 			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
-		dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
-				offset);
+		dev_err(&nd_pfn->dev,
+				"bad offset: %#llx dax disabled align: %#lx\n",
+				offset, align);
 		return -ENXIO;
 	}
 
@@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	res->start += start_pad;
 	res->end -= end_trunc;
 
-	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
 		if (offset < SZ_8K)
 			return ERR_PTR(-EINVAL);
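In the same spirit, a minimal sketch (hypothetical struct and helper, not the driver code) of the rule the pfn_devs.c hunks establish: a freshly probed instance adopts mode and alignment from the on-media superblock, while an already-configured instance must match it exactly and otherwise fails with -EINVAL instead of silently reverting, e.g. from a requested 1G alignment back to the 2M stored on media.

#include <stdbool.h>
#include <stdio.h>

struct pfn_cfg {
	unsigned long align;
	int mode;
	bool configured;	/* false: fresh probe, true: live instance */
};

static int pfn_validate(struct pfn_cfg *live, const struct pfn_cfg *sb)
{
	if (!live->configured) {
		/* probe path: init settings from the superblock */
		live->align = sb->align;
		live->mode = sb->mode;
		live->configured = true;
		return 0;
	}
	/* init path: validate live settings against the superblock */
	if (live->align != sb->align || live->mode != sb->mode)
		return -22;	/* -EINVAL: settings mismatch */
	return 0;
}

int main(void)
{
	struct pfn_cfg on_media = { .align = 2UL << 20, .mode = 1, .configured = true };
	struct pfn_cfg live = { .align = 1UL << 30, .mode = 1, .configured = true };

	/* user asked for 1G alignment but media says 2M: report the mismatch */
	printf("validate: %d\n", pfn_validate(&live, &on_media));
	printf("live alignment untouched: %lu\n", live.align);
	return 0;
}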
fs/dax.c
@@ -208,7 +208,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 			dax.addr += first;
 			size = map_len - first;
 		}
-		max = min(pos + size, end);
+		/*
+		 * pos + size is one past the last offset for IO,
+		 * so pos + size can overflow loff_t at extreme offsets.
+		 * Cast to u64 to catch this and get the true minimum.
+		 */
+		max = min_t(u64, pos + size, end);
 	}
 
 	if (iov_iter_rw(iter) == WRITE) {
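Finally, a sketch of the arithmetic the dax_io change guards against (a user-space illustration, not the patch itself): in an ~8-exabyte file, pos + size can exceed LLONG_MAX, and as a signed loff_t the wrapped sum would win the min() against 'end'; comparing as u64 keeps the sum positive so 'end' is chosen as intended.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pos  = 1ULL << 62;		/* a huge offset inside an 8 EiB file */
	uint64_t size = 1ULL << 62;
	uint64_t end  = (1ULL << 62) + 4096;

	uint64_t sum = pos + size;		/* 2^63: larger than LLONG_MAX */

	/* what max = min_t(u64, pos + size, end) evaluates to */
	uint64_t max = sum < end ? sum : end;

	printf("pos + size = %llu (negative if reinterpreted as loff_t)\n",
	       (unsigned long long)sum);
	printf("min_t(u64) = %llu (== end, as intended)\n",
	       (unsigned long long)max);
	return 0;
}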