linux-next/include/linux/pmem.h
commit 6abccd1bfe ("x86, dax, pmem: remove indirection around memcpy_from_pmem()")
Author: Dan Williams
memcpy_from_pmem() maps directly to memcpy_mcsafe(). The wrapper
serves no real benefit aside from affording a more generic function
name than the x86-specific 'mcsafe'. However, this would not be the
first time that x86 terminology has leaked into the global namespace.
For lack of a better name, just use memcpy_mcsafe() directly.

This conversion also catches a place where we should have been using
plain memcpy(): acpi_nfit_blk_single_io().
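
As a minimal sketch of the resulting call-site pattern (illustrative,
not lifted from the diff; memcpy_mcsafe() returns 0 on success or a
negative error code when the copy consumes poisoned media):

	/* was: rc = memcpy_from_pmem(dst, pmem_src, n); */
	rc = memcpy_mcsafe(dst, pmem_src, n);
	if (rc)
		return -EIO;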

Cc: <x86@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
2017-04-25 13:20:46 -07:00


/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __PMEM_H__
#define __PMEM_H__

#include <linux/io.h>
#include <linux/uio.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
#include <asm/pmem.h>
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
/*
 * These are simply here to enable compilation; all call sites gate
 * calls to these symbols with arch_has_pmem_api() and redirect to the
 * implementation in asm/pmem.h.
 */
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
	BUG();
}

static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
		struct iov_iter *i)
{
	BUG();
	return 0;
}

static inline void arch_clear_pmem(void *addr, size_t size)
{
	BUG();
}

static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
	BUG();
}

static inline void arch_invalidate_pmem(void *addr, size_t size)
{
	BUG();
}
#endif

static inline bool arch_has_pmem_api(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}

/**
 * memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Perform a memory copy that results in the destination of the copy
 * being effectively evicted from, or never written to, the processor
 * cache hierarchy after the copy completes. After memcpy_to_pmem(),
 * data may still reside in cpu or platform buffers, so this operation
 * must be followed by a blkdev_issue_flush() on the pmem block device.
 */
static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
{
	if (arch_has_pmem_api())
		arch_memcpy_to_pmem(dst, src, n);
	else
		memcpy(dst, src, n);
}
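
/*
 * Illustrative sketch, not part of the original header: the copy +
 * flush pairing described above. example_pmem_store() is a hypothetical
 * helper; the data is only durable once the block layer flush completes.
 */
static inline void example_pmem_store(void *pmem_dst, const void *src,
		size_t n)
{
	memcpy_to_pmem(pmem_dst, src, n);
	/* caller must still issue blkdev_issue_flush() on the pmem bdev */
}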

/**
 * copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * See blkdev_issue_flush() note for memcpy_to_pmem().
 */
static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
		struct iov_iter *i)
{
	if (arch_has_pmem_api())
		return arch_copy_from_iter_pmem(addr, bytes, i);
	return copy_from_iter_nocache(addr, bytes, i);
}
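
/*
 * Illustrative sketch, not part of the original header: a dax-style
 * write path draining an iov_iter into pmem. example_pmem_write_iter()
 * is a hypothetical helper; a short return (< bytes) means the source
 * iterator faulted part way through.
 */
static inline size_t example_pmem_write_iter(void *pmem_addr, size_t bytes,
		struct iov_iter *i)
{
	size_t copied = copy_from_iter_pmem(pmem_addr, bytes, i);

	/* see the blkdev_issue_flush() note for memcpy_to_pmem() */
	return copied;
}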

/**
 * clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * See blkdev_issue_flush() note for memcpy_to_pmem().
 */
static inline void clear_pmem(void *addr, size_t size)
{
	if (arch_has_pmem_api())
		arch_clear_pmem(addr, size);
	else
		memset(addr, 0, size);
}

/**
 * invalidate_pmem - flush a pmem range from the cache hierarchy
 * @addr: virtual start address
 * @size: bytes to invalidate (internally aligned to cache line size)
 *
 * For platforms that support clearing poison, this flushes any poisoned
 * ranges out of the cache.
 */
static inline void invalidate_pmem(void *addr, size_t size)
{
	if (arch_has_pmem_api())
		arch_invalidate_pmem(addr, size);
}
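
/*
 * Illustrative sketch, not part of the original header: after a driver
 * asks the platform to clear poison in a range (e.g. via an ACPI DSM),
 * stale poisoned cache lines must be dropped so subsequent reads see
 * the cleared media. example_post_clear_poison() is hypothetical.
 */
static inline void example_post_clear_poison(void *pmem_addr, size_t len)
{
	/* ...platform-specific poison clearing completed here... */
	invalidate_pmem(pmem_addr, len);
}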

/**
 * wb_cache_pmem - write back processor cache for PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back the processor cache range starting at 'addr' for 'size' bytes.
 * See blkdev_issue_flush() note for memcpy_to_pmem().
 */
static inline void wb_cache_pmem(void *addr, size_t size)
{
	if (arch_has_pmem_api())
		arch_wb_cache_pmem(addr, size);
}
#endif /* __PMEM_H__ */