Patch series "Memory allocation profiling", v6.

Overview:
Low overhead [1] per-callsite memory allocation profiling. Not just for
debug kernels, overhead low enough to be deployed in production.

Example output:
  root@moria-kvm:~# sort -rn /proc/allocinfo
   127664128    31168 mm/page_ext.c:270 func:alloc_page_ext
    56373248     4737 mm/slub.c:2259 func:alloc_slab_page
    14880768     3633 mm/readahead.c:247 func:page_cache_ra_unbounded
    14417920     3520 mm/mm_init.c:2530 func:alloc_large_system_hash
    13377536      234 block/blk-mq.c:3421 func:blk_mq_alloc_rqs
    11718656     2861 mm/filemap.c:1919 func:__filemap_get_folio
     9192960     2800 kernel/fork.c:307 func:alloc_thread_stack_node
     4206592        4 net/netfilter/nf_conntrack_core.c:2567 func:nf_ct_alloc_hashtable
     4136960     1010 drivers/staging/ctagmod/ctagmod.c:20 [ctagmod] func:ctagmod_start
     3940352      962 mm/memory.c:4214 func:alloc_anon_folio
     2894464    22613 fs/kernfs/dir.c:615 func:__kernfs_new_node
   ...

Usage:
kconfig options:
- CONFIG_MEM_ALLOC_PROFILING
- CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
- CONFIG_MEM_ALLOC_PROFILING_DEBUG
  adds warnings for allocations that weren't accounted because of a
  missing annotation

sysctl:
  /proc/sys/vm/mem_profiling

Runtime info:
  /proc/allocinfo

Notes:

[1]: Overhead
To measure the overhead we are comparing the following configurations:
(1) Baseline with CONFIG_MEMCG_KMEM=n
(2) Disabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n)
(3) Enabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=y)
(4) Enabled at runtime (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n && /proc/sys/vm/mem_profiling=1)
(5) Baseline with CONFIG_MEMCG_KMEM=y && allocating with __GFP_ACCOUNT
(6) Disabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n) && CONFIG_MEMCG_KMEM=y
(7) Enabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=y) && CONFIG_MEMCG_KMEM=y

Performance overhead:
To evaluate performance we implemented an in-kernel test executing
multiple get_free_page/free_page and kmalloc/kfree calls with allocation
sizes growing from 8 to 240 bytes with CPU frequency set to max and CPU
affinity set to a specific CPU to minimize the noise. Below are results
from running the test on Ubuntu 22.04.2 LTS with 6.8.0-rc1 kernel on
56 core Intel Xeon:

                        kmalloc                 pgalloc
(1 baseline)            6.764s                  16.902s
(2 default disabled)    6.793s  (+0.43%)        17.007s (+0.62%)
(3 default enabled)     7.197s  (+6.40%)        23.666s (+40.02%)
(4 runtime enabled)     7.405s  (+9.48%)        23.901s (+41.41%)
(5 memcg)               13.388s (+97.94%)       48.460s (+186.71%)
(6 def disabled+memcg)  13.332s (+97.10%)       48.105s (+184.61%)
(7 def enabled+memcg)   13.446s (+98.78%)       54.963s (+225.18%)

Memory overhead:
Kernel size:

        text        data        bss         dec         diff
(1)     26515311    18890222    17018880    62424413
(2)     26524728    19423818    16740352    62688898    264485
(3)     26524724    19423818    16740352    62688894    264481
(4)     26524728    19423818    16740352    62688898    264485
(5)     26541782    18964374    16957440    62463596    39183

Memory consumption on a 56 core Intel CPU with 125GB of memory:
Code tags:           192 kB
PageExts:         262144 kB (256MB)
SlabExts:           9876 kB (9.6MB)
PcpuExts:            512 kB (0.5MB)

Total overhead is 0.2% of total memory.

Benchmarks:

Hackbench tests run 100 times:
hackbench -s 512 -l 200 -g 15 -f 25 -P
      baseline       disabled profiling     enabled profiling
avg   0.3543         0.3559 (+0.0016)       0.3566 (+0.0023)
stdev 0.0137         0.0188                 0.0077

hackbench -l 10000
      baseline       disabled profiling     enabled profiling
avg   6.4218         6.4306 (+0.0088)       6.5077 (+0.0859)
stdev 0.0933         0.0286                 0.0489

stress-ng tests:
stress-ng --class memory --seq 4 -t 60
stress-ng --class cpu --seq 4 -t 60
Results posted at: https://evilpiepirate.org/~kent/memalloc_prof_v4_stress-ng/

[2] https://lore.kernel.org/all/20240306182440.2003814-1-surenb@google.com/

This patch (of 37):

The next patch drops vmalloc.h from a system header in order to fix a
circular dependency; this adds it to all the files that were pulling it
in implicitly.

[kent.overstreet@linux.dev: fix arch/alpha/lib/memcpy.c]
  Link: https://lkml.kernel.org/r/20240327002152.3339937-1-kent.overstreet@linux.dev
[surenb@google.com: fix arch/x86/mm/numa_32.c]
  Link: https://lkml.kernel.org/r/20240402180933.1663992-1-surenb@google.com
[kent.overstreet@linux.dev: a few places were depending on sizes.h]
  Link: https://lkml.kernel.org/r/20240404034744.1664840-1-kent.overstreet@linux.dev
[arnd@arndb.de: fix mm/kasan/hw_tags.c]
  Link: https://lkml.kernel.org/r/20240404124435.3121534-1-arnd@kernel.org
[surenb@google.com: fix arc build]
  Link: https://lkml.kernel.org/r/20240405225115.431056-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-2-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
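As a concrete illustration of the /proc/allocinfo format shown above, here is a
minimal userspace sketch that totals the per-callsite byte counts. It is
illustrative only and not part of this patch; it assumes each data line begins
with the two numeric columns (bytes, calls) seen in the example output, and
silently skips any line that does not match:

/* Hypothetical helper, not part of this patch: sum the per-callsite
 * byte counts reported by /proc/allocinfo.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	unsigned long long bytes, calls, total = 0;
	FILE *f = fopen("/proc/allocinfo", "r");

	if (!f) {
		perror("fopen /proc/allocinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%llu %llu", &bytes, &calls) == 2)
			total += bytes;
	fclose(f);
	printf("total tracked: %llu bytes\n", total);
	return 0;
}
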
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */

#include <linux/interval_tree.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>

#include "vfio_dev.h"
#include "cmds.h"
#include "dirty.h"

#define READ_SEQ true
#define WRITE_ACK false

bool pds_vfio_dirty_is_enabled(struct pds_vfio_pci_device *pds_vfio)
{
	return pds_vfio->dirty.is_enabled;
}

void pds_vfio_dirty_set_enabled(struct pds_vfio_pci_device *pds_vfio)
{
	pds_vfio->dirty.is_enabled = true;
}

void pds_vfio_dirty_set_disabled(struct pds_vfio_pci_device *pds_vfio)
{
	pds_vfio->dirty.is_enabled = false;
}

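/*
 * Query the device for its dirty region info and log it for debugging.
 * Best effort only: allocation or mapping failures simply skip the dump.
 */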
static void
pds_vfio_print_guest_region_info(struct pds_vfio_pci_device *pds_vfio,
				 u8 max_regions)
{
	int len = max_regions * sizeof(struct pds_lm_dirty_region_info);
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_dirty_region_info *region_info;
	dma_addr_t regions_dma;
	u8 num_regions;
	int err;

	region_info = kcalloc(max_regions,
			      sizeof(struct pds_lm_dirty_region_info),
			      GFP_KERNEL);
	if (!region_info)
		return;

	regions_dma =
		dma_map_single(pdsc_dev, region_info, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdsc_dev, regions_dma))
		goto out_free_region_info;

	err = pds_vfio_dirty_status_cmd(pds_vfio, regions_dma, &max_regions,
					&num_regions);
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_FROM_DEVICE);
	if (err)
		goto out_free_region_info;

	for (unsigned int i = 0; i < num_regions; i++)
		dev_dbg(&pdev->dev,
			"region_info[%d]: dma_base 0x%llx page_count %u page_size_log2 %u\n",
			i, le64_to_cpu(region_info[i].dma_base),
			le32_to_cpu(region_info[i].page_count),
			region_info[i].page_size_log2);

out_free_region_info:
	kfree(region_info);
}

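/*
 * Allocate the host-side "seq" and "ack" bitmaps for a region: seq
 * receives the device's dirty state, ack records what the host has
 * already reported back to the device.
 */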
static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
					unsigned long bytes)
{
	unsigned long *host_seq_bmp, *host_ack_bmp;

	host_seq_bmp = vzalloc(bytes);
	if (!host_seq_bmp)
		return -ENOMEM;

	host_ack_bmp = vzalloc(bytes);
	if (!host_ack_bmp) {
		/* host_seq_bmp was vzalloc'd, so vfree, not bitmap_free/kfree */
		vfree(host_seq_bmp);
		return -ENOMEM;
	}

	region->host_seq = host_seq_bmp;
	region->host_ack = host_ack_bmp;
	region->bmp_bytes = bytes;

	return 0;
}

static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
{
	if (!dirty->regions)
		return;

	for (int i = 0; i < dirty->num_regions; i++) {
		struct pds_vfio_region *region = &dirty->regions[i];

		vfree(region->host_seq);
		vfree(region->host_ack);
		region->host_seq = NULL;
		region->host_ack = NULL;
		region->bmp_bytes = 0;
	}
}

static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
				      struct pds_vfio_region *region)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;

	dma_unmap_single(pdsc_dev, region->sgl_addr,
			 region->num_sge * sizeof(struct pds_lm_sg_elem),
			 DMA_BIDIRECTIONAL);
	kfree(region->sgl);

	region->num_sge = 0;
	region->sgl = NULL;
	region->sgl_addr = 0;
}

static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
{
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;

	if (!dirty->regions)
		return;

	for (int i = 0; i < dirty->num_regions; i++) {
		struct pds_vfio_region *region = &dirty->regions[i];

		if (region->sgl)
			__pds_vfio_dirty_free_sgl(pds_vfio, region);
	}
}

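/*
 * Allocate and DMA-map the scatter-gather list used to move a region's
 * bitmap to/from the device. Each SG element can describe one page of
 * bitmap, and one bitmap page (PAGE_SIZE * 8 bits) covers that many
 * tracked pages, hence the max_sge calculation below.
 */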
static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
				    struct pds_vfio_region *region,
				    u32 page_count)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_sg_elem *sgl;
	dma_addr_t sgl_addr;
	size_t sgl_size;
	u32 max_sge;

	max_sge = DIV_ROUND_UP(page_count, PAGE_SIZE * 8);
	sgl_size = max_sge * sizeof(struct pds_lm_sg_elem);

	sgl = kzalloc(sgl_size, GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	sgl_addr = dma_map_single(pdsc_dev, sgl, sgl_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, sgl_addr)) {
		kfree(sgl);
		return -EIO;
	}

	region->sgl = sgl;
	region->num_sge = max_sge;
	region->sgl_addr = sgl_addr;

	return 0;
}

static void pds_vfio_dirty_free_regions(struct pds_vfio_dirty *dirty)
{
	vfree(dirty->regions);
	dirty->regions = NULL;
	dirty->num_regions = 0;
}

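/*
 * Build the driver's per-region state from the region info negotiated
 * with the device: per-region bitmaps, SG lists, and each region's byte
 * offset into the device's single logical dirty bitmap.
 */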
static int pds_vfio_dirty_alloc_regions(struct pds_vfio_pci_device *pds_vfio,
					struct pds_lm_dirty_region_info *region_info,
					u64 region_page_size, u8 num_regions)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
	u32 dev_bmp_offset_byte = 0;
	int err;

	dirty->regions = vcalloc(num_regions, sizeof(struct pds_vfio_region));
	if (!dirty->regions)
		return -ENOMEM;
	dirty->num_regions = num_regions;

	for (int i = 0; i < num_regions; i++) {
		struct pds_lm_dirty_region_info *ri = &region_info[i];
		struct pds_vfio_region *region = &dirty->regions[i];
		u64 region_size, region_start;
		u32 page_count;

		/* page_count might be adjusted by the device */
		page_count = le32_to_cpu(ri->page_count);
		region_start = le64_to_cpu(ri->dma_base);
		region_size = page_count * region_page_size;

		err = pds_vfio_dirty_alloc_bitmaps(region,
						   page_count / BITS_PER_BYTE);
		if (err) {
			dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
				ERR_PTR(err));
			goto out_free_regions;
		}

		err = pds_vfio_dirty_alloc_sgl(pds_vfio, region, page_count);
		if (err) {
			dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
				ERR_PTR(err));
			goto out_free_regions;
		}

		region->size = region_size;
		region->start = region_start;
		region->page_size = region_page_size;
		region->dev_bmp_offset_start_byte = dev_bmp_offset_byte;

		dev_bmp_offset_byte += page_count / BITS_PER_BYTE;
		if (dev_bmp_offset_byte % BITS_PER_BYTE) {
			dev_err(&pdev->dev, "Device bitmap offset is mis-aligned\n");
			err = -EINVAL;
			goto out_free_regions;
		}
	}

	return 0;

out_free_regions:
	pds_vfio_dirty_free_bitmaps(dirty);
	pds_vfio_dirty_free_sgl(pds_vfio);
	pds_vfio_dirty_free_regions(dirty);

	return err;
}

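/*
 * Enable dirty page tracking: query the device's region limits, combine
 * the requested IOVA ranges down to at most max_regions, describe the
 * resulting regions to the device, and allocate the host-side tracking
 * state for each of them.
 */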
static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
				 struct rb_root_cached *ranges, u32 nnodes,
				 u64 *page_size)
{
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	struct pds_lm_dirty_region_info *region_info;
	struct interval_tree_node *node = NULL;
	u64 region_page_size = *page_size;
	u8 max_regions = 0, num_regions;
	dma_addr_t regions_dma = 0;
	u32 num_ranges = nnodes;
	int err;
	u16 len;

	dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
		pds_vfio->vf_id);

	if (pds_vfio_dirty_is_enabled(pds_vfio))
		return -EINVAL;

	/* find if dirty tracking is disabled, i.e. num_regions == 0 */
	err = pds_vfio_dirty_status_cmd(pds_vfio, 0, &max_regions,
					&num_regions);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to get dirty status, err %pe\n",
			ERR_PTR(err));
		return err;
	} else if (num_regions) {
		dev_err(&pdev->dev,
			"Dirty tracking already enabled for %d regions\n",
			num_regions);
		return -EEXIST;
	} else if (!max_regions) {
		dev_err(&pdev->dev,
			"Device doesn't support dirty tracking, max_regions %d\n",
			max_regions);
		return -EOPNOTSUPP;
	}

	if (num_ranges > max_regions) {
		vfio_combine_iova_ranges(ranges, nnodes, max_regions);
		num_ranges = max_regions;
	}

	region_info = kcalloc(num_ranges, sizeof(*region_info), GFP_KERNEL);
	if (!region_info)
		return -ENOMEM;
	len = num_ranges * sizeof(*region_info);

	node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
	if (!node) {
		/* don't leak region_info on an empty interval tree */
		err = -EINVAL;
		goto out_free_region_info;
	}
	for (int i = 0; i < num_ranges; i++) {
		struct pds_lm_dirty_region_info *ri = &region_info[i];
		u64 region_size = node->last - node->start + 1;
		u64 region_start = node->start;
		u32 page_count;

		page_count = DIV_ROUND_UP(region_size, region_page_size);

		ri->dma_base = cpu_to_le64(region_start);
		ri->page_count = cpu_to_le32(page_count);
		ri->page_size_log2 = ilog2(region_page_size);

		dev_dbg(&pdev->dev,
			"region_info[%d]: region_start 0x%llx region_end 0x%lx region_size 0x%llx page_count %u page_size %llu\n",
			i, region_start, node->last, region_size, page_count,
			region_page_size);

		node = interval_tree_iter_next(node, 0, ULONG_MAX);
	}

	regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len,
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, regions_dma)) {
		err = -ENOMEM;
		goto out_free_region_info;
	}

	err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, num_ranges);
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);
	if (err)
		goto out_free_region_info;

	err = pds_vfio_dirty_alloc_regions(pds_vfio, region_info,
					   region_page_size, num_ranges);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to allocate %d regions for tracking dirty regions: %pe\n",
			num_ranges, ERR_PTR(err));
		goto out_dirty_disable;
	}

	pds_vfio_dirty_set_enabled(pds_vfio);

	pds_vfio_print_guest_region_info(pds_vfio, max_regions);

	kfree(region_info);

	return 0;

out_dirty_disable:
	pds_vfio_dirty_disable_cmd(pds_vfio);
out_free_region_info:
	kfree(region_info);
	return err;
}

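/*
 * Disable dirty page tracking and release all tracking state. When
 * send_cmd is set, also tell the device to stop tracking and report
 * PDS_LM_STA_NONE as the new live migration status.
 */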
void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
{
	if (pds_vfio_dirty_is_enabled(pds_vfio)) {
		pds_vfio_dirty_set_disabled(pds_vfio);
		if (send_cmd)
			pds_vfio_dirty_disable_cmd(pds_vfio);
		pds_vfio_dirty_free_sgl(pds_vfio);
		pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty);
		pds_vfio_dirty_free_regions(&pds_vfio->dirty);
	}

	if (send_cmd)
		pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_NONE);
}

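/*
 * Transfer a slice of a region's host bitmap to or from the device:
 * read_seq pulls the device's dirty ("seq") state into the host bitmap,
 * write_ack pushes the host's acknowledgment ("ack") back. The vmalloc'd
 * bitmap is translated page by page into the region's SG list so the
 * device can DMA directly to/from it.
 */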
static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
				  struct pds_vfio_region *region,
				  unsigned long *seq_ack_bmp, u32 offset,
				  u32 bmp_bytes, bool read_seq)
{
	const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
	u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	unsigned long long npages;
	struct sg_table sg_table;
	struct scatterlist *sg;
	struct page **pages;
	u32 page_offset;
	const void *bmp;
	size_t size;
	u16 num_sge;
	int err;
	int i;

	bmp = (void *)((u64)seq_ack_bmp + offset);
	page_offset = offset_in_page(bmp);
	bmp -= page_offset;

	/*
	 * Start and end of bitmap section to seq/ack might not be page
	 * aligned, so use the page_offset to account for that so there
	 * will be enough pages to represent the bmp_bytes
	 */
	npages = DIV_ROUND_UP_ULL(bmp_bytes + page_offset, PAGE_SIZE);
	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (unsigned long long i = 0; i < npages; i++) {
		struct page *page = vmalloc_to_page(bmp);

		if (!page) {
			err = -EFAULT;
			goto out_free_pages;
		}

		pages[i] = page;
		bmp += PAGE_SIZE;
	}

	err = sg_alloc_table_from_pages(&sg_table, pages, npages, page_offset,
					bmp_bytes, GFP_KERNEL);
	if (err)
		goto out_free_pages;

	err = dma_map_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
	if (err)
		goto out_free_sg_table;

	for_each_sgtable_dma_sg(&sg_table, sg, i) {
		struct pds_lm_sg_elem *sg_elem = &region->sgl[i];

		sg_elem->addr = cpu_to_le64(sg_dma_address(sg));
		sg_elem->len = cpu_to_le32(sg_dma_len(sg));
	}

	num_sge = sg_table.nents;
	size = num_sge * sizeof(struct pds_lm_sg_elem);
	offset += region->dev_bmp_offset_start_byte;
	dma_sync_single_for_device(pdsc_dev, region->sgl_addr, size, dma_dir);
	err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, region->sgl_addr, num_sge,
					 offset, bmp_bytes, read_seq);
	if (err)
		dev_err(&pdev->dev,
			"Dirty bitmap %s failed offset %u bmp_bytes %u num_sge %u DMA 0x%llx: %pe\n",
			bmp_type_str, offset, bmp_bytes,
			num_sge, region->sgl_addr, ERR_PTR(err));
	dma_sync_single_for_cpu(pdsc_dev, region->sgl_addr, size, dma_dir);

	dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
out_free_sg_table:
	sg_free_table(&sg_table);
out_free_pages:
	kfree(pages);

	return err;
}

static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
				    struct pds_vfio_region *region,
				    u32 offset, u32 len)
{
	return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_ack,
				      offset, len, WRITE_ACK);
}

static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
				   struct pds_vfio_region *region,
				   u32 offset, u32 len)
{
	return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_seq,
				      offset, len, READ_SEQ);
}

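/*
 * Report dirty pages to VFIO by XORing seq against ack: any bit that
 * changed since the last acknowledgment marks a dirty page, whose IOVA
 * is computed from the bit's position within the region.
 */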
static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
					  struct pds_vfio_region *region,
					  struct iova_bitmap *dirty_bitmap,
					  u32 bmp_offset, u32 len_bytes)
{
	u64 page_size = region->page_size;
	u64 region_start = region->start;
	u32 bmp_offset_bit;
	__le64 *seq, *ack;
	int dword_count;

	dword_count = len_bytes / sizeof(u64);
	seq = (__le64 *)((u64)region->host_seq + bmp_offset);
	ack = (__le64 *)((u64)region->host_ack + bmp_offset);
	bmp_offset_bit = bmp_offset * 8;

	for (int i = 0; i < dword_count; i++) {
		u64 xor = le64_to_cpu(seq[i]) ^ le64_to_cpu(ack[i]);

		/* prepare for next write_ack call */
		ack[i] = seq[i];

		for (u8 bit_i = 0; bit_i < BITS_PER_TYPE(u64); ++bit_i) {
			if (xor & BIT(bit_i)) {
				u64 abs_bit_i = bmp_offset_bit +
					i * BITS_PER_TYPE(u64) + bit_i;
				u64 addr = abs_bit_i * page_size + region_start;

				iova_bitmap_set(dirty_bitmap, addr, page_size);
			}
		}
	}

	return 0;
}

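/* Find the tracked region that contains the given IOVA, if any. */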
static struct pds_vfio_region *
pds_vfio_get_region(struct pds_vfio_pci_device *pds_vfio, unsigned long iova)
{
	struct pds_vfio_dirty *dirty = &pds_vfio->dirty;

	for (int i = 0; i < dirty->num_regions; i++) {
		struct pds_vfio_region *region = &dirty->regions[i];

		if (iova >= region->start &&
		    iova < (region->start + region->size))
			return region;
	}

	return NULL;
}

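/*
 * Sync one IOVA range with VFIO's dirty bitmap: validate the range
 * against its tracked region, read the device's seq bitmap, report the
 * changed bits, then write the ack bitmap back to the device.
 */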
static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
			       struct iova_bitmap *dirty_bitmap,
			       unsigned long iova, unsigned long length)
{
	struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
	struct pds_vfio_region *region;
	u64 bmp_offset, bmp_bytes;
	u64 bitmap_size, pages;
	int err;

	dev_dbg(dev, "vf%u: Get dirty page bitmap\n", pds_vfio->vf_id);

	if (!pds_vfio_dirty_is_enabled(pds_vfio)) {
		dev_err(dev, "vf%u: Sync failed, dirty tracking is disabled\n",
			pds_vfio->vf_id);
		return -EINVAL;
	}

	region = pds_vfio_get_region(pds_vfio, iova);
	if (!region) {
		dev_err(dev, "vf%u: Failed to find region that contains iova 0x%lx length 0x%lx\n",
			pds_vfio->vf_id, iova, length);
		return -EINVAL;
	}

	pages = DIV_ROUND_UP(length, region->page_size);
	bitmap_size =
		round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;

	dev_dbg(dev,
		"vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
		pds_vfio->vf_id, iova, length, region->page_size,
		pages, bitmap_size);

	if (!length || ((iova - region->start + length) > region->size)) {
		dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
			iova, length);
		return -EINVAL;
	}

	/* bitmap is modified in 64 bit chunks */
	bmp_bytes = ALIGN(DIV_ROUND_UP(length / region->page_size,
				       sizeof(u64)), sizeof(u64));
	if (bmp_bytes != bitmap_size) {
		dev_err(dev,
			"Calculated bitmap bytes %llu not equal to bitmap size %llu\n",
			bmp_bytes, bitmap_size);
		return -EINVAL;
	}

	if (bmp_bytes > region->bmp_bytes) {
		dev_err(dev,
			"Calculated bitmap bytes %llu larger than region's cached bmp_bytes %llu\n",
			bmp_bytes, region->bmp_bytes);
		return -EINVAL;
	}

	bmp_offset = DIV_ROUND_UP((iova - region->start) /
				  region->page_size, sizeof(u64));

	dev_dbg(dev,
		"Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
		iova, length, bmp_offset, bmp_bytes);

	err = pds_vfio_dirty_read_seq(pds_vfio, region, bmp_offset, bmp_bytes);
	if (err)
		return err;

	err = pds_vfio_dirty_process_bitmaps(pds_vfio, region, dirty_bitmap,
					     bmp_offset, bmp_bytes);
	if (err)
		return err;

	err = pds_vfio_dirty_write_ack(pds_vfio, region, bmp_offset, bmp_bytes);
	if (err)
		return err;

	return 0;
}

int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
				unsigned long length, struct iova_bitmap *dirty)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int err;

	mutex_lock(&pds_vfio->state_mutex);
	err = pds_vfio_dirty_sync(pds_vfio, dirty, iova, length);
	mutex_unlock(&pds_vfio->state_mutex);

	return err;
}

int pds_vfio_dma_logging_start(struct vfio_device *vdev,
			       struct rb_root_cached *ranges, u32 nnodes,
			       u64 *page_size)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);
	int err;

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_send_host_vf_lm_status_cmd(pds_vfio, PDS_LM_STA_IN_PROGRESS);
	err = pds_vfio_dirty_enable(pds_vfio, ranges, nnodes, page_size);
	mutex_unlock(&pds_vfio->state_mutex);

	return err;
}

int pds_vfio_dma_logging_stop(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *pds_vfio =
		container_of(vdev, struct pds_vfio_pci_device,
			     vfio_coredev.vdev);

	mutex_lock(&pds_vfio->state_mutex);
	pds_vfio_dirty_disable(pds_vfio, true);
	mutex_unlock(&pds_vfio->state_mutex);

	return 0;
}