// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
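
/*
 * With 4KiB pages, LZ4_MAX_DISTANCE_PAGES is DIV_ROUND_UP(65535, 4096) + 1 =
 * 17: one extra page since the 64KiB match window may straddle a page
 * boundary. LZ4_DECOMPRESS_INPLACE_MARGIN() is the minimum tail gap the
 * output buffer must keep past the decompressed end before in-place
 * decompression is attempted.
 */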

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 */
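
/*
 * A bounce page may only be reused after the sliding window has fully moved
 * past it (i.e. lz4_max_distance_pages iterations later).  The bounced[]
 * bitmap below marks whether the slot at the same position in the previous
 * round held a bounce page, and availables[] is a small stack of bounce
 * pages that are already safe to reuse.
 */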
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
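	/* non-NULL kaddr: all output pages are physically consecutive */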
	return kaddr ? 1 : 0;
}
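
/*
 * The compressed source can be presented to the LZ4 decoder in several ways
 * ("maptype"): 0) the single compressed page is mapped directly; 1) multiple
 * compressed pages are stitched together with erofs_vm_map_ram(); 2) the
 * compressed data is copied into a temporary buffer (z_erofs_get_gbuf())
 * since overlapped in-place I/O would otherwise be clobbered; 3) true
 * in-place decompression within the tail of the output buffer when enough
 * margin is left.
 */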
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i)
			if (rq->out[ctx->outpages - ctx->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = z_erofs_get_gbuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}
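
	/*
	 * Copy the compressed pages into the contiguous buffer; only the
	 * first page carries a leading margin (*inputmargin) to skip.
	 */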
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
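
/*
 * With zero_padding, the compressed data sits at the end of its block and is
 * preceded by zero bytes, so skipping the leading zeros recovers both the
 * exact compressed size and the real starting offset within the page.
 */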
/*
 * Get the exact inputsize with zero_padding feature.
 * - For LZ4, it should work if zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}
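
/*
 * Decompress one LZ4 pcluster into the (virtually) contiguous buffer `dst`:
 * map the first compressed page, trim any zero padding, choose a source
 * mapping via z_erofs_lz4_handle_overlap(), run the LZ4 decoder, and finally
 * release the source mapping according to maptype.
 */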
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *dst)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
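		/* in-place also requires src to end on a block boundary */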
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
|
|
|
|
|
2021-10-11 05:31:44 +08:00
|
|
|
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
|
2021-10-22 17:01:20 +08:00
|
|
|
struct page **pagepool)
|
2019-06-24 15:22:55 +08:00
|
|
|
{
|
2021-12-28 13:46:00 +08:00
|
|
|
struct z_erofs_lz4_decompress_ctx ctx;
|
2019-06-24 15:22:55 +08:00
|
|
|
unsigned int dst_maptype;
|
|
|
|
void *dst;
|
2021-04-07 12:39:26 +08:00
|
|
|
int ret;
|
2019-06-24 15:22:55 +08:00
|
|
|
|
2021-12-28 13:46:00 +08:00
|
|
|
ctx.rq = rq;
|
|
|
|
ctx.oend = rq->pageofs_out + rq->outputsize;
|
|
|
|
ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
|
|
|
|
ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
|
|
|
|
|
2021-10-14 14:57:44 +08:00
|
|
|
/* one optimized fast path only for non bigpcluster cases yet */
|
2021-12-28 13:46:00 +08:00
|
|
|
if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
|
2021-10-14 14:57:44 +08:00
|
|
|
DBG_BUGON(!*rq->out);
|
2023-06-28 00:12:39 +08:00
|
|
|
dst = kmap_local_page(*rq->out);
|
2021-10-14 14:57:44 +08:00
|
|
|
dst_maptype = 0;
|
|
|
|
goto dstmap_out;
|
2019-06-24 15:22:55 +08:00
|
|
|
}
|
|
|
|
|
2021-04-07 12:39:26 +08:00
|
|
|
/* general decoding path which can be used for all cases */
|
2021-12-28 13:46:00 +08:00
|
|
|
ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
|
|
|
|
if (ret < 0) {
|
2019-06-24 15:22:55 +08:00
|
|
|
return ret;
|
2021-12-28 13:46:00 +08:00
|
|
|
} else if (ret > 0) {
|
2019-06-24 15:22:55 +08:00
|
|
|
dst = page_address(*rq->out);
|
|
|
|
dst_maptype = 1;
|
2021-12-28 13:46:00 +08:00
|
|
|
} else {
|
|
|
|
dst = erofs_vm_map_ram(rq->out, ctx.outpages);
|
|
|
|
if (!dst)
|
|
|
|
return -ENOMEM;
|
|
|
|
dst_maptype = 2;
|
2019-06-24 15:22:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
dstmap_out:
|
2023-12-06 12:55:34 +08:00
|
|
|
ret = z_erofs_lz4_decompress_mem(&ctx, dst);
|
2019-06-24 15:22:55 +08:00
|
|
|
if (!dst_maptype)
|
2023-06-28 00:12:39 +08:00
|
|
|
kunmap_local(dst);
|
2019-06-24 15:22:55 +08:00
|
|
|
else if (dst_maptype == 2)
|
2021-12-28 13:46:00 +08:00
|
|
|
vm_unmap_ram(dst, ctx.outpages);
|
2019-06-24 15:22:55 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-09-23 10:11:21 +08:00
|
|
|
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
|
|
|
|
struct page **pagepool)
|
2019-06-24 15:22:55 +08:00
|
|
|
{
|
2023-12-06 17:10:56 +08:00
|
|
|
const unsigned int nrpages_in =
|
|
|
|
PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
|
|
|
|
const unsigned int nrpages_out =
|
2019-06-24 15:22:55 +08:00
|
|
|
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
|
2023-12-06 17:10:56 +08:00
|
|
|
const unsigned int bs = rq->sb->s_blocksize;
|
|
|
|
unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
|
|
|
|
u8 *kin;
|
|
|
|
|
2024-03-04 11:53:39 +08:00
|
|
|
if (rq->outputsize > rq->inputsize)
|
|
|
|
return -EOPNOTSUPP;
|
2023-12-06 17:10:56 +08:00
|
|
|
if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
|
|
|
|
cur = bs - (rq->pageofs_out & (bs - 1));
|
|
|
|
pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
|
|
|
|
cur = min(cur, rq->outputsize);
|
|
|
|
if (cur && rq->out[0]) {
|
|
|
|
kin = kmap_local_page(rq->in[nrpages_in - 1]);
|
|
|
|
if (rq->out[0] == rq->in[nrpages_in - 1]) {
|
|
|
|
memmove(kin + rq->pageofs_out, kin + pi, cur);
|
|
|
|
flush_dcache_page(rq->out[0]);
|
|
|
|
} else {
|
|
|
|
memcpy_to_page(rq->out[0], rq->pageofs_out,
|
|
|
|
kin + pi, cur);
|
|
|
|
}
|
|
|
|
kunmap_local(kin);
|
|
|
|
}
|
|
|
|
rq->outputsize -= cur;
|
2019-06-24 15:22:55 +08:00
|
|
|
}
|
|
|
|
|
2023-12-06 17:10:56 +08:00
|
|
|
for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
|
|
|
|
insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
|
|
|
|
rq->outputsize -= insz;
|
|
|
|
if (!rq->in[ni])
|
|
|
|
continue;
|
|
|
|
kin = kmap_local_page(rq->in[ni]);
|
|
|
|
pi = 0;
|
|
|
|
do {
|
|
|
|
no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
|
|
|
|
po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
|
|
|
|
DBG_BUGON(no >= nrpages_out);
|
|
|
|
cnt = min(insz - pi, PAGE_SIZE - po);
|
|
|
|
if (rq->out[no] == rq->in[ni]) {
|
|
|
|
memmove(kin + po,
|
|
|
|
kin + rq->pageofs_in + pi, cnt);
|
|
|
|
flush_dcache_page(rq->out[no]);
|
|
|
|
} else if (rq->out[no]) {
|
|
|
|
memcpy_to_page(rq->out[no], po,
|
|
|
|
kin + rq->pageofs_in + pi, cnt);
|
|
|
|
}
|
|
|
|
pi += cnt;
|
|
|
|
} while (pi < insz);
|
|
|
|
kunmap_local(kin);
|
2019-06-24 15:22:55 +08:00
|
|
|
}
|
2023-12-06 17:10:56 +08:00
|
|
|
DBG_BUGON(ni > nrpages_in);
|
2019-06-24 15:22:55 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-04-26 16:44:49 +08:00
|
|
|
const struct z_erofs_decompressor erofs_decompressors[] = {
|
2021-10-11 05:31:44 +08:00
|
|
|
[Z_EROFS_COMPRESSION_SHIFTED] = {
|
2022-09-23 10:11:21 +08:00
|
|
|
.decompress = z_erofs_transform_plain,
|
2021-10-11 05:31:44 +08:00
|
|
|
.name = "shifted"
|
|
|
|
},
|
2022-09-23 10:11:21 +08:00
|
|
|
[Z_EROFS_COMPRESSION_INTERLACED] = {
|
|
|
|
.decompress = z_erofs_transform_plain,
|
|
|
|
.name = "interlaced"
|
|
|
|
},
|
2021-10-11 05:31:44 +08:00
|
|
|
[Z_EROFS_COMPRESSION_LZ4] = {
|
2023-10-22 21:09:57 +08:00
|
|
|
.config = z_erofs_load_lz4_config,
|
2021-10-11 05:31:44 +08:00
|
|
|
.decompress = z_erofs_lz4_decompress,
|
|
|
|
.name = "lz4"
|
|
|
|
},
|
2021-10-11 05:31:45 +08:00
|
|
|
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
|
|
|
|
[Z_EROFS_COMPRESSION_LZMA] = {
|
2023-10-22 21:09:57 +08:00
|
|
|
.config = z_erofs_load_lzma_config,
|
2021-10-11 05:31:45 +08:00
|
|
|
.decompress = z_erofs_lzma_decompress,
|
|
|
|
.name = "lzma"
|
|
|
|
},
|
|
|
|
#endif
|
erofs: DEFLATE compression support
Add DEFLATE compression as the 3rd supported algorithm.
DEFLATE has been a popular general-purpose compression algorithm for
quite a long time (many widespread formats such as gzip, zlib, zip and
png are all based on it); as Apple's documentation puts it, "If you
require interoperability with non-Apple devices, use
COMPRESSION_ZLIB. [1]".
Due to its popularity, there are several on-market hardware DEFLATE
accelerators, such as (s390) DFLTCC, (Intel) IAA/QAT and the
(HiSilicon) ZIP accelerator. In addition, there are also several
high-performance IP cores and even open-source FPGA approaches
available for DEFLATE.
Therefore, it's useful to support DEFLATE compression in order to find
a way to utilize these accelerators for asynchronous I/Os and benefit
from them later.
Besides, it's a good trade-off between compression ratio and
performance compared to LZ4 and LZMA. The DEFLATE core format is
simple and easy to understand, so the code size of its decompressor
is small even for bootloader use cases. The runtime memory
consumption is quite limited too (e.g. 32K + ~7K for each zlib
stream). As usual, EROFS outperforms similar approaches too.
Alternatively, DEFLATE can still be used for specific files only,
since EROFS supports multiple compression algorithms in one image.
[1] https://developer.apple.com/documentation/compression/compression_algorithm
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230810154859.118330-1-hsiangkao@linux.alibaba.com
2023-08-10 23:48:59 +08:00
|
|
|
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
|
|
|
|
[Z_EROFS_COMPRESSION_DEFLATE] = {
|
2023-10-22 21:09:57 +08:00
|
|
|
.config = z_erofs_load_deflate_config,
|
2023-08-10 23:48:59 +08:00
|
|
|
.decompress = z_erofs_deflate_decompress,
|
|
|
|
.name = "deflate"
|
|
|
|
},
|
|
|
|
#endif
|
2024-05-09 07:44:53 +08:00
|
|
|
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
|
|
|
|
[Z_EROFS_COMPRESSION_ZSTD] = {
|
|
|
|
.config = z_erofs_load_zstd_config,
|
|
|
|
.decompress = z_erofs_zstd_decompress,
|
|
|
|
.name = "zstd"
|
|
|
|
},
|
|
|
|
#endif
|
2021-10-11 05:31:44 +08:00
|
|
|
};
|
2023-10-22 21:09:57 +08:00
|
|
|
|
|
|
|
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
|
|
|
|
{
|
|
|
|
struct erofs_sb_info *sbi = EROFS_SB(sb);
|
|
|
|
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
|
|
|
|
unsigned int algs, alg;
|
|
|
|
erofs_off_t offset;
|
|
|
|
int size, ret = 0;
|
|
|
|
|
|
|
|
if (!erofs_sb_has_compr_cfgs(sbi)) {
|
2024-01-13 23:06:02 +08:00
|
|
|
sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
|
2023-10-22 21:09:57 +08:00
|
|
|
return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
|
|
|
|
if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
|
|
|
|
erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
|
|
|
|
sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
erofs_init_metabuf(&buf, sb);
|
|
|
|
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
|
|
|
|
alg = 0;
|
|
|
|
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
|
|
|
|
void *data;
|
|
|
|
|
|
|
|
if (!(algs & 1))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
data = erofs_read_metadata(sb, &buf, &offset, &size);
|
|
|
|
if (IS_ERR(data)) {
|
|
|
|
ret = PTR_ERR(data);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (alg >= ARRAY_SIZE(erofs_decompressors) ||
|
|
|
|
!erofs_decompressors[alg].config) {
|
|
|
|
erofs_err(sb, "algorithm %d isn't enabled on this kernel",
|
|
|
|
alg);
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
} else {
|
|
|
|
ret = erofs_decompressors[alg].config(sb,
|
|
|
|
dsb, data, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(data);
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
erofs_put_metabuf(&buf);
|
|
|
|
return ret;
|
|
|
|
}
|