mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
ffa09b3bd0
Add DEFLATE compression as the 3rd supported algorithm.

DEFLATE has been a popular general-purpose compression algorithm for quite a long time (many widespread formats such as gzip, zlib, zip and png are based on it); as Apple's documentation puts it, "If you require interoperability with non-Apple devices, use COMPRESSION_ZLIB." [1]

Due to its popularity, there are several on-market hardware DEFLATE accelerators, such as (s390) DFLTCC, (Intel) IAA/QAT and (HiSilicon) ZIP accelerators. In addition, several high-performance IP cores and even open-source FPGA approaches are available for DEFLATE. Therefore, it's useful to support DEFLATE compression in order to find a way to utilize these accelerators for asynchronous I/Os and benefit from them later.

Besides, DEFLATE is a good trade-off between compression ratio and performance compared with LZ4 and LZMA. The DEFLATE core format is simple and easy to understand, so the code size of its decompressor is small even for bootloader use cases. The runtime memory consumption is quite limited too (e.g. a 32 KiB window + ~7 KiB of state for each zlib stream). As usual, EROFS outperforms similar approaches here too. Alternatively, DEFLATE could still be used only for some specific files, since EROFS supports multiple compression algorithms in one image.

[1] https://developer.apple.com/documentation/compression/compression_algorithm

Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230810154859.118330-1-hsiangkao@linux.alibaba.com
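The memory figure quoted above corresponds to raw DEFLATE's 32 KiB sliding window plus a small amount of per-stream inflate state. As a point of reference only, the sketch below decodes a raw DEFLATE stream with user-space zlib; it is not the EROFS kernel decompressor, and the function name, buffers and sizes are illustrative assumptions.

/* Minimal user-space sketch: decode one raw DEFLATE stream with zlib. */
#include <string.h>
#include <zlib.h>

static int inflate_raw(const unsigned char *in, size_t insize,
                       unsigned char *out, size_t outsize)
{
        z_stream strm;
        int ret;

        memset(&strm, 0, sizeof(strm));
        /* negative window bits => raw DEFLATE (headerless), 32 KiB window */
        if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
                return -1;

        strm.next_in = (unsigned char *)in;
        strm.avail_in = (uInt)insize;
        strm.next_out = out;
        strm.avail_out = (uInt)outsize;

        ret = inflate(&strm, Z_FINISH); /* whole stream fits in one shot here */
        inflateEnd(&strm);
        return ret == Z_STREAM_END ? (int)(outsize - strm.avail_out) : -1;
}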
100 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_COMPRESS_H
#define __EROFS_FS_COMPRESS_H

#include "internal.h"

struct z_erofs_decompress_req {
        struct super_block *sb;
        struct page **in, **out;

        unsigned short pageofs_in, pageofs_out;
        unsigned int inputsize, outputsize;

        /* indicate the algorithm will be used for decompression */
        unsigned int alg;
        bool inplace_io, partial_decoding, fillgaps;
};

struct z_erofs_decompressor {
        int (*decompress)(struct z_erofs_decompress_req *rq,
                          struct page **pagepool);
        char *name;
};
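
/*
 * Illustration (assumption, not part of the original header): each algorithm
 * provides one such operation table, and the back-ends declared at the end of
 * this file are expected to show up as slots of the erofs_decompressors[]
 * array declared below, roughly like
 *
 *      [Z_EROFS_COMPRESSION_DEFLATE] = {
 *              .decompress     = z_erofs_deflate_decompress,
 *              .name           = "deflate"
 *      },
 *
 * so that a request's ->alg field can index straight into the table.
 */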

/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE         (-1UL << 2)
#define Z_EROFS_PREALLOCATED_PAGE       (-2UL << 2)

/*
 * For all pages in a pcluster, page->private should be one of
 * Type                         Last 2bits      page->private
 * short-lived page             00              Z_EROFS_SHORTLIVED_PAGE
 * preallocated page (tryalloc) 00              Z_EROFS_PREALLOCATED_PAGE
 * cached/managed page          00              pointer to z_erofs_pcluster
 * online page (file-backed,    01/10/11        sub-index << 2 | count
 *              some pages can be used for inplace I/O)
 *
 * page->mapping should be one of
 * Type                 page->mapping
 * short-lived page     NULL
 * preallocated page    NULL
 * cached/managed page  non-NULL or NULL (invalidated/truncated page)
 * online page          non-NULL
 *
 * For all managed pages, PG_private should be set with 1 extra refcount,
 * which is used for page reclaim / migration.
 */
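
/*
 * Sketch of the tagging rule above (hypothetical helper, not part of the
 * original header): the three special cases all keep the low two bits of
 * page->private clear, so a non-zero low pair identifies an online page.
 */
static inline bool z_erofs_example_is_online_page(struct page *page)
{
        /* online pages encode "sub-index << 2 | count" with count != 0 */
        return (page->private & 3) != 0;
}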

/*
 * short-lived pages are pages directly from buddy system with specific
 * page->private (no need to set PagePrivate since these are non-LRU /
 * non-movable pages and bypass reclaim / migration code).
 */
static inline bool z_erofs_is_shortlived_page(struct page *page)
{
        if (page->private != Z_EROFS_SHORTLIVED_PAGE)
                return false;

        DBG_BUGON(page->mapping);
        return true;
}

static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
                                              struct page *page)
{
        if (!z_erofs_is_shortlived_page(page))
                return false;

        /* short-lived pages should not be used by others at the same time */
        if (page_ref_count(page) > 1) {
                put_page(page);
        } else {
                /* follow the pcluster rule above. */
                erofs_pagepool_add(pagepool, page);
        }
        return true;
}
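
/*
 * Usage sketch (hypothetical helper, not part of the original header): a
 * short-lived page is tagged right after allocation by stashing
 * Z_EROFS_SHORTLIVED_PAGE in page->private, and is later freed or recycled
 * into the local pagepool via z_erofs_put_shortlivedpage() above.
 */
static inline struct page *z_erofs_example_alloc_shortlived(gfp_t gfp)
{
        struct page *page = alloc_page(gfp);

        if (page)
                set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
        return page;
}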

#define MNGD_MAPPING(sbi)       ((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
                                         struct page *page)
{
        return page->mapping == MNGD_MAPPING(sbi);
}

int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
                         unsigned int padbufsize);
extern const struct z_erofs_decompressor erofs_decompressors[];
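
/*
 * Dispatch sketch (hypothetical helper, not part of the original header):
 * callers fill in a z_erofs_decompress_req and use its ->alg field to pick
 * the matching back-end from erofs_decompressors[].
 */
static inline int z_erofs_example_decompress(struct z_erofs_decompress_req *rq,
                                             struct page **pagepool)
{
        return erofs_decompressors[rq->alg].decompress(rq, pagepool);
}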

/* prototypes for specific algorithms */
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
                            struct page **pagepool);
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
                               struct page **pagepool);
#endif