erofs: get rid of `struct z_erofs_collector'
Avoid `struct z_erofs_collector' since there is another context
structure called "struct z_erofs_decompress_frontend". No logic changes.

Link: https://lore.kernel.org/r/20220301194951.106227-1-hsiangkao@linux.alibaba.com
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
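For orientation: the change flattens the nested collector context into the
frontend context, so every fe->clt.X access in the read path becomes fe->X,
and call sites pass &f instead of &f.clt. A simplified before/after sketch,
abridged to the fields actually visible in this diff (fields not shown in
the hunks below are unchanged and omitted here):

	/* before: two context structures, one nested inside the other */
	struct z_erofs_collector {
		struct z_erofs_pagevec_ctor vector;
		struct z_erofs_pcluster *pcl, *tailpcl;
		z_erofs_next_pcluster_t owned_head;
		enum z_erofs_collectmode mode;
	};

	struct z_erofs_decompress_frontend {
		struct inode *const inode;
		struct z_erofs_collector clt;	/* accessed as fe->clt.pcl, ... */
		struct erofs_map_blocks map;
		bool readahead;
		/* ... */
	};

	/* after: one flat frontend context */
	struct z_erofs_decompress_frontend {
		struct inode *const inode;
		struct erofs_map_blocks map;
		struct z_erofs_pagevec_ctor vector;
		struct z_erofs_pcluster *pcl, *tailpcl;	/* now fe->pcl, ... */
		z_erofs_next_pcluster_t owned_head;
		enum z_erofs_collectmode mode;
		bool readahead;
		/* ... */
	};

The COLLECTOR_INIT() helper macro is folded into DECOMPRESS_FRONTEND_INIT()
the same way, as the first hunks below show.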
parent ed6e0401e6
commit 5c6dcc57e2

fs/erofs/zdata.c (163 changed lines)
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -192,7 +192,10 @@ enum z_erofs_collectmode {
 	COLLECT_PRIMARY_FOLLOWED,
 };
 
-struct z_erofs_collector {
+struct z_erofs_decompress_frontend {
+	struct inode *const inode;
+	struct erofs_map_blocks map;
+
 	struct z_erofs_pagevec_ctor vector;
 
 	struct z_erofs_pcluster *pcl, *tailpcl;
@@ -202,13 +205,6 @@ struct z_erofs_collector {
 	z_erofs_next_pcluster_t owned_head;
 
 	enum z_erofs_collectmode mode;
-};
-
-struct z_erofs_decompress_frontend {
-	struct inode *const inode;
-
-	struct z_erofs_collector clt;
-	struct erofs_map_blocks map;
 
 	bool readahead;
 	/* used for applying cache strategy on the fly */
@@ -216,30 +212,26 @@ struct z_erofs_decompress_frontend {
 	erofs_off_t headoffset;
 };
 
-#define COLLECTOR_INIT() { \
-	.owned_head = Z_EROFS_PCLUSTER_TAIL, \
-	.mode = COLLECT_PRIMARY_FOLLOWED }
-
 #define DECOMPRESS_FRONTEND_INIT(__i) { \
-	.inode = __i, .clt = COLLECTOR_INIT(), \
-	.backmost = true, }
+	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
+	.mode = COLLECT_PRIMARY_FOLLOWED }
 
 static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
 static DEFINE_MUTEX(z_pagemap_global_lock);
 
-static void preload_compressed_pages(struct z_erofs_collector *clt,
+static void preload_compressed_pages(struct z_erofs_decompress_frontend *fe,
 				     struct address_space *mc,
 				     enum z_erofs_cache_alloctype type,
 				     struct page **pagepool)
 {
-	struct z_erofs_pcluster *pcl = clt->pcl;
+	struct z_erofs_pcluster *pcl = fe->pcl;
 	bool standalone = true;
 	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
 			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 	struct page **pages;
 	pgoff_t index;
 
-	if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
+	if (fe->mode < COLLECT_PRIMARY_FOLLOWED)
 		return;
 
 	pages = pcl->compressed_pages;
@@ -288,7 +280,7 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
 	 * managed cache since it can be moved to the bypass queue instead.
 	 */
 	if (standalone)
-		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+		fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 }
 
 /* called by erofs_shrinker to get rid of all compressed_pages */
@@ -350,47 +342,47 @@ int erofs_try_to_free_cached_page(struct page *page)
 }
 
 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
 				   struct page *page)
 {
-	struct z_erofs_pcluster *const pcl = clt->pcl;
+	struct z_erofs_pcluster *const pcl = fe->pcl;
 
-	while (clt->icpage_ptr > pcl->compressed_pages)
-		if (!cmpxchg(--clt->icpage_ptr, NULL, page))
+	while (fe->icpage_ptr > pcl->compressed_pages)
+		if (!cmpxchg(--fe->icpage_ptr, NULL, page))
 			return true;
 	return false;
 }
 
 /* callers must be with collection lock held */
-static int z_erofs_attach_page(struct z_erofs_collector *clt,
+static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 			       struct page *page, enum z_erofs_page_type type,
 			       bool pvec_safereuse)
 {
 	int ret;
 
 	/* give priority for inplaceio */
-	if (clt->mode >= COLLECT_PRIMARY &&
+	if (fe->mode >= COLLECT_PRIMARY &&
 	    type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
-	    z_erofs_try_inplace_io(clt, page))
+	    z_erofs_try_inplace_io(fe, page))
 		return 0;
 
-	ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
+	ret = z_erofs_pagevec_enqueue(&fe->vector, page, type,
 				      pvec_safereuse);
-	clt->cl->vcnt += (unsigned int)ret;
+	fe->cl->vcnt += (unsigned int)ret;
 	return ret ? 0 : -EAGAIN;
 }
 
-static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
+static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 {
-	struct z_erofs_pcluster *pcl = clt->pcl;
-	z_erofs_next_pcluster_t *owned_head = &clt->owned_head;
+	struct z_erofs_pcluster *pcl = f->pcl;
+	z_erofs_next_pcluster_t *owned_head = &f->owned_head;
 
 	/* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
 	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
 		    *owned_head) == Z_EROFS_PCLUSTER_NIL) {
 		*owned_head = &pcl->next;
 		/* so we can attach this pcluster to our submission chain. */
-		clt->mode = COLLECT_PRIMARY_FOLLOWED;
+		f->mode = COLLECT_PRIMARY_FOLLOWED;
 		return;
 	}
 
@@ -401,24 +393,24 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_collector *clt)
 	if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 		    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
 		*owned_head = Z_EROFS_PCLUSTER_TAIL;
-		clt->mode = COLLECT_PRIMARY_HOOKED;
-		clt->tailpcl = NULL;
+		f->mode = COLLECT_PRIMARY_HOOKED;
+		f->tailpcl = NULL;
 		return;
 	}
 	/* type 3, it belongs to a chain, but it isn't the end of the chain */
-	clt->mode = COLLECT_PRIMARY;
+	f->mode = COLLECT_PRIMARY;
 }
 
-static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
+static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe,
 				     struct inode *inode,
 				     struct erofs_map_blocks *map)
 {
-	struct z_erofs_pcluster *pcl = clt->pcl;
+	struct z_erofs_pcluster *pcl = fe->pcl;
 	struct z_erofs_collection *cl;
 	unsigned int length;
 
 	/* to avoid unexpected loop formed by corrupted images */
-	if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
+	if (fe->owned_head == &pcl->next || pcl == fe->tailpcl) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
@@ -449,15 +441,15 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 	}
 	mutex_lock(&cl->lock);
 	/* used to check tail merging loop due to corrupted images */
-	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
-		clt->tailpcl = pcl;
+	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+		fe->tailpcl = pcl;
 
-	z_erofs_try_to_claim_pcluster(clt);
-	clt->cl = cl;
+	z_erofs_try_to_claim_pcluster(fe);
+	fe->cl = cl;
 	return 0;
 }
 
-static int z_erofs_register_collection(struct z_erofs_collector *clt,
+static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe,
 				       struct inode *inode,
 				       struct erofs_map_blocks *map)
 {
@@ -485,8 +477,8 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 		       Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
 
 	/* new pclusters should be claimed as type 1, primary and followed */
-	pcl->next = clt->owned_head;
-	clt->mode = COLLECT_PRIMARY_FOLLOWED;
+	pcl->next = fe->owned_head;
+	fe->mode = COLLECT_PRIMARY_FOLLOWED;
 
 	cl = z_erofs_primarycollection(pcl);
 	cl->pageofs = map->m_la & ~PAGE_MASK;
@@ -512,18 +504,18 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	}
 
 	if (grp != &pcl->obj) {
-		clt->pcl = container_of(grp,
+		fe->pcl = container_of(grp,
 				struct z_erofs_pcluster, obj);
 		err = -EEXIST;
 		goto err_out;
 	}
 	}
 	/* used to check tail merging loop due to corrupted images */
-	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
-		clt->tailpcl = pcl;
-	clt->owned_head = &pcl->next;
-	clt->pcl = pcl;
-	clt->cl = cl;
+	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+		fe->tailpcl = pcl;
+	fe->owned_head = &pcl->next;
+	fe->pcl = pcl;
+	fe->cl = cl;
 	return 0;
 
 err_out:
@@ -532,18 +524,18 @@ err_out:
 	return err;
 }
 
-static int z_erofs_collector_begin(struct z_erofs_collector *clt,
+static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe,
 				   struct inode *inode,
 				   struct erofs_map_blocks *map)
 {
 	struct erofs_workgroup *grp;
 	int ret;
 
-	DBG_BUGON(clt->cl);
+	DBG_BUGON(fe->cl);
 
 	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */
-	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
-	DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
 	if (map->m_flags & EROFS_MAP_META) {
 		if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
@@ -555,28 +547,28 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
 
 	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 	if (grp) {
-		clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
+		fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
 	} else {
 tailpacking:
-		ret = z_erofs_register_collection(clt, inode, map);
+		ret = z_erofs_register_collection(fe, inode, map);
 		if (!ret)
 			goto out;
 		if (ret != -EEXIST)
 			return ret;
 	}
 
-	ret = z_erofs_lookup_collection(clt, inode, map);
+	ret = z_erofs_lookup_collection(fe, inode, map);
 	if (ret) {
-		erofs_workgroup_put(&clt->pcl->obj);
+		erofs_workgroup_put(&fe->pcl->obj);
 		return ret;
 	}
 
 out:
-	z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
-				  clt->cl->pagevec, clt->cl->vcnt);
+	z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS,
+				  fe->cl->pagevec, fe->cl->vcnt);
 	/* since file-backed online pages are traversed in reverse order */
-	clt->icpage_ptr = clt->pcl->compressed_pages +
-			z_erofs_pclusterpages(clt->pcl);
+	fe->icpage_ptr = fe->pcl->compressed_pages +
+			z_erofs_pclusterpages(fe->pcl);
 	return 0;
 }
 
@@ -610,24 +602,24 @@ static void z_erofs_collection_put(struct z_erofs_collection *cl)
 	erofs_workgroup_put(&pcl->obj);
 }
 
-static bool z_erofs_collector_end(struct z_erofs_collector *clt)
+static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
 {
-	struct z_erofs_collection *cl = clt->cl;
+	struct z_erofs_collection *cl = fe->cl;
 
 	if (!cl)
 		return false;
 
-	z_erofs_pagevec_ctor_exit(&clt->vector, false);
+	z_erofs_pagevec_ctor_exit(&fe->vector, false);
 	mutex_unlock(&cl->lock);
 
 	/*
 	 * if all pending pages are added, don't hold its reference
 	 * any longer if the pcluster isn't hosted by ourselves.
 	 */
-	if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
+	if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
 		z_erofs_collection_put(cl);
 
-	clt->cl = NULL;
+	fe->cl = NULL;
 	return true;
 }
 
@@ -651,7 +643,6 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	struct inode *const inode = fe->inode;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct erofs_map_blocks *const map = &fe->map;
-	struct z_erofs_collector *const clt = &fe->clt;
 	const loff_t offset = page_offset(page);
 	bool tight = true;
 
@@ -672,7 +663,7 @@ repeat:
 	if (offset + cur >= map->m_la &&
 	    offset + cur < map->m_la + map->m_llen) {
 		/* didn't get a valid collection previously (very rare) */
-		if (!clt->cl)
+		if (!fe->cl)
 			goto restart_now;
 		goto hitted;
 	}
@@ -680,7 +671,7 @@ repeat:
 	/* go ahead the next map_blocks */
 	erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
 
-	if (z_erofs_collector_end(clt))
+	if (z_erofs_collector_end(fe))
 		fe->backmost = false;
 
 	map->m_la = offset + cur;
@@ -693,11 +684,11 @@ restart_now:
 	if (!(map->m_flags & EROFS_MAP_MAPPED))
 		goto hitted;
 
-	err = z_erofs_collector_begin(clt, inode, map);
+	err = z_erofs_collector_begin(fe, inode, map);
 	if (err)
 		goto err_out;
 
-	if (z_erofs_is_inline_pcluster(clt->pcl)) {
+	if (z_erofs_is_inline_pcluster(fe->pcl)) {
 		void *mp;
 
 		mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
@@ -709,8 +700,8 @@ restart_now:
 			goto err_out;
 		}
 		get_page(fe->map.buf.page);
-		WRITE_ONCE(clt->pcl->compressed_pages[0], fe->map.buf.page);
-		clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+		WRITE_ONCE(fe->pcl->compressed_pages[0], fe->map.buf.page);
+		fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
 	} else {
 		/* preload all compressed pages (can change mode if needed) */
 		if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
@@ -719,7 +710,7 @@ restart_now:
 		else
 			cache_strategy = DONTALLOC;
 
-		preload_compressed_pages(clt, MNGD_MAPPING(sbi),
+		preload_compressed_pages(fe, MNGD_MAPPING(sbi),
 					 cache_strategy, pagepool);
 	}
 
@@ -730,8 +721,8 @@ hitted:
 	 * those chains are handled asynchronously thus the page cannot be used
 	 * for inplace I/O or pagevec (should be processed in strict order.)
 	 */
-	tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
-		  clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+	tight &= (fe->mode >= COLLECT_PRIMARY_HOOKED &&
+		  fe->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
 
 	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
@@ -746,18 +737,18 @@ hitted:
 				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
 
 	if (cur)
-		tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
+		tight &= (fe->mode >= COLLECT_PRIMARY_FOLLOWED);
 
 retry:
-	err = z_erofs_attach_page(clt, page, page_type,
-				  clt->mode >= COLLECT_PRIMARY_FOLLOWED);
+	err = z_erofs_attach_page(fe, page, page_type,
+				  fe->mode >= COLLECT_PRIMARY_FOLLOWED);
 	/* should allocate an additional short-lived page for pagevec */
 	if (err == -EAGAIN) {
 		struct page *const newpage =
 				alloc_page(GFP_NOFS | __GFP_NOFAIL);
 
 		set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
-		err = z_erofs_attach_page(clt, newpage,
+		err = z_erofs_attach_page(fe, newpage,
 					  Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
 		if (!err)
 			goto retry;
@@ -773,7 +764,7 @@ retry:
 	/* bump up the number of spiltted parts of a page */
 	++spiltted;
 	/* also update nr_pages */
-	clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
+	fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1);
 next_part:
 	/* can be used for verification */
 	map->m_llen = offset + cur - map->m_la;
@@ -1309,7 +1300,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
 	void *bi_private;
-	z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
+	z_erofs_next_pcluster_t owned_head = f->owned_head;
 	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
 	pgoff_t last_index;
 	struct block_device *last_bdev;
@@ -1417,7 +1408,7 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
+	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
 	z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
 
@@ -1517,7 +1508,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 	err = z_erofs_do_read_page(&f, page, &pagepool);
 	z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
 
-	(void)z_erofs_collector_end(&f.clt);
+	(void)z_erofs_collector_end(&f);
 
 	/* if some compressed cluster ready, need submit them anyway */
 	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
@@ -1567,7 +1558,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 		put_page(page);
 	}
 	z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
-	(void)z_erofs_collector_end(&f.clt);
+	(void)z_erofs_collector_end(&f);
 
 	z_erofs_runqueue(inode->i_sb, &f, &pagepool,
 			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));