// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

struct z_erofs_maprecorder {
        struct inode *inode;
        struct erofs_map_blocks *map;
        void *kaddr;

        unsigned long lcn;
        /* compression extent information gathered */
        u8  type, headtype;
        u16 clusterofs;
        u16 delta[2];
        erofs_blk_t pblk, compressedblks;
        erofs_off_t nextpackoff;
        bool partialref;
};

static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
                                      unsigned long lcn)
{
        struct inode *const inode = m->inode;
        struct erofs_inode *const vi = EROFS_I(inode);
        const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
                        vi->inode_isize + vi->xattr_isize) +
                        lcn * sizeof(struct z_erofs_lcluster_index);
        struct z_erofs_lcluster_index *di;
        unsigned int advise;

        m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
                                      pos, EROFS_KMAP);
        if (IS_ERR(m->kaddr))
                return PTR_ERR(m->kaddr);

        m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
        m->lcn = lcn;
        di = m->kaddr;

        advise = le16_to_cpu(di->di_advise);
        m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
        if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                m->clusterofs = 1 << vi->z_logical_clusterbits;
                m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
                if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
                        if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
                                        Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedblks = m->delta[0] &
                                ~Z_EROFS_LI_D0_CBLKCNT;
                        m->delta[0] = 1;
                }
                m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
        } else {
                m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
                m->clusterofs = le16_to_cpu(di->di_clusterofs);
                if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                m->pblk = le32_to_cpu(di->di_u.blkaddr);
        }
        return 0;
}

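/* extract the (type, lo) field pair encoded at bit offset @pos of a compacted pack */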
static unsigned int decode_compactedbits(unsigned int lobits,
                                         u8 *in, unsigned int pos, u8 *type)
{
        const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
        const unsigned int lo = v & ((1 << lobits) - 1);

        *type = (v >> lobits) & 3;
        return lo;
}

static int get_compacted_la_distance(unsigned int lobits,
                                     unsigned int encodebits,
                                     unsigned int vcnt, u8 *in, int i)
{
        unsigned int lo, d1 = 0;
        u8 type;

        DBG_BUGON(i >= vcnt);

        do {
                lo = decode_compactedbits(lobits, in, encodebits * i, &type);

                if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                        return d1;
                ++d1;
        } while (++i < vcnt);

        /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
        if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
                d1 += lo - 1;
        return d1;
}

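/*
 * In the compact format, several lclusters are packed together and share a
 * single base blkaddr stored at the end of each pack.  With big pcluster,
 * that base points at the first pcluster with a valid CBLKCNT in (or right
 * after) the pack, so the decoder bumps the base by CBLKCNT lclusters when
 * a CBLKCNT entry is met, or by 1 when a HEAD lcluster follows immediately.
 */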
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
                                  unsigned int amortizedshift,
                                  erofs_off_t pos, bool lookahead)
{
        struct erofs_inode *const vi = EROFS_I(m->inode);
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
        int i;
        u8 *in, type;
        bool big_pcluster;

        if (1 << amortizedshift == 4 && lclusterbits <= 14)
                vcnt = 2;
        else if (1 << amortizedshift == 2 && lclusterbits <= 12)
                vcnt = 16;
        else
                return -EOPNOTSUPP;

        /* it doesn't equal round_up(..) */
        m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
                         (vcnt << amortizedshift);
        big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
        lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
        encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
        bytes = pos & ((vcnt << amortizedshift) - 1);

        in = m->kaddr - bytes;

        i = bytes >> amortizedshift;

        lo = decode_compactedbits(lobits, in, encodebits * i, &type);
        m->type = type;
        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                m->clusterofs = 1 << lclusterbits;

                /* figure out lookahead_distance: delta[1] if needed */
                if (lookahead)
                        m->delta[1] = get_compacted_la_distance(lobits,
                                                encodebits, vcnt, in, i);
                if (lo & Z_EROFS_LI_D0_CBLKCNT) {
                        if (!big_pcluster) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
                        m->delta[0] = 1;
                        return 0;
                } else if (i + 1 != (int)vcnt) {
                        m->delta[0] = lo;
                        return 0;
                }
                /*
                 * the last lcluster in the pack is special: its lo stores
                 * delta[1] rather than delta[0], so derive delta[0] from
                 * the previous lcluster indirectly.
                 */
                lo = decode_compactedbits(lobits, in,
                                          encodebits * (i - 1), &type);
                if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                        lo = 0;
                else if (lo & Z_EROFS_LI_D0_CBLKCNT)
                        lo = 1;
                m->delta[0] = lo + 1;
                return 0;
        }
        m->clusterofs = lo;
        m->delta[0] = 0;
        /* figure out blkaddr (pblk) for HEAD lclusters */
        if (!big_pcluster) {
                nblk = 1;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lobits, in,
                                                  encodebits * i, &type);
                        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                                i -= lo;

                        if (i >= 0)
                                ++nblk;
                }
        } else {
                nblk = 0;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lobits, in,
                                                  encodebits * i, &type);
                        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                                if (lo & Z_EROFS_LI_D0_CBLKCNT) {
                                        --i;
                                        nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
                                        continue;
                                }
                                /* bigpcluster shouldn't have plain d0 == 1 */
                                if (lo <= 1) {
                                        DBG_BUGON(1);
                                        return -EFSCORRUPTED;
                                }
                                i -= lo - 2;
                                continue;
                        }
                        ++nblk;
                }
        }
        in += (vcnt << amortizedshift) - sizeof(__le32);
        m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
        return 0;
}

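/*
 * Compact indexes start with a few 4-byte entries (up to the next 32-byte
 * boundary), followed by 2-byte packs when COMPACTED_2B is advised, and
 * finish with 4-byte packs for the remainder.
 */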
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
                                         unsigned long lcn, bool lookahead)
{
        struct inode *const inode = m->inode;
        struct erofs_inode *const vi = EROFS_I(inode);
        const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
                ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
        unsigned int totalidx = erofs_iblks(inode);
        unsigned int compacted_4b_initial, compacted_2b;
        unsigned int amortizedshift;
        erofs_off_t pos;

        if (lcn >= totalidx)
                return -EINVAL;

        m->lcn = lcn;
        /* used to align to 32-byte (compacted_2b) alignment */
        compacted_4b_initial = (32 - ebase % 32) / 4;
        if (compacted_4b_initial == 32 / 4)
                compacted_4b_initial = 0;

        if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
            compacted_4b_initial < totalidx)
                compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
        else
                compacted_2b = 0;

        pos = ebase;
        if (lcn < compacted_4b_initial) {
                amortizedshift = 2;
                goto out;
        }
        pos += compacted_4b_initial * 4;
        lcn -= compacted_4b_initial;

        if (lcn < compacted_2b) {
                amortizedshift = 1;
                goto out;
        }
        pos += compacted_2b * 2;
        lcn -= compacted_2b;
        amortizedshift = 2;
out:
        pos += lcn * (1 << amortizedshift);
        m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
                                      pos, EROFS_KMAP);
        if (IS_ERR(m->kaddr))
                return PTR_ERR(m->kaddr);
        return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
                                           unsigned int lcn, bool lookahead)
{
        switch (EROFS_I(m->inode)->datalayout) {
        case EROFS_INODE_COMPRESSED_FULL:
                return z_erofs_load_full_lcluster(m, lcn);
        case EROFS_INODE_COMPRESSED_COMPACT:
                return z_erofs_load_compact_lcluster(m, lcn, lookahead);
        default:
                return -EINVAL;
        }
}

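/* walk back @lookback_distance lclusters to find the matching HEAD lcluster */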
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
                                   unsigned int lookback_distance)
{
        struct super_block *sb = m->inode->i_sb;
        struct erofs_inode *const vi = EROFS_I(m->inode);
        const unsigned int lclusterbits = vi->z_logical_clusterbits;

        while (m->lcn >= lookback_distance) {
                unsigned long lcn = m->lcn - lookback_distance;
                int err;

                err = z_erofs_load_lcluster_from_disk(m, lcn, false);
                if (err)
                        return err;

                switch (m->type) {
                case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                        lookback_distance = m->delta[0];
                        if (!lookback_distance)
                                goto err_bogus;
                        continue;
                case Z_EROFS_LCLUSTER_TYPE_PLAIN:
                case Z_EROFS_LCLUSTER_TYPE_HEAD1:
                case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                        m->headtype = m->type;
                        m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
                        return 0;
                default:
                        erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
                                  m->type, lcn, vi->nid);
                        DBG_BUGON(1);
                        return -EOPNOTSUPP;
                }
        }
err_bogus:
        erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
                  lookback_distance, m->lcn, vi->nid);
        DBG_BUGON(1);
        return -EFSCORRUPTED;
}

static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
                                            unsigned int initial_lcn)
{
        struct super_block *sb = m->inode->i_sb;
        struct erofs_inode *const vi = EROFS_I(m->inode);
        struct erofs_map_blocks *const map = m->map;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned long lcn;
        int err;

        DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
                  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
                  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
        DBG_BUGON(m->type != m->headtype);

        if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
            ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
             !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
            ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
             !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
                map->m_plen = 1ULL << lclusterbits;
                return 0;
        }
        lcn = m->lcn + 1;
        if (m->compressedblks)
                goto out;

        err = z_erofs_load_lcluster_from_disk(m, lcn, false);
        if (err)
                return err;

        /*
         * If the 1st NONHEAD lcluster has already been handled initially w/o
         * valid compressedblks, which means at least it mustn't be CBLKCNT, or
         * an internal implementation error is detected.
         *
         * The following code can also handle it properly anyway, but let's
         * BUG_ON in the debugging mode only for developers to notice that.
         */
        DBG_BUGON(lcn == initial_lcn &&
                  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

        switch (m->type) {
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
        case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                /*
                 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
                 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
                 */
                m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
                break;
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                if (m->delta[0] != 1)
                        goto err_bonus_cblkcnt;
                if (m->compressedblks)
                        break;
                fallthrough;
        default:
                erofs_err(sb, "cannot find CBLKCNT @ lcn %lu of nid %llu", lcn,
                          vi->nid);
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }
out:
        map->m_plen = erofs_pos(sb, m->compressedblks);
        return 0;
err_bonus_cblkcnt:
        erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
        DBG_BUGON(1);
        return -EFSCORRUPTED;
}

static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
        struct inode *inode = m->inode;
        struct erofs_inode *vi = EROFS_I(inode);
        struct erofs_map_blocks *map = m->map;
        unsigned int lclusterbits = vi->z_logical_clusterbits;
        u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
        int err;

        do {
                /* handle the last EOF pcluster (no next HEAD lcluster) */
                if ((lcn << lclusterbits) >= inode->i_size) {
                        map->m_llen = inode->i_size - map->m_la;
                        return 0;
                }

                err = z_erofs_load_lcluster_from_disk(m, lcn, true);
                if (err)
                        return err;

                if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                        DBG_BUGON(!m->delta[1] &&
                                  m->clusterofs != 1 << lclusterbits);
                } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
                           m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
                           m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
                        /* go on until the next HEAD lcluster */
                        if (lcn != headlcn)
                                break;
                        m->delta[1] = 1;
                } else {
                        erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
                                  m->type, lcn, vi->nid);
                        DBG_BUGON(1);
                        return -EOPNOTSUPP;
                }
                lcn += m->delta[1];
        } while (m->delta[1]);

        map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
        return 0;
}

static int z_erofs_do_map_blocks(struct inode *inode,
                                 struct erofs_map_blocks *map, int flags)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
        bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
        struct z_erofs_maprecorder m = {
                .inode = inode,
                .map = map,
        };
        int err = 0;
        unsigned int lclusterbits, endoff, afmt;
        unsigned long initial_lcn;
        unsigned long long ofs, end;

        lclusterbits = vi->z_logical_clusterbits;
        ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
        initial_lcn = ofs >> lclusterbits;
        endoff = ofs & ((1 << lclusterbits) - 1);

        err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
        if (err)
                goto unmap_out;

        if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
                vi->z_idataoff = m.nextpackoff;

        map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
        end = (m.lcn + 1ULL) << lclusterbits;

        switch (m.type) {
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
        case Z_EROFS_LCLUSTER_TYPE_HEAD2:
                if (endoff >= m.clusterofs) {
                        m.headtype = m.type;
                        map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
                        /*
                         * For ztailpacking files, in order to inline data more
                         * effectively, special EOF lclusters are now supported
                         * which can have three parts at most.
                         */
                        if (ztailpacking && end > inode->i_size)
                                end = inode->i_size;
                        break;
                }
                /* m.lcn should be >= 1 if endoff < m.clusterofs */
                if (!m.lcn) {
                        erofs_err(inode->i_sb,
                                  "invalid logical cluster 0 at nid %llu",
                                  vi->nid);
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
                end = (m.lcn << lclusterbits) | m.clusterofs;
                map->m_flags |= EROFS_MAP_FULL_MAPPED;
                m.delta[0] = 1;
                fallthrough;
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                /* get the corresponding first chunk */
                err = z_erofs_extent_lookback(&m, m.delta[0]);
                if (err)
                        goto unmap_out;
                break;
        default:
                erofs_err(inode->i_sb,
                          "unknown type %u @ offset %llu of nid %llu",
                          m.type, ofs, vi->nid);
                err = -EOPNOTSUPP;
                goto unmap_out;
        }
        if (m.partialref)
                map->m_flags |= EROFS_MAP_PARTIAL_REF;
        map->m_llen = end - map->m_la;

        if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
                vi->z_tailextent_headlcn = m.lcn;
                /* for non-compact indexes, fragmentoff is 64 bits */
                if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
                        vi->z_fragmentoff |= (u64)m.pblk << 32;
        }
        if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
                map->m_flags |= EROFS_MAP_META;
                map->m_pa = vi->z_idataoff;
                map->m_plen = vi->z_idata_size;
        } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
                map->m_flags |= EROFS_MAP_FRAGMENT;
        } else {
                map->m_pa = erofs_pos(inode->i_sb, m.pblk);
                err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
                if (err)
                        goto unmap_out;
        }

        if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
                if (map->m_llen > map->m_plen) {
                        DBG_BUGON(1);
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
                afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
                        Z_EROFS_COMPRESSION_INTERLACED :
                        Z_EROFS_COMPRESSION_SHIFTED;
        } else {
                afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
                        vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
                if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
                        erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
                                  afmt, vi->nid);
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
        }
        map->m_algorithmformat = afmt;

        if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
            ((flags & EROFS_GET_BLOCKS_READMORE) &&
             (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
              map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
              map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
             map->m_llen >= i_blocksize(inode))) {
                err = z_erofs_get_extent_decompressedlen(&m);
                if (!err)
                        map->m_flags |= EROFS_MAP_FULL_MAPPED;
        }

unmap_out:
        erofs_unmap_metabuf(&m.map->buf);
        return err;
}

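/*
 * Parse the z_erofs map header once per inode: EROFS_I_BL_Z_BIT serializes
 * concurrent initializers and EROFS_I_Z_INITED_BIT marks completion.
 */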
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        struct super_block *const sb = inode->i_sb;
        int err, headnr;
        erofs_off_t pos;
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        struct z_erofs_map_header *h;

        if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
                /*
                 * paired with smp_mb() at the end of the function to ensure
                 * fields will only be observed after the bit is set.
                 */
                smp_mb();
                return 0;
        }

        if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
                return -ERESTARTSYS;

        err = 0;
        if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
                goto out_unlock;

        pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
        h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
        if (IS_ERR(h)) {
                err = PTR_ERR(h);
                goto out_unlock;
        }

        /*
         * if the highest bit of the 8-byte map header is set, the whole file
         * is stored in the packed inode. The remaining bits keep z_fragmentoff.
         */
        if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
                vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
                vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
                vi->z_tailextent_headlcn = 0;
                goto done;
        }
        vi->z_advise = le16_to_cpu(h->h_advise);
        vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
        vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

        headnr = 0;
        if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
            vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
                erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
                          headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
                err = -EOPNOTSUPP;
                goto out_put_metabuf;
        }

        vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
        if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
            vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
                            Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
                goto out_put_metabuf;
        }
        if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
                          vi->nid);
                err = -EFSCORRUPTED;
                goto out_put_metabuf;
        }

        if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
                struct erofs_map_blocks map = {
                        .buf = __EROFS_BUF_INITIALIZER
                };

                vi->z_idata_size = le16_to_cpu(h->h_idata_size);
                err = z_erofs_do_map_blocks(inode, &map,
                                            EROFS_GET_BLOCKS_FINDTAIL);
                erofs_put_metabuf(&map.buf);

                if (!map.m_plen ||
                    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
                        erofs_err(sb, "invalid tail-packing pclustersize %llu",
                                  map.m_plen);
                        err = -EFSCORRUPTED;
                }
                if (err < 0)
                        goto out_put_metabuf;
        }

        if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
            !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
                struct erofs_map_blocks map = {
                        .buf = __EROFS_BUF_INITIALIZER
                };

                vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
                err = z_erofs_do_map_blocks(inode, &map,
                                            EROFS_GET_BLOCKS_FINDTAIL);
                erofs_put_metabuf(&map.buf);
                if (err < 0)
                        goto out_put_metabuf;
        }
done:
        /* paired with smp_mb() at the beginning of the function */
        smp_mb();
        set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
        erofs_put_metabuf(&buf);
out_unlock:
        clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
        return err;
}

int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
                            int flags)
{
        struct erofs_inode *const vi = EROFS_I(inode);
        int err = 0;

        trace_erofs_map_blocks_enter(inode, map, flags);
        if (map->m_la >= inode->i_size) {       /* post-EOF unmapped extent */
                map->m_llen = map->m_la + 1 - inode->i_size;
                map->m_la = inode->i_size;
                map->m_flags = 0;
        } else {
                err = z_erofs_fill_inode_lazy(inode);
                if (!err) {
                        if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
                            !vi->z_tailextent_headlcn) {
                                map->m_la = 0;
                                map->m_llen = inode->i_size;
                                map->m_flags = EROFS_MAP_MAPPED |
                                        EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT;
                        } else {
                                err = z_erofs_do_map_blocks(inode, map, flags);
                        }
                }
                if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
                    unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
                             map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
                        err = -EOPNOTSUPP;
                if (err)
                        map->m_llen = 0;
        }
        trace_erofs_map_blocks_exit(inode, map, flags, err);
        return err;
}

static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
                                      loff_t length, unsigned int flags,
                                      struct iomap *iomap, struct iomap *srcmap)
{
        int ret;
        struct erofs_map_blocks map = { .m_la = offset };

        ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
        erofs_put_metabuf(&map.buf);
        if (ret < 0)
                return ret;

        iomap->bdev = inode->i_sb->s_bdev;
        iomap->offset = map.m_la;
        iomap->length = map.m_llen;
        if (map.m_flags & EROFS_MAP_MAPPED) {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
                              IOMAP_NULL_ADDR : map.m_pa;
        } else {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                /*
                 * There is no strict rule on how to describe extents past EOF,
                 * yet we need to handle it as below; otherwise, iomap itself
                 * will get into an endless loop on post-EOF reports.
                 *
                 * Calculate the effective offset by subtracting the extent
                 * start (map.m_la) from the requested offset, and add it to
                 * the length. (NB: offset >= map.m_la always)
                 */
                if (iomap->offset >= inode->i_size)
                        iomap->length = length + offset - map.m_la;
        }
        iomap->flags = 0;
        return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
        .iomap_begin = z_erofs_iomap_begin_report,
};