
xfs: refactor log recovery item dispatch for pass2 readahead functions

Move the pass2 readahead code into the per-item source code files and use
the dispatch function to call them.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Darrick J. Wong 2020-05-01 16:00:46 -07:00
parent 86ffa471d9
commit 8ea5682d07
5 changed files with 73 additions and 92 deletions
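
For context, a minimal standalone C sketch (with simplified stand-in types and names, not the kernel's) of the dispatch pattern this patch moves to: each item type exports a constant ops table, and the pass2 loop starts readahead only when the optional ra_pass2 hook is set.

#include <stdio.h>
#include <stddef.h>

struct item;	/* stand-in for struct xlog_recover_item */

struct item_ops {
	int	item_type;
	void	(*ra_pass2)(struct item *item);	/* optional; may be NULL */
};

static void buf_ra_pass2(struct item *item)
{
	(void)item;
	printf("readahead for a buffer item\n");
}

static const struct item_ops buf_item_ops = {
	.item_type	= 1,
	.ra_pass2	= buf_ra_pass2,
};

/* quotaoff-style item: nothing to read ahead, so the hook stays NULL */
static const struct item_ops quotaoff_item_ops = {
	.item_type	= 2,
};

struct item {
	const struct item_ops	*ops;
};

int main(void)
{
	struct item items[] = {
		{ .ops = &buf_item_ops },
		{ .ops = &quotaoff_item_ops },
	};
	size_t i;

	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		/* mirrors the new pass2 loop: call the hook only if provided */
		if (items[i].ops->ra_pass2)
			items[i].ops->ra_pass2(&items[i]);
	}
	return 0;
}

Compared with the large ITEM_TYPE() switch this commit deletes from xfs_log_recover.c, the NULL-checked hook lets each item type opt in from its own source file without touching a central dispatcher.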

fs/xfs/libxfs/xfs_log_recover.h

@@ -31,6 +31,9 @@ struct xlog_recover_item_ops {
* values mean.
*/
enum xlog_recover_reorder (*reorder)(struct xlog_recover_item *item);
/* Start readahead for pass2, if provided. */
void (*ra_pass2)(struct xlog *log, struct xlog_recover_item *item);
};
extern const struct xlog_recover_item_ops xlog_icreate_item_ops;
@@ -92,4 +95,7 @@ struct xlog_recover {
#define XLOG_RECOVER_PASS1 1
#define XLOG_RECOVER_PASS2 2
void xlog_buf_readahead(struct xlog *log, xfs_daddr_t blkno, uint len,
const struct xfs_buf_ops *ops);
#endif /* __XFS_LOG_RECOVER_H__ */

fs/xfs/xfs_buf_item_recover.c

@@ -46,7 +46,18 @@ xlog_recover_buf_reorder(
return XLOG_REORDER_BUFFER_LIST;
}
STATIC void
xlog_recover_buf_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
}
const struct xlog_recover_item_ops xlog_buf_item_ops = {
.item_type = XFS_LI_BUF,
.reorder = xlog_recover_buf_reorder,
.ra_pass2 = xlog_recover_buf_ra_pass2,
};

fs/xfs/xfs_dquot_item_recover.c

@@ -20,8 +20,42 @@
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
STATIC void
xlog_recover_dquot_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
struct xfs_mount *mp = log->l_mp;
struct xfs_disk_dquot *recddq;
struct xfs_dq_logformat *dq_f;
uint type;
if (mp->m_qflags == 0)
return;
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL)
return;
if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
return;
type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
ASSERT(type);
if (log->l_quotaoffs_flag & type)
return;
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
ASSERT(dq_f->qlf_len == 1);
xlog_buf_readahead(log, dq_f->qlf_blkno,
XFS_FSB_TO_BB(mp, dq_f->qlf_len),
&xfs_dquot_buf_ra_ops);
}
const struct xlog_recover_item_ops xlog_dquot_item_ops = {
.item_type = XFS_LI_DQUOT,
.ra_pass2 = xlog_recover_dquot_ra_pass2,
};
const struct xlog_recover_item_ops xlog_quotaoff_item_ops = {

fs/xfs/xfs_inode_item_recover.c

@@ -21,6 +21,25 @@
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
STATIC void
xlog_recover_inode_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
struct xfs_inode_log_format *ilfp = item->ri_buf[0].i_addr;
xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
&xfs_inode_buf_ra_ops);
} else {
struct xfs_inode_log_format_32 *ilfp = item->ri_buf[0].i_addr;
xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
&xfs_inode_buf_ra_ops);
}
}
const struct xlog_recover_item_ops xlog_inode_item_ops = {
.item_type = XFS_LI_INODE,
.ra_pass2 = xlog_recover_inode_ra_pass2,
};

fs/xfs/xfs_log_recover.c

@@ -2023,7 +2023,7 @@ xlog_put_buffer_cancelled(
return true;
}
-static void
+void
xlog_buf_readahead(
struct xlog *log,
xfs_daddr_t blkno,
@@ -3890,96 +3890,6 @@ xlog_recover_do_icreate_pass2(
length, be32_to_cpu(icl->icl_gen));
}
STATIC void
xlog_recover_buffer_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
}
STATIC void
xlog_recover_inode_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
struct xfs_inode_log_format *ilfp = item->ri_buf[0].i_addr;
xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
&xfs_inode_buf_ra_ops);
} else {
struct xfs_inode_log_format_32 *ilfp = item->ri_buf[0].i_addr;
xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
&xfs_inode_buf_ra_ops);
}
}
STATIC void
xlog_recover_dquot_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
struct xfs_mount *mp = log->l_mp;
struct xfs_disk_dquot *recddq;
struct xfs_dq_logformat *dq_f;
uint type;
if (mp->m_qflags == 0)
return;
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL)
return;
if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
return;
type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
ASSERT(type);
if (log->l_quotaoffs_flag & type)
return;
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
ASSERT(dq_f->qlf_len == 1);
xlog_buf_readahead(log, dq_f->qlf_blkno,
XFS_FSB_TO_BB(mp, dq_f->qlf_len),
&xfs_dquot_buf_ra_ops);
}
STATIC void
xlog_recover_ra_pass2(
struct xlog *log,
struct xlog_recover_item *item)
{
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
xlog_recover_buffer_ra_pass2(log, item);
break;
case XFS_LI_INODE:
xlog_recover_inode_ra_pass2(log, item);
break;
case XFS_LI_DQUOT:
xlog_recover_dquot_ra_pass2(log, item);
break;
case XFS_LI_EFI:
case XFS_LI_EFD:
case XFS_LI_QUOTAOFF:
case XFS_LI_RUI:
case XFS_LI_RUD:
case XFS_LI_CUI:
case XFS_LI_CUD:
case XFS_LI_BUI:
case XFS_LI_BUD:
default:
break;
}
}
STATIC int
xlog_recover_commit_pass1(
struct xlog *log,
@@ -4116,7 +4026,8 @@ xlog_recover_commit_trans(
error = xlog_recover_commit_pass1(log, trans, item);
break;
case XLOG_RECOVER_PASS2:
-xlog_recover_ra_pass2(log, item);
+if (item->ri_ops->ra_pass2)
+	item->ri_ops->ra_pass2(log, item);
list_move_tail(&item->ri_list, &ra_list);
items_queued++;
if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {