
xfs: extended attribute fixes for CRCs

- Remove assert on count of remote attribute CRC headers
 - Fix the number of blocks read in for remote attributes
 - Zero remote attribute tails properly
 - Fix mapping of remote attribute buffers to have correct length
 - Initialize temp leaf properly in xfs_attr3_leaf_unbalance, and
   xfs_attr3_leaf_compact
 - Rework remote attributes to have a header per block
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.10 (GNU/Linux)
 
 iQIcBAABAgAGBQJRqQE3AAoJENaLyazVq6ZOBHMQAIaaBdvrHV0tg/EyzVCuVFul
 BJH2RrL7WqFe6cXjkb5qV9neesj/dyDoSOEVqko7klNxlUhnSK6j9UnM9QfeAZVg
 x22TQ+c938giCu/iibdaFsyEVIgX2WfjPY+OoKJh1kTPOimO1ekRE/H8qRJ3A5Ge
 hl5H/pyDcJXiXowNVQEF6sPj59BzEn65kelpIn/uW5BzrmdIjfM+wnvr4mvkxrCS
 0FM9acLmEl/ri448wsZjrlA8yltqg/J8LS+oc6AgWhWGB9qL3LzM7xtQblv47ail
 kJrvJek8RMQT1nQ8N1gQa0jCSCtWB+Lir04ynTM0b5h2NKcm6iFih5kd6a+wl5qd
 p26LkTSjIQLv21dx9fylTvUIf4xx0NCW/BNqSPpEgTXi6FUiYGzEq7iuvCfbGSYf
 +k9PB3KkVhmRzv/IWvP8OmnrhXZhQVo55Z+OulHwuaZ/+orL0Y0TtypW6imjETK5
 b/JiRoVaN1OroZUyg1qk4OnYftl6j93X3iyLniIZiI5hTMNZBhiqe4EcHd++pYDN
 gpaQC/V/9pAgSWhyyD6E5Jv7HvkUuOc73mSPHH3oz5WAoEI6I1XWx+pExo15BiZD
 FADL3HSnnvGDgGxfWb0nYSS/M0KdAT9lpu+48lKg1sIXVANnYkHxJNofM6ZXe042
 BUuItw8SCGARf9JEJF8J
 =VDbP
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-v3.10-rc4-crc-xattr-fixes' of git://oss.sgi.com/xfs/xfs

Pull xfs extended attribute fixes for CRCs from Ben Myers:
 "Here are several fixes that are relevant on CRC enabled XFS
  filesystems.  They are followed by a rework of the remote attribute
  code so that each block of the attribute contains a header with a CRC.

  Previously there was a CRC header per extent in the remote attribute
  code, but this was untenable because it was not possible to know how
  many extents would be allocated for the attribute until after the
  allocation had completed, due to the fragmentation of free space.
  This became complicated because the size of the headers needs to be
  added to the length of the payload to get the overall length required
  for the allocation.  With a header per block, things are less
  complicated at the cost of a little space.

  I would have preferred to defer this and the rest of the CRC queue to
  3.11 to mitigate risk for existing non-crc users in 3.10.  Doing so
  would require setting a feature bit for the on-disk changes, and so I
  have been pressured into sending this pull request by Eric Sandeen and
  David Chinner from Red Hat.  I'll send another pull request or two
  with the rest of the CRC queue next week.

   - Remove assert on count of remote attribute CRC headers
   - Fix the number of blocks read in for remote attributes
   - Zero remote attribute tails properly
   - Fix mapping of remote attribute buffers to have correct length
   - Initialize temp leaf properly in xfs_attr3_leaf_unbalance, and
     xfs_attr3_leaf_compact
   - Rework remote attributes to have a header per block"

* tag 'for-linus-v3.10-rc4-crc-xattr-fixes' of git://oss.sgi.com/xfs/xfs:
  xfs: rework remote attr CRCs
  xfs: fully initialise temp leaf in xfs_attr3_leaf_compact
  xfs: fully initialise temp leaf in xfs_attr3_leaf_unbalance
  xfs: correctly map remote attr buffers during removal
  xfs: remote attribute tail zeroing does too much
  xfs: remote attribute read too short
  xfs: remote attribute allocation may be contiguous
This commit is contained in:
Linus Torvalds 2013-06-01 06:56:21 +09:00
commit 7cfb953258
4 changed files with 307 additions and 183 deletions
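
To make the header-per-block arithmetic described above concrete, here is a small standalone sketch (illustration only, not kernel code): it mirrors the rounding that xfs_attr3_rmt_blocks() performs in the diff below, with the 4096-byte block size and 56-byte header size assumed purely for the example.

/*
 * Illustrative only: with one header per filesystem block, each block
 * carries (blocksize - hdrsize) bytes of attribute payload, so the block
 * count follows directly from the value length by rounding up.
 */
#include <stdio.h>

static int rmt_blocks(int attrlen, int blocksize, int hdrsize)
{
	int payload = blocksize - hdrsize;	/* usable payload bytes per block */

	return (attrlen + payload - 1) / payload;	/* round up */
}

int main(void)
{
	int blocksize = 4096;	/* assumed filesystem block size */
	int hdrsize = 56;	/* assumed per-block header size */

	/* e.g. a 10000-byte value needs 3 blocks at 4040 payload bytes per block */
	printf("%d\n", rmt_blocks(10000, blocksize, hdrsize));
	return 0;
}

Contrast this with the old per-extent scheme, where the number of headers depended on how many extents the allocator happened to produce and could not be known up front.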


@@ -1412,7 +1412,7 @@ xfs_attr3_leaf_add_work(
name_rmt->valuelen = 0;
name_rmt->valueblk = 0;
args->rmtblkno = 1;
args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
}
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -1445,11 +1445,12 @@ xfs_attr3_leaf_add_work(
STATIC void
xfs_attr3_leaf_compact(
struct xfs_da_args *args,
struct xfs_attr3_icleaf_hdr *ichdr_d,
struct xfs_attr3_icleaf_hdr *ichdr_dst,
struct xfs_buf *bp)
{
xfs_attr_leafblock_t *leaf_s, *leaf_d;
struct xfs_attr3_icleaf_hdr ichdr_s;
struct xfs_attr_leafblock *leaf_src;
struct xfs_attr_leafblock *leaf_dst;
struct xfs_attr3_icleaf_hdr ichdr_src;
struct xfs_trans *trans = args->trans;
struct xfs_mount *mp = trans->t_mountp;
char *tmpbuffer;
@@ -1457,29 +1458,38 @@ xfs_attr3_leaf_compact(
trace_xfs_attr_leaf_compact(args);
tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
ASSERT(tmpbuffer != NULL);
memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
memset(bp->b_addr, 0, XFS_LBSIZE(mp));
leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
leaf_dst = bp->b_addr;
/*
* Copy basic information
* Copy the on-disk header back into the destination buffer to ensure
* all the information in the header that is not part of the incore
* header structure is preserved.
*/
leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
leaf_d = bp->b_addr;
ichdr_s = *ichdr_d; /* struct copy */
ichdr_d->firstused = XFS_LBSIZE(mp);
ichdr_d->usedbytes = 0;
ichdr_d->count = 0;
ichdr_d->holes = 0;
ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s);
ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
/* Initialise the incore headers */
ichdr_src = *ichdr_dst; /* struct copy */
ichdr_dst->firstused = XFS_LBSIZE(mp);
ichdr_dst->usedbytes = 0;
ichdr_dst->count = 0;
ichdr_dst->holes = 0;
ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
ichdr_dst->freemap[0].size = ichdr_dst->firstused -
ichdr_dst->freemap[0].base;
/* write the header back to initialise the underlying buffer */
xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
/*
* Copy all entry's in the same (sorted) order,
* but allocate name/value pairs packed and in sequence.
*/
xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0,
ichdr_s.count, mp);
xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0,
ichdr_src.count, mp);
/*
* this logs the entire buffer, but the caller must write the header
* back to the buffer when it is finished modifying it.
@@ -2181,14 +2191,24 @@ xfs_attr3_leaf_unbalance(
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP);
memset(tmp_leaf, 0, state->blocksize);
memset(&tmphdr, 0, sizeof(tmphdr));
tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP);
/*
* Copy the header into the temp leaf so that all the stuff
* not in the incore header is present and gets copied back in
* once we've moved all the entries.
*/
memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
memset(&tmphdr, 0, sizeof(tmphdr));
tmphdr.magic = savehdr.magic;
tmphdr.forw = savehdr.forw;
tmphdr.back = savehdr.back;
tmphdr.firstused = state->blocksize;
/* write the header to the temp buffer to initialise it */
xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
drop_blk->bp, &drophdr)) {
xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
@@ -2334,8 +2354,9 @@ xfs_attr3_leaf_lookup_int(
args->index = probe;
args->valuelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
args->valuelen);
args->rmtblkcnt = xfs_attr3_rmt_blocks(
args->dp->i_mount,
args->valuelen);
return XFS_ERROR(EEXIST);
}
}
@@ -2386,7 +2407,8 @@ xfs_attr3_leaf_getvalue(
ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
valuelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
valuelen);
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = valuelen;
return 0;
@@ -2712,7 +2734,8 @@ xfs_attr3_leaf_list_int(
args.valuelen = valuelen;
args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
args.rmtblkcnt = xfs_attr3_rmt_blocks(
args.dp->i_mount, valuelen);
retval = xfs_attr_rmtval_get(&args);
if (retval)
return retval;


@@ -47,22 +47,55 @@
* Each contiguous block has a header, so it is not just a simple attribute
* length to FSB conversion.
*/
static int
int
xfs_attr3_rmt_blocks(
struct xfs_mount *mp,
int attrlen)
{
int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp,
mp->m_sb.sb_blocksize);
return (attrlen + buflen - 1) / buflen;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
return (attrlen + buflen - 1) / buflen;
}
return XFS_B_TO_FSB(mp, attrlen);
}
/*
* Checking of the remote attribute header is split into two parts. The verifier
* does CRC, location and bounds checking, the unpacking function checks the
* attribute parameters and owner.
*/
static bool
xfs_attr3_rmt_hdr_ok(
struct xfs_mount *mp,
void *ptr,
xfs_ino_t ino,
uint32_t offset,
uint32_t size,
xfs_daddr_t bno)
{
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (bno != be64_to_cpu(rmt->rm_blkno))
return false;
if (offset != be32_to_cpu(rmt->rm_offset))
return false;
if (size != be32_to_cpu(rmt->rm_bytes))
return false;
if (ino != be64_to_cpu(rmt->rm_owner))
return false;
/* ok */
return true;
}
static bool
xfs_attr3_rmt_verify(
struct xfs_buf *bp)
struct xfs_mount *mp,
void *ptr,
int fsbsize,
xfs_daddr_t bno)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return false;
@@ -70,7 +103,9 @@ xfs_attr3_rmt_verify(
return false;
if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
return false;
if (bp->b_bn != be64_to_cpu(rmt->rm_blkno))
if (be64_to_cpu(rmt->rm_blkno) != bno)
return false;
if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
return false;
if (be32_to_cpu(rmt->rm_offset) +
be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
@@ -86,17 +121,40 @@ xfs_attr3_rmt_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
char *ptr;
int len;
bool corrupt = false;
xfs_daddr_t bno;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
XFS_ATTR3_RMT_CRC_OFF) ||
!xfs_attr3_rmt_verify(bp)) {
ptr = bp->b_addr;
bno = bp->b_bn;
len = BBTOB(bp->b_length);
ASSERT(len >= XFS_LBSIZE(mp));
while (len > 0) {
if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
XFS_ATTR3_RMT_CRC_OFF)) {
corrupt = true;
break;
}
if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
corrupt = true;
break;
}
len -= XFS_LBSIZE(mp);
ptr += XFS_LBSIZE(mp);
bno += mp->m_bsize;
}
if (corrupt) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
} else
ASSERT(len == 0);
}
static void
@@ -105,23 +163,39 @@ xfs_attr3_rmt_write_verify(
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
char *ptr;
int len;
xfs_daddr_t bno;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (!xfs_attr3_rmt_verify(bp)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
return;
}
ptr = bp->b_addr;
bno = bp->b_bn;
len = BBTOB(bp->b_length);
ASSERT(len >= XFS_LBSIZE(mp));
if (bip) {
struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
while (len > 0) {
if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
XFS_CORRUPTION_ERROR(__func__,
XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
return;
}
if (bip) {
struct xfs_attr3_rmt_hdr *rmt;
rmt = (struct xfs_attr3_rmt_hdr *)ptr;
rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
}
xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
len -= XFS_LBSIZE(mp);
ptr += XFS_LBSIZE(mp);
bno += mp->m_bsize;
}
xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
XFS_ATTR3_RMT_CRC_OFF);
ASSERT(len == 0);
}
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
@@ -129,15 +203,16 @@ const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
.verify_write = xfs_attr3_rmt_write_verify,
};
static int
STATIC int
xfs_attr3_rmt_hdr_set(
struct xfs_mount *mp,
void *ptr,
xfs_ino_t ino,
uint32_t offset,
uint32_t size,
struct xfs_buf *bp)
xfs_daddr_t bno)
{
struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return 0;
@@ -147,36 +222,107 @@ xfs_attr3_rmt_hdr_set(
rmt->rm_bytes = cpu_to_be32(size);
uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
rmt->rm_owner = cpu_to_be64(ino);
rmt->rm_blkno = cpu_to_be64(bp->b_bn);
bp->b_ops = &xfs_attr3_rmt_buf_ops;
rmt->rm_blkno = cpu_to_be64(bno);
return sizeof(struct xfs_attr3_rmt_hdr);
}
/*
* Checking of the remote attribute header is split into two parts. the verifier
* does CRC, location and bounds checking, the unpacking function checks the
* attribute parameters and owner.
* Helper functions to copy attribute data in and out of the one disk extents
*/
static bool
xfs_attr3_rmt_hdr_ok(
struct xfs_mount *mp,
xfs_ino_t ino,
uint32_t offset,
uint32_t size,
struct xfs_buf *bp)
STATIC int
xfs_attr_rmtval_copyout(
struct xfs_mount *mp,
struct xfs_buf *bp,
xfs_ino_t ino,
int *offset,
int *valuelen,
char **dst)
{
struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
char *src = bp->b_addr;
xfs_daddr_t bno = bp->b_bn;
int len = BBTOB(bp->b_length);
if (offset != be32_to_cpu(rmt->rm_offset))
return false;
if (size != be32_to_cpu(rmt->rm_bytes))
return false;
if (ino != be64_to_cpu(rmt->rm_owner))
return false;
ASSERT(len >= XFS_LBSIZE(mp));
/* ok */
return true;
while (len > 0 && *valuelen > 0) {
int hdr_size = 0;
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
byte_cnt = min_t(int, *valuelen, byte_cnt);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
byte_cnt, bno)) {
xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
bno, *offset, byte_cnt, ino);
return EFSCORRUPTED;
}
hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
}
memcpy(*dst, src + hdr_size, byte_cnt);
/* roll buffer forwards */
len -= XFS_LBSIZE(mp);
src += XFS_LBSIZE(mp);
bno += mp->m_bsize;
/* roll attribute data forwards */
*valuelen -= byte_cnt;
*dst += byte_cnt;
*offset += byte_cnt;
}
return 0;
}
STATIC void
xfs_attr_rmtval_copyin(
struct xfs_mount *mp,
struct xfs_buf *bp,
xfs_ino_t ino,
int *offset,
int *valuelen,
char **src)
{
char *dst = bp->b_addr;
xfs_daddr_t bno = bp->b_bn;
int len = BBTOB(bp->b_length);
ASSERT(len >= XFS_LBSIZE(mp));
while (len > 0 && *valuelen > 0) {
int hdr_size;
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
byte_cnt = min(*valuelen, byte_cnt);
hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
byte_cnt, bno);
memcpy(dst + hdr_size, *src, byte_cnt);
/*
* If this is the last block, zero the remainder of it.
* Check that we are actually the last block, too.
*/
if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
ASSERT(*valuelen - byte_cnt == 0);
ASSERT(len == XFS_LBSIZE(mp));
memset(dst + hdr_size + byte_cnt, 0,
XFS_LBSIZE(mp) - hdr_size - byte_cnt);
}
/* roll buffer forwards */
len -= XFS_LBSIZE(mp);
dst += XFS_LBSIZE(mp);
bno += mp->m_bsize;
/* roll attribute data forwards */
*valuelen -= byte_cnt;
*src += byte_cnt;
*offset += byte_cnt;
}
}
/*
@@ -190,13 +336,12 @@ xfs_attr_rmtval_get(
struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
struct xfs_mount *mp = args->dp->i_mount;
struct xfs_buf *bp;
xfs_daddr_t dblkno;
xfs_dablk_t lblkno = args->rmtblkno;
void *dst = args->value;
char *dst = args->value;
int valuelen = args->valuelen;
int nmap;
int error;
int blkcnt;
int blkcnt = args->rmtblkcnt;
int i;
int offset = 0;
@@ -207,52 +352,36 @@ xfs_attr_rmtval_get(
while (valuelen > 0) {
nmap = ATTR_RMTVALUE_MAPSIZE;
error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt, map, &nmap,
blkcnt, map, &nmap,
XFS_BMAPI_ATTRFORK);
if (error)
return error;
ASSERT(nmap >= 1);
for (i = 0; (i < nmap) && (valuelen > 0); i++) {
int byte_cnt;
char *src;
xfs_daddr_t dblkno;
int dblkcnt;
ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
(map[i].br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
dblkno, blkcnt, 0, &bp,
dblkno, dblkcnt, 0, &bp,
&xfs_attr3_rmt_buf_ops);
if (error)
return error;
byte_cnt = min_t(int, valuelen, BBTOB(bp->b_length));
byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
src = bp->b_addr;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_attr3_rmt_hdr_ok(mp, args->dp->i_ino,
offset, byte_cnt, bp)) {
xfs_alert(mp,
"remote attribute header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
offset, byte_cnt, args->dp->i_ino);
xfs_buf_relse(bp);
return EFSCORRUPTED;
}
src += sizeof(struct xfs_attr3_rmt_hdr);
}
memcpy(dst, src, byte_cnt);
error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
&offset, &valuelen,
&dst);
xfs_buf_relse(bp);
if (error)
return error;
offset += byte_cnt;
dst += byte_cnt;
valuelen -= byte_cnt;
/* roll attribute extent map forwards */
lblkno += map[i].br_blockcount;
blkcnt -= map[i].br_blockcount;
}
}
ASSERT(valuelen == 0);
@@ -270,17 +399,13 @@ xfs_attr_rmtval_set(
struct xfs_inode *dp = args->dp;
struct xfs_mount *mp = dp->i_mount;
struct xfs_bmbt_irec map;
struct xfs_buf *bp;
xfs_daddr_t dblkno;
xfs_dablk_t lblkno;
xfs_fileoff_t lfileoff = 0;
void *src = args->value;
char *src = args->value;
int blkcnt;
int valuelen;
int nmap;
int error;
int hdrcnt = 0;
bool crcs = xfs_sb_version_hascrc(&mp->m_sb);
int offset = 0;
trace_xfs_attr_rmtval_set(args);
@@ -289,24 +414,14 @@ xfs_attr_rmtval_set(
* Find a "hole" in the attribute address space large enough for
* us to drop the new attribute's value into. Because CRC enable
* attributes have headers, we can't just do a straight byte to FSB
* conversion. We calculate the worst case block count in this case
* and we may not need that many, so we have to handle this when
* allocating the blocks below.
* conversion and have to take the header space into account.
*/
if (!crcs)
blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
else
blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
XFS_ATTR_FORK);
if (error)
return error;
/* Start with the attribute data. We'll allocate the rest afterwards. */
if (crcs)
blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
args->rmtblkcnt = blkcnt;
@@ -349,26 +464,6 @@ xfs_attr_rmtval_set(
(map.br_startblock != HOLESTARTBLOCK));
lblkno += map.br_blockcount;
blkcnt -= map.br_blockcount;
hdrcnt++;
/*
* If we have enough blocks for the attribute data, calculate
* how many extra blocks we need for headers. We might run
* through this multiple times in the case that the additional
* headers in the blocks needed for the data fragments spills
* into requiring more blocks. e.g. for 512 byte blocks, we'll
* spill for another block every 9 headers we require in this
* loop.
*/
if (crcs && blkcnt == 0) {
int total_len;
total_len = args->valuelen +
hdrcnt * sizeof(struct xfs_attr3_rmt_hdr);
blkcnt = XFS_B_TO_FSB(mp, total_len);
blkcnt -= args->rmtblkcnt;
args->rmtblkcnt += blkcnt;
}
/*
* Start the next trans in the chain.
@@ -385,18 +480,19 @@ xfs_attr_rmtval_set(
* the INCOMPLETE flag.
*/
lblkno = args->rmtblkno;
blkcnt = args->rmtblkcnt;
valuelen = args->valuelen;
while (valuelen > 0) {
int byte_cnt;
char *buf;
struct xfs_buf *bp;
xfs_daddr_t dblkno;
int dblkcnt;
ASSERT(blkcnt > 0);
/*
* Try to remember where we decided to put the value.
*/
xfs_bmap_init(args->flist, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt, &map, &nmap,
blkcnt, &map, &nmap,
XFS_BMAPI_ATTRFORK);
if (error)
return(error);
@@ -405,41 +501,27 @@ xfs_attr_rmtval_set(
(map.br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
if (!bp)
return ENOMEM;
bp->b_ops = &xfs_attr3_rmt_buf_ops;
byte_cnt = BBTOB(bp->b_length);
byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
if (valuelen < byte_cnt)
byte_cnt = valuelen;
buf = bp->b_addr;
buf += xfs_attr3_rmt_hdr_set(mp, dp->i_ino, offset,
byte_cnt, bp);
memcpy(buf, src, byte_cnt);
if (byte_cnt < BBTOB(bp->b_length))
xfs_buf_zero(bp, byte_cnt,
BBTOB(bp->b_length) - byte_cnt);
xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
&valuelen, &src);
error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
xfs_buf_relse(bp);
if (error)
return error;
src += byte_cnt;
valuelen -= byte_cnt;
offset += byte_cnt;
hdrcnt--;
/* roll attribute extent map forwards */
lblkno += map.br_blockcount;
blkcnt -= map.br_blockcount;
}
ASSERT(valuelen == 0);
ASSERT(hdrcnt == 0);
return 0;
}
@@ -448,33 +530,40 @@ xfs_attr_rmtval_set(
* out-of-line buffer that it is stored on.
*/
int
xfs_attr_rmtval_remove(xfs_da_args_t *args)
xfs_attr_rmtval_remove(
struct xfs_da_args *args)
{
xfs_mount_t *mp;
xfs_bmbt_irec_t map;
xfs_buf_t *bp;
xfs_daddr_t dblkno;
xfs_dablk_t lblkno;
int valuelen, blkcnt, nmap, error, done, committed;
struct xfs_mount *mp = args->dp->i_mount;
xfs_dablk_t lblkno;
int blkcnt;
int error;
int done;
trace_xfs_attr_rmtval_remove(args);
mp = args->dp->i_mount;
/*
* Roll through the "value", invalidating the attribute value's
* blocks.
* Roll through the "value", invalidating the attribute value's blocks.
* Note that args->rmtblkcnt is the minimum number of data blocks we'll
* see for a CRC enabled remote attribute. Each extent will have a
* header, and so we may have more blocks than we realise here. If we
* fail to map the blocks correctly, we'll have problems with the buffer
* lookups.
*/
lblkno = args->rmtblkno;
valuelen = args->rmtblkcnt;
while (valuelen > 0) {
blkcnt = args->rmtblkcnt;
while (blkcnt > 0) {
struct xfs_bmbt_irec map;
struct xfs_buf *bp;
xfs_daddr_t dblkno;
int dblkcnt;
int nmap;
/*
* Try to remember where we decided to put the value.
*/
nmap = 1;
error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt, &map, &nmap,
XFS_BMAPI_ATTRFORK);
blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
if (error)
return(error);
ASSERT(nmap == 1);
@@ -482,21 +571,20 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
(map.br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
/*
* If the "remote" value is in the cache, remove it.
*/
bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
if (bp) {
xfs_buf_stale(bp);
xfs_buf_relse(bp);
bp = NULL;
}
valuelen -= map.br_blockcount;
lblkno += map.br_blockcount;
blkcnt -= map.br_blockcount;
}
/*
@@ -506,6 +594,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
int committed;
xfs_bmap_init(args->flist, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,


@@ -20,6 +20,14 @@
#define XFS_ATTR3_RMT_MAGIC 0x5841524d /* XARM */
/*
* There is one of these headers per filesystem block in a remote attribute.
* This is done to ensure there is a 1:1 mapping between the attribute value
* length and the number of blocks needed to store the attribute. This makes the
* verification of a buffer a little more complex, but greatly simplifies the
* allocation, reading and writing of these attributes as we don't have to guess
* the number of blocks needed to store the attribute data.
*/
struct xfs_attr3_rmt_hdr {
__be32 rm_magic;
__be32 rm_offset;
@@ -39,6 +47,8 @@ struct xfs_attr3_rmt_hdr {
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_rmtval_set(struct xfs_da_args *args);
int xfs_attr_rmtval_remove(struct xfs_da_args *args);
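
The struct comment above explains the 1:1 mapping between value length and block count; the new xfs_attr_rmtval_copyin()/copyout() helpers in the xfs_attr_remote.c diff walk the mapped buffer one filesystem block at a time accordingly. Below is a minimal userspace sketch of that walk (names, header layout and sizes are invented for illustration and are not the on-disk format): each block gets a small header followed by as much payload as fits, and the unused tail of the final block is zeroed, as the "remote attribute tail zeroing" fix requires.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the per-block header; the real xfs_attr3_rmt_hdr has more fields. */
struct rmt_hdr {
	uint32_t offset;	/* byte offset of this fragment within the value */
	uint32_t bytes;		/* payload bytes stored in this block */
};

/*
 * Copy valuelen bytes of src into consecutive blocks of blocksize bytes,
 * writing a header at the start of every block and zeroing the unused
 * tail of the last block, in the spirit of xfs_attr_rmtval_copyin() above.
 */
static void copyin(char *blocks, int nblocks, int blocksize,
		   const char *src, int valuelen)
{
	int offset = 0;
	int i;

	for (i = 0; i < nblocks && valuelen > 0; i++) {
		char *dst = blocks + (size_t)i * blocksize;
		int space = blocksize - (int)sizeof(struct rmt_hdr);
		int byte_cnt = valuelen < space ? valuelen : space;
		struct rmt_hdr hdr = { (uint32_t)offset, (uint32_t)byte_cnt };

		memcpy(dst, &hdr, sizeof(hdr));
		memcpy(dst + sizeof(hdr), src + offset, byte_cnt);

		/* zero only the unused tail of a partially filled (final) block */
		if ((int)sizeof(hdr) + byte_cnt < blocksize)
			memset(dst + sizeof(hdr) + byte_cnt, 0,
			       blocksize - sizeof(hdr) - byte_cnt);

		offset += byte_cnt;
		valuelen -= byte_cnt;
	}
}

int main(void)
{
	char value[1000], blocks[2 * 512];

	memset(value, 'x', sizeof(value));
	/* 1000 value bytes, 504 payload bytes per 512-byte block -> two blocks */
	copyin(blocks, 2, 512, value, sizeof(value));
	printf("second block payload: %d bytes\n", 1000 - (512 - (int)sizeof(struct rmt_hdr)));
	return 0;
}

A matching copyout reverses the walk, validating each block's header (owner, offset, length, block number) before copying its payload back out.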


@@ -513,6 +513,7 @@ _xfs_buf_find(
xfs_alert(btp->bt_mount,
"%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
__func__, blkno, eofs);
WARN_ON(1);
return NULL;
}