mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-25 05:04:09 +08:00
[PATCH] ufs: change block number on the fly
First, some necessary background on UFS itself: to avoid wasting disk space, the tail of a file consists not of whole blocks (which are ordinarily fairly big, usually 16K) but of fragments (ordinarily 2K). As a file grows, its tail occupies 1 fragment, 2 fragments... At some stage the decision is made to allocate a whole block, and all the fragments are moved into one block. How this situation was handled before: ufs_prepare_write ->block_prepare_write ->ufs_getfrag_block ->... ->ufs_new_fragments: bh = sb_bread bh->b_blocknr = result + i; mark_buffer_dirty (bh); This was the wrong solution, because: - it did not take into account that there is another cache, the inode page cache; - sb_getblk uses page->index, not b_blocknr, to locate a given block, so this breaks sb_getblk. How this situation is handled now: we walk the inode page cache; if a page is not present we load it into the cache, and then change b_blocknr. Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
c9a27b5dca
commit
6ef4d6bf86
137
fs/ufs/balloc.c
137
fs/ufs/balloc.c
@ -39,7 +39,8 @@ static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *,
|
||||
/*
|
||||
* Free 'count' fragments from fragment number 'fragment'
|
||||
*/
|
||||
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
|
||||
void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
|
||||
{
|
||||
struct super_block * sb;
|
||||
struct ufs_sb_private_info * uspi;
|
||||
struct ufs_super_block_first * usb1;
|
||||
@ -134,7 +135,8 @@ failed:
|
||||
/*
|
||||
* Free 'count' fragments from fragment number 'fragment' (free whole blocks)
|
||||
*/
|
||||
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
|
||||
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
|
||||
{
|
||||
struct super_block * sb;
|
||||
struct ufs_sb_private_info * uspi;
|
||||
struct ufs_super_block_first * usb1;
|
||||
@ -222,15 +224,118 @@ failed:
|
||||
return;
|
||||
}
|
||||
|
||||
/*
 * Return the page at @index in @mapping, locked and with buffers attached.
 *
 * First tries find_lock_page(); if the page is not in the cache it is
 * read in via the mapping's ->readpage and then locked by hand.  On a
 * read error the function returns ERR_PTR(-EIO) (or the error from
 * read_cache_page), so callers must check the result with IS_ERR().
 * If the page lost its mapping or its buffers while it was unlocked,
 * it is released and the lookup is retried — the caller needs the
 * buffer_heads on the page in order to renumber blocks.
 */
static struct page *ufs_get_locked_page(struct address_space *mapping,
	unsigned long index)
{
	struct page *page;

try_again:
	page = find_lock_page(mapping, index);
	if (!page) {
		/* Not cached: read it in (read_cache_page returns the page unlocked). */
		page = read_cache_page(mapping, index,
				       (filler_t*)mapping->a_ops->readpage,
				       NULL);
		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_cache_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (!PageUptodate(page) || PageError(page)) {
			/* The read failed: drop our reference and report -EIO. */
			unlock_page(page);
			page_cache_release(page);

			printk(KERN_ERR "ufs_change_blocknr: "
			       "can not read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);

			page = ERR_PTR(-EIO);
			goto out;
		}
	}

	/* Page may have been truncated or stripped of buffers while unlocked. */
	if (unlikely(!page->mapping || !page_has_buffers(page))) {
		unlock_page(page);
		page_cache_release(page);
		goto try_again;/*we really need these buffers*/
	}
out:
	return page;
}
|
||||
|
||||
/*
 * Modify the inode page cache so that:
 *   have - buffers with b_blocknr equal to oldb...oldb+count-1
 *   get  - buffers with b_blocknr equal to newb...newb+count-1
 * We also assume that the oldb...oldb+count-1 blocks are
 * situated at the end of the file (baseblk below relies on i_size).
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, unsigned int count,
			       unsigned int oldb, unsigned int newb,
			       struct page *locked_page)
{
	/* Number of filesystem blocks covered by one page-cache page. */
	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	sector_t baseblk;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, cur_index = locked_page->index;
	unsigned int i, j;
	struct page *page;
	struct buffer_head *head, *bh;

	/* First block of the moved range (the range sits at end-of-file). */
	baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;

	UFSD(("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	      inode->i_ino, count, oldb, newb));

	BUG_ON(!PageLocked(locked_page));

	for (i = 0; i < count; i += blk_per_page) {
		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (IS_ERR(page))
				continue; /* unreadable page: skip, best effort */
		} else
			page = locked_page; /* caller already holds this page lock */

		j = i;
		head = page_buffers(page);
		bh = head;
		do {
			/* Renumber only buffers still mapping the old block range. */
			if (likely(bh->b_blocknr == j + oldb && j < count)) {
				/* Invalidate any stale alias of the old block in
				 * the block device's cache before reusing it. */
				unmap_underlying_metadata(bh->b_bdev,
							  bh->b_blocknr);
				bh->b_blocknr = newb + j++;
				mark_buffer_dirty(bh);
			}

			bh = bh->b_this_page;
		} while (bh != head);

		set_page_dirty(page);

		/* Only unlock/release pages this function locked itself;
		 * locked_page stays locked for the caller. */
		if (likely(cur_index != index)) {
			unlock_page(page);
			page_cache_release(page);
		}
	}
	UFSD(("EXIT\n"));
}
|
||||
|
||||
unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
|
||||
unsigned goal, unsigned count, int * err, struct page *locked_page)
|
||||
{
|
||||
struct super_block * sb;
|
||||
struct ufs_sb_private_info * uspi;
|
||||
struct ufs_super_block_first * usb1;
|
||||
struct buffer_head * bh;
|
||||
unsigned cgno, oldcount, newcount, tmp, request, i, result;
|
||||
unsigned cgno, oldcount, newcount, tmp, request, result;
|
||||
|
||||
UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
|
||||
|
||||
@ -343,24 +448,8 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
|
||||
}
|
||||
result = ufs_alloc_fragments (inode, cgno, goal, request, err);
|
||||
if (result) {
|
||||
for (i = 0; i < oldcount; i++) {
|
||||
bh = sb_bread(sb, tmp + i);
|
||||
if(bh)
|
||||
{
|
||||
clear_buffer_dirty(bh);
|
||||
bh->b_blocknr = result + i;
|
||||
mark_buffer_dirty (bh);
|
||||
if (IS_SYNC(inode))
|
||||
sync_dirty_buffer(bh);
|
||||
brelse (bh);
|
||||
}
|
||||
else
|
||||
{
|
||||
printk(KERN_ERR "ufs_new_fragments: bread fail\n");
|
||||
unlock_super(sb);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
|
||||
|
||||
*p = cpu_to_fs32(sb, result);
|
||||
*err = 0;
|
||||
inode->i_blocks += count << uspi->s_nspfshift;
|
||||
|
@ -172,9 +172,10 @@ static void ufs_clear_block(struct inode *inode, struct buffer_head *bh)
|
||||
sync_dirty_buffer(bh);
|
||||
}
|
||||
|
||||
static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
|
||||
unsigned int fragment, unsigned int new_fragment,
|
||||
unsigned int required, int *err, int metadata, long *phys, int *new)
|
||||
static struct buffer_head *ufs_inode_getfrag(struct inode *inode,
|
||||
unsigned int fragment, unsigned int new_fragment,
|
||||
unsigned int required, int *err, int metadata,
|
||||
long *phys, int *new, struct page *locked_page)
|
||||
{
|
||||
struct ufs_inode_info *ufsi = UFS_I(inode);
|
||||
struct super_block * sb;
|
||||
@ -232,7 +233,8 @@ repeat:
|
||||
if (lastblockoff) {
|
||||
p2 = ufsi->i_u1.i_data + lastblock;
|
||||
tmp = ufs_new_fragments (inode, p2, lastfrag,
|
||||
fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err);
|
||||
fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff,
|
||||
err, locked_page);
|
||||
if (!tmp) {
|
||||
if (lastfrag != ufsi->i_lastfrag)
|
||||
goto repeat;
|
||||
@ -244,14 +246,16 @@ repeat:
|
||||
}
|
||||
goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
|
||||
tmp = ufs_new_fragments (inode, p, fragment - blockoff,
|
||||
goal, required + blockoff, err);
|
||||
goal, required + blockoff,
|
||||
err, locked_page);
|
||||
}
|
||||
/*
|
||||
* We will extend last allocated block
|
||||
*/
|
||||
else if (lastblock == block) {
|
||||
tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff),
|
||||
fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err);
|
||||
tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
|
||||
fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
|
||||
err, locked_page);
|
||||
}
|
||||
/*
|
||||
* We will allocate new block before last allocated block
|
||||
@ -259,8 +263,8 @@ repeat:
|
||||
else /* (lastblock > block) */ {
|
||||
if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
|
||||
goal = tmp + uspi->s_fpb;
|
||||
tmp = ufs_new_fragments (inode, p, fragment - blockoff,
|
||||
goal, uspi->s_fpb, err);
|
||||
tmp = ufs_new_fragments(inode, p, fragment - blockoff,
|
||||
goal, uspi->s_fpb, err, locked_page);
|
||||
}
|
||||
if (!tmp) {
|
||||
if ((!blockoff && *p) ||
|
||||
@ -303,9 +307,10 @@ repeat2:
|
||||
*/
|
||||
}
|
||||
|
||||
static struct buffer_head * ufs_block_getfrag (struct inode *inode,
|
||||
struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment,
|
||||
unsigned int blocksize, int * err, int metadata, long *phys, int *new)
|
||||
static struct buffer_head *ufs_block_getfrag(struct inode *inode, struct buffer_head *bh,
|
||||
unsigned int fragment, unsigned int new_fragment,
|
||||
unsigned int blocksize, int * err, int metadata,
|
||||
long *phys, int *new, struct page *locked_page)
|
||||
{
|
||||
struct super_block * sb;
|
||||
struct ufs_sb_private_info * uspi;
|
||||
@ -350,7 +355,8 @@ repeat:
|
||||
goal = tmp + uspi->s_fpb;
|
||||
else
|
||||
goal = bh->b_blocknr + uspi->s_fpb;
|
||||
tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err);
|
||||
tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
|
||||
uspi->s_fpb, err, locked_page);
|
||||
if (!tmp) {
|
||||
if (fs32_to_cpu(sb, *p))
|
||||
goto repeat;
|
||||
@ -424,15 +430,15 @@ int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_hea
|
||||
* it much more readable:
|
||||
*/
|
||||
#define GET_INODE_DATABLOCK(x) \
|
||||
ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
|
||||
ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new, bh_result->b_page)
|
||||
#define GET_INODE_PTR(x) \
|
||||
ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
|
||||
ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL, bh_result->b_page)
|
||||
#define GET_INDIRECT_DATABLOCK(x) \
|
||||
ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
|
||||
&err, 0, &phys, &new);
|
||||
ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
|
||||
&err, 0, &phys, &new, bh_result->b_page);
|
||||
#define GET_INDIRECT_PTR(x) \
|
||||
ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
|
||||
&err, 1, NULL, NULL);
|
||||
ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
|
||||
&err, 1, NULL, NULL, bh_result->b_page);
|
||||
|
||||
if (ptr < UFS_NDIR_FRAGMENT) {
|
||||
bh = GET_INODE_DATABLOCK(ptr);
|
||||
|
@ -875,7 +875,8 @@ struct ufs_super_block_third {
|
||||
/* balloc.c */
|
||||
extern void ufs_free_fragments (struct inode *, unsigned, unsigned);
|
||||
extern void ufs_free_blocks (struct inode *, unsigned, unsigned);
|
||||
extern unsigned ufs_new_fragments (struct inode *, __fs32 *, unsigned, unsigned, unsigned, int *);
|
||||
extern unsigned ufs_new_fragments(struct inode *, __fs32 *, unsigned, unsigned,
|
||||
unsigned, int *, struct page *);
|
||||
|
||||
/* cylinder.c */
|
||||
extern struct ufs_cg_private_info * ufs_load_cylinder (struct super_block *, unsigned);
|
||||
|
Loading…
Reference in New Issue
Block a user