mirror of https://mirrors.bfsu.edu.cn/git/linux.git
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "7 patches.

  Subsystems affected by this patch series: lib, ocfs2, and mm (slub,
  migration, and memcg)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memcg: fix NULL pointer dereference in memcg_slab_free_hook()
  slub: fix unreclaimable slab stat for bulk free
  mm/migrate: fix NR_ISOLATED corruption on 64-bit
  mm: memcontrol: fix blocking rstat function called from atomic cgroup1 thresholding code
  ocfs2: issue zeroout to EOF blocks
  ocfs2: fix zero out valid data
  lib/test_string.c: move string selftest in the Runtime Testing menu
commit ad6ec09d96

fs/ocfs2/file.c (103 lines changed)
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
 	}
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ * is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					 u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				&nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
 				       u64 start, u64 len)
 {
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	unsigned int csize = osb->s_clustersize;
 	handle_t *handle;
+	loff_t isize = i_size_read(inode);
 
 	/*
 	 * The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
 		goto out;
 
+	/* No page cache for EOF blocks, issue zero out to disk. */
+	if (end > isize) {
+		/*
+		 * zeroout eof blocks in last cluster starting from
+		 * "isize" even "start" > "isize" because it is
+		 * complicated to zeroout just at "start" as "start"
+		 * may be not aligned with block size, buffer write
+		 * would be required to do that, but out of eof buffer
+		 * write is not supported.
+		 */
+		ret = ocfs2_zeroout_partial_cluster(inode, isize,
+					end - isize);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+		if (start >= isize)
+			goto out;
+		end = isize;
+	}
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
@@ -1855,45 +1915,6 @@ out:
 	return ret;
 }
 
-/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- * is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-					 u64 start, u64 len)
-{
-	int ret;
-	u64 start_block, end_block, nr_blocks;
-	u64 p_block, offset;
-	u32 cluster, p_cluster, nr_clusters;
-	struct super_block *sb = inode->i_sb;
-	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
-	if (start + len < end)
-		end = start + len;
-
-	start_block = ocfs2_blocks_for_bytes(sb, start);
-	end_block = ocfs2_blocks_for_bytes(sb, end);
-	nr_blocks = end_block - start_block;
-	if (!nr_blocks)
-		return 0;
-
-	cluster = ocfs2_bytes_to_clusters(sb, start);
-	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-				&nr_clusters, NULL);
-	if (ret)
-		return ret;
-	if (!p_cluster)
-		return 0;
-
-	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}
 
-	orig_isize = i_size_read(inode);
 	switch (sr->l_whence) {
 	case 0: /*SEEK_SET*/
 		break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
-		sr->l_start += orig_isize;
+		sr->l_start += i_size_read(inode);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		ret = -EINVAL;
 	}
 
+	orig_isize = i_size_read(inode);
 	/* zeroout eof blocks in the cluster. */
 	if (!ret && change_size && orig_isize < size) {
 		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
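To make the range handling in ocfs2_zeroout_partial_cluster() above concrete, here is a small userspace sketch of the same arithmetic with made-up sizes (4 KiB blocks, 1 MiB clusters). The open-coded math is only a stand-in for the ocfs2 helpers, which are assumed here to round byte offsets up to the next block or cluster boundary:

/* Userspace sketch, not ocfs2 code: shows how "start" is pushed up to a
 * block boundary and "end" is clamped to the end of the current cluster. */
#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE	4096ULL
#define CLUSTER_SIZE	(1024ULL * 1024)

int main(void)
{
	/* zero from an unaligned offset inside cluster 5, with a large len */
	uint64_t start = 5 * CLUSTER_SIZE + 3 * BLOCK_SIZE + 100;
	uint64_t len = 10 * CLUSTER_SIZE;

	/* round up to the cluster boundary, like ocfs2_align_bytes_to_clusters() */
	uint64_t end = (start + CLUSTER_SIZE - 1) & ~(CLUSTER_SIZE - 1);

	if (start + len < end)		/* len shorter than the cluster tail? */
		end = start + len;

	/* bytes -> blocks, rounding up like ocfs2_blocks_for_bytes() */
	uint64_t start_block = (start + BLOCK_SIZE - 1) / BLOCK_SIZE;
	uint64_t end_block = (end + BLOCK_SIZE - 1) / BLOCK_SIZE;

	/* start lands on block 5*256+4, end stops at the cluster 6 boundary,
	 * so only the 252 whole blocks left in cluster 5 get zeroed */
	printf("zero out %llu blocks starting at block %llu\n",
	       (unsigned long long)(end_block - start_block),
	       (unsigned long long)start_block);
	return 0;
}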
lib/Kconfig

@@ -683,9 +683,6 @@ config PARMAN
 config OBJAGG
 	tristate "objagg" if COMPILE_TEST
 
-config STRING_SELFTEST
-	tristate "Test string functions"
-
 endmenu
 
 config GENERIC_IOREMAP
lib/Kconfig.debug

@@ -2180,6 +2180,9 @@ config ASYNC_RAID6_TEST
 config TEST_HEXDUMP
 	tristate "Test functions located in the hexdump module at runtime"
 
+config STRING_SELFTEST
+	tristate "Test string functions at runtime"
+
 config TEST_STRING_HELPERS
 	tristate "Test functions located in the string_helpers module at runtime"
 
mm/memcontrol.c

@@ -3574,7 +3574,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 	unsigned long val;
 
 	if (mem_cgroup_is_root(memcg)) {
-		cgroup_rstat_flush(memcg->css.cgroup);
+		/* mem_cgroup_threshold() calls here from irqsafe context */
+		cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
 			memcg_page_state(memcg, NR_ANON_MAPPED);
 		if (swap)
mm/migrate.c

@@ -2068,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	LIST_HEAD(migratepages);
 	new_page_t *new;
 	bool compound;
-	unsigned int nr_pages = thp_nr_pages(page);
+	int nr_pages = thp_nr_pages(page);
 
 	/*
 	 * PTE mapped THP or HugeTLB page can't reach here so the page could
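The type change above is what the NR_ISOLATED fix is about: negating an unsigned int and widening the result to a 64-bit long yields a huge positive delta instead of a small negative one. A minimal userspace sketch follows; mod_stat() is a hypothetical stand-in for the per-node stat updater, assumed here to take a long delta:

/* Userspace sketch of the signedness hazard on an LP64 system. */
#include <stdio.h>

static long counter;			/* stands in for a 64-bit per-node stat */

static void mod_stat(long delta)	/* hypothetical stand-in for the stat updater */
{
	counter += delta;
}

int main(void)
{
	unsigned int nr_unsigned = 2;	/* the old type */
	int nr_signed = 2;		/* the fixed type */

	mod_stat(-nr_unsigned);		/* 0xfffffffe widens to 4294967294 */
	printf("unsigned delta: counter = %ld\n", counter);

	counter = 0;
	mod_stat(-nr_signed);		/* widens to -2 as intended */
	printf("signed delta:   counter = %ld\n", counter);
	return 0;
}

With nr_pages declared unsigned int, "-nr_pages" therefore inflates the isolated-page counter rather than decrementing it, which is the corruption the switch to int avoids.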
mm/slab.h

@@ -346,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
 			continue;
 
 		page = virt_to_head_page(p[i]);
-		objcgs = page_objcgs(page);
+		objcgs = page_objcgs_check(page);
 		if (!objcgs)
 			continue;
 
mm/slub.c (22 lines changed)
@@ -3236,6 +3236,16 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
+static inline void free_nonslab_page(struct page *page)
+{
+	unsigned int order = compound_order(page);
+
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
+	kfree_hook(page_address(page));
+	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+	__free_pages(page, order);
+}
+
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
@@ -3272,9 +3282,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!PageSlab(page))) {
-			BUG_ON(!PageCompound(page));
-			kfree_hook(object);
-			__free_pages(page, compound_order(page));
+			free_nonslab_page(page);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
@@ -4250,13 +4258,7 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		unsigned int order = compound_order(page);
-
-		BUG_ON(!PageCompound(page));
-		kfree_hook(object);
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-				      -(PAGE_SIZE << order));
-		__free_pages(page, order);
+		free_nonslab_page(page);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
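For context on the slub change, here is a minimal module-style sketch of the path the new free_nonslab_page() helper unifies. The module name and the 16 KiB size are illustrative: on typical 4 KiB-page configurations such an allocation bypasses the kmalloc caches and is backed by a bare compound page, and bulk-freeing it goes through the build_detached_freelist() branch above, which previously skipped the NR_SLAB_UNRECLAIMABLE_B update that kfree() performed.

/* demo_bulk_free.c — hypothetical demo module, not part of the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>

static int __init demo_bulk_free_init(void)
{
	void *p[1];

	/* Larger than the biggest SLUB kmalloc cache on 4 KiB pages, so the
	 * allocation is a plain compound page rather than a slab object. */
	p[0] = kmalloc(16 * 1024, GFP_KERNEL);
	if (!p[0])
		return -ENOMEM;

	/* Frees via kmem_cache_free_bulk() -> build_detached_freelist();
	 * with this patch the unreclaimable slab stat is decremented here too. */
	kfree_bulk(1, p);
	return 0;
}

static void __exit demo_bulk_free_exit(void)
{
}

module_init(demo_bulk_free_init);
module_exit(demo_bulk_free_exit);
MODULE_LICENSE("GPL");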