Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  revert "mm: make sure all file VMAs have ->vm_ops set"
  MAINTAINERS: update LTP mailing list
  userfaultfd: add missing mmput() in error path
  lib/string_helpers.c: fix infinite loop in string_get_size()
  alpha: lib: export __delay
  alpha: io: define ioremap_uc
  kasan: fix last shadow judgement in memory_is_poisoned_16()
  zram: fix possible use after free in zcomp_create()
Linus Torvalds 2015-09-17 21:16:47 -07:00
commit a8f1558558
8 changed files with 22 additions and 20 deletions

@@ -6452,11 +6452,11 @@ F: drivers/hwmon/ltc4261.c
 LTP (Linux Test Project)
 M: Mike Frysinger <vapier@gentoo.org>
 M: Cyril Hrubis <chrubis@suse.cz>
-M: Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M: Wanlong Gao <wanlong.gao@gmail.com>
 M: Jan Stancek <jstancek@redhat.com>
 M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
 M: Alexey Kodanev <alexey.kodanev@oracle.com>
-L: ltp-list@lists.sourceforge.net (subscribers-only)
+L: ltp@lists.linux.it (subscribers-only)
 W: http://linux-test-project.github.io/
 T: git git://github.com/linux-test-project/ltp.git
 S: Maintained

@@ -299,6 +299,8 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
 	return ioremap(offset, size);
 }
 
+#define ioremap_uc ioremap_nocache
+
 static inline void iounmap(volatile void __iomem *addr)
 {
 	IO_CONCAT(__IO_PREFIX,iounmap)(addr);

@@ -30,6 +30,7 @@ __delay(int loops)
 		"	bgt %0,1b"
 		: "=&r" (tmp), "=r" (loops) : "1"(loops));
 }
+EXPORT_SYMBOL(__delay);
 
 #ifdef CONFIG_SMP
 #define LPJ	cpu_data[smp_processor_id()].loops_per_jiffy

@@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp)
  * allocate new zcomp and initialize it. return compressing
  * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
  * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error.
+ * case of allocation error, or any other error potentially
+ * returned by functions zcomp_strm_{multi,single}_create.
  */
 struct zcomp *zcomp_create(const char *compress, int max_strm)
 {
 	struct zcomp *comp;
 	struct zcomp_backend *backend;
+	int error;
 
 	backend = find_backend(compress);
 	if (!backend)
@@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
 
 	comp->backend = backend;
 	if (max_strm > 1)
-		zcomp_strm_multi_create(comp, max_strm);
+		error = zcomp_strm_multi_create(comp, max_strm);
 	else
-		zcomp_strm_single_create(comp);
-	if (!comp->stream) {
+		error = zcomp_strm_single_create(comp);
+	if (error) {
 		kfree(comp);
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(error);
 	}
 	return comp;
 }
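
The zcomp change switches zcomp_create() from returning a blanket ERR_PTR(-ENOMEM) to propagating whatever error the stream-creation helpers report, so callers see the real cause. A minimal userspace sketch of that ERR_PTR-style idiom, with hypothetical err_ptr()/is_err()/ptr_err() helpers standing in for the kernel's <linux/err.h> and a fake create function rather than the real zcomp code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a negative errno value in a pointer, in the spirit of ERR_PTR(). */
static void *err_ptr(long error)    { return (void *)error; }
static long ptr_err(const void *p)  { return (long)p; }
static int is_err(const void *p)    { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

struct comp { int nr_streams; };

/* Stand-in for zcomp_create(): hand the helper's exact error back to the caller. */
static struct comp *comp_create(int max_strm)
{
	struct comp *c = malloc(sizeof(*c));
	int error;

	if (!c)
		return err_ptr(-ENOMEM);

	error = (max_strm >= 1) ? 0 : -EINVAL;	/* pretend stream setup ran here */
	if (error) {
		free(c);
		return err_ptr(error);		/* propagate it, not a blanket -ENOMEM */
	}
	c->nr_streams = max_strm;
	return c;
}

int main(void)
{
	struct comp *c = comp_create(0);

	if (is_err(c))
		printf("create failed: %ld\n", ptr_err(c));	/* -22, i.e. EINVAL */
	else
		free(c);
	return 0;
}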

@@ -1287,8 +1287,10 @@ static struct file *userfaultfd_file_create(int flags)
 
 	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
			O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
-	if (IS_ERR(file))
+	if (IS_ERR(file)) {
+		mmput(ctx->mm);
 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+	}
 out:
 	return file;
 }

@@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
 	}
 
 	exp = divisor[units] / (u32)blk_size;
-	if (size >= exp) {
+	/*
+	 * size must be strictly greater than exp here to ensure that remainder
+	 * is greater than divisor[units] coming out of the if below.
+	 */
+	if (size > exp) {
 		remainder = do_div(size, divisor[units]);
 		remainder *= blk_size;
 		i++;

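The comment added above is easiest to see with concrete numbers. With size = 1 and blk_size = 512 against the decimal divisor of 1000, exp is 1, so the old size >= exp test took the division branch and reduced the running size to zero, which the later scaling in string_get_size() cannot recover from (the infinite loop named in the patch title); the strict size > exp test keeps such inputs in the else branch. A standalone sketch of that arithmetic, in plain C with illustrative values rather than the kernel function itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 1, blk_size = 512, remainder = 0;
	const uint32_t divisor = 1000;			/* STRING_UNITS_10 */
	uint32_t exp = divisor / (uint32_t)blk_size;	/* exp == 1 */

	if (size >= exp) {			/* old test: taken when size == exp */
		remainder = size % divisor;	/* 1, as do_div() would return */
		size /= divisor;		/* 0, as do_div() leaves behind */
		remainder *= blk_size;		/* 512 */
	} else {
		remainder *= size;		/* size > exp would land here instead */
	}

	size *= blk_size;			/* still 0 */
	size += remainder / divisor;		/* 512 / 1000 == 0, so still 0 */

	/* size == 0 here; the fixed test would have left it at 512 (bytes). */
	printf("size = %llu, remainder = %llu\n",
	       (unsigned long long)size, (unsigned long long)remainder);
	return 0;
}
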
@@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 
 	if (unlikely(*shadow_addr)) {
 		u16 shadow_first_bytes = *(u16 *)shadow_addr;
-		s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
 
 		if (unlikely(shadow_first_bytes))
 			return true;
 
-		if (likely(!last_byte))
+		if (likely(IS_ALIGNED(addr, 8)))
 			return false;
 
 		return memory_is_poisoned_1(addr + 15);

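For the kasan change, recall that one shadow byte covers 8 bytes of memory. A 16-byte access therefore spans exactly the two shadow bytes already read through shadow_first_bytes only when its start address is 8-byte aligned; otherwise it spills into a third shadow byte and the final byte at addr + 15 still has to be checked, which is what the IS_ALIGNED(addr, 8) test distinguishes. A small illustration of that shadow-byte arithmetic, in ordinary userspace C with example addresses rather than KASAN itself:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3	/* one shadow byte per 8 bytes of memory */

static void show(uintptr_t addr)
{
	uintptr_t first = addr >> SHADOW_SCALE_SHIFT;
	uintptr_t last = (addr + 15) >> SHADOW_SCALE_SHIFT;

	printf("16-byte access at 0x%lx: shadow bytes %lu..%lu (%lu in total)\n",
	       (unsigned long)addr, (unsigned long)first, (unsigned long)last,
	       (unsigned long)(last - first + 1));
}

int main(void)
{
	show(0x1000);	/* aligned:   2 shadow bytes, the u16 check covers both    */
	show(0x1001);	/* unaligned: 3 shadow bytes, addr + 15 needs its own check */
	return 0;
}
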
@@ -612,8 +612,6 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct rb_node **rb_link, struct rb_node *rb_parent)
 {
-	WARN_ONCE(vma->vm_file && !vma->vm_ops, "missing vma->vm_ops");
-
 	/* Update tracking information for the gap following the new vma. */
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
@@ -1638,12 +1636,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		 */
 		WARN_ON_ONCE(addr != vma->vm_start);
 
-		/* All file mapping must have ->vm_ops set */
-		if (!vma->vm_ops) {
-			static const struct vm_operations_struct dummy_ops = {};
-			vma->vm_ops = &dummy_ops;
-		}
-
 		addr = vma->vm_start;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {