Commit a8f15585 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  revert "mm: make sure all file VMAs have ->vm_ops set"
  MAINTAINERS: update LTP mailing list
  userfaultfd: add missing mmput() in error path
  lib/string_helpers.c: fix infinite loop in string_get_size()
  alpha: lib: export __delay
  alpha: io: define ioremap_uc
  kasan: fix last shadow judgement in memory_is_poisoned_16()
  zram: fix possible use after free in zcomp_create()
parents 8e64a733 28c553d0
@@ -6452,11 +6452,11 @@ F: drivers/hwmon/ltc4261.c
 LTP (Linux Test Project)
 M: Mike Frysinger <vapier@gentoo.org>
 M: Cyril Hrubis <chrubis@suse.cz>
-M: Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M: Wanlong Gao <wanlong.gao@gmail.com>
 M: Jan Stancek <jstancek@redhat.com>
 M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
 M: Alexey Kodanev <alexey.kodanev@oracle.com>
-L: ltp-list@lists.sourceforge.net (subscribers-only)
+L: ltp@lists.linux.it (subscribers-only)
 W: http://linux-test-project.github.io/
 T: git git://github.com/linux-test-project/ltp.git
 S: Maintained
...
@@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
                                              unsigned long size)
 {
         return ioremap(offset, size);
 }
 
+#define ioremap_uc ioremap_nocache
+
 static inline void iounmap(volatile void __iomem *addr)
 {
...
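
On the alpha ioremap_uc hunk above: ioremap_uc() asks for an uncached MMIO mapping, and
on alpha ioremap_nocache() already is one (it is just ioremap(), as the context shows),
so an alias is enough for callers of the _uc variant. A tiny, hypothetical driver
fragment (the base address, size and register offset are made up):

    void __iomem *regs;

    regs = ioremap_uc(dev_phys_base, dev_region_size);   /* on alpha this is ioremap_nocache() */
    if (!regs)
            return -ENOMEM;

    writel(1, regs + EXAMPLE_CTRL_OFFSET);               /* hypothetical control register */
    iounmap(regs);
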
@@ -30,6 +30,7 @@ __delay(int loops)
         "       bgt %0,1b"
         : "=&r" (tmp), "=r" (loops) : "1"(loops));
 }
+EXPORT_SYMBOL(__delay);
 
 #ifdef CONFIG_SMP
 #define LPJ  cpu_data[smp_processor_id()].loops_per_jiffy
...
@@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp)
  * allocate new zcomp and initialize it. return compressing
  * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
  * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error.
+ * case of allocation error, or any other error potentially
+ * returned by functions zcomp_strm_{multi,single}_create.
  */
 struct zcomp *zcomp_create(const char *compress, int max_strm)
 {
         struct zcomp *comp;
         struct zcomp_backend *backend;
+        int error;
 
         backend = find_backend(compress);
         if (!backend)
@@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
 
         comp->backend = backend;
         if (max_strm > 1)
-                zcomp_strm_multi_create(comp, max_strm);
+                error = zcomp_strm_multi_create(comp, max_strm);
         else
-                zcomp_strm_single_create(comp);
-        if (!comp->stream) {
+                error = zcomp_strm_single_create(comp);
+        if (error) {
                 kfree(comp);
-                return ERR_PTR(-ENOMEM);
+                return ERR_PTR(error);
         }
         return comp;
 }
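
A note on the zcomp_create() change above: the stream-setup error code is now propagated
to the caller through the usual ERR_PTR()/IS_ERR()/PTR_ERR() helpers instead of being
collapsed into -ENOMEM. A minimal, hypothetical caller sketch (the function name and
error policy here are illustrative, not part of the patch):

    static int example_setup_compressor(const char *name, int max_strm)
    {
            struct zcomp *comp;

            comp = zcomp_create(name, max_strm);
            if (IS_ERR(comp))
                    /* now sees -EINVAL, -ENOMEM or whatever the stream setup failed with */
                    return PTR_ERR(comp);

            /* ... use comp, then tear it down ... */
            zcomp_destroy(comp);
            return 0;
    }
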
@@ -1287,8 +1287,10 @@ static struct file *userfaultfd_file_create(int flags)
 
         file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
                                   O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
-        if (IS_ERR(file))
+        if (IS_ERR(file)) {
+                mmput(ctx->mm);
                 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+        }
 out:
         return file;
 }
...
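
On the userfaultfd hunk above: the context pins its mm when it is created, and mmput()
is the matching release, so a failed anon_inode_getfile() previously leaked that mm
reference. For illustration only, the canonical take/release pairing on an mm_struct
(a generic sketch, not the code in this patch):

    struct mm_struct *mm = get_task_mm(current);   /* takes a reference on mm->mm_users */

    if (mm) {
            /* ... work that needs the mm to stay alive ... */
            mmput(mm);                             /* drop it on every exit path, errors included */
    }
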
@@ -59,7 +59,11 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
         }
 
         exp = divisor[units] / (u32)blk_size;
-        if (size >= exp) {
+        /*
+         * size must be strictly greater than exp here to ensure that remainder
+         * is greater than divisor[units] coming out of the if below.
+         */
+        if (size > exp) {
                 remainder = do_div(size, divisor[units]);
                 remainder *= blk_size;
                 i++;
...
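
For the string_get_size() hunk above, keep in mind that do_div() is not a plain division:
it divides its 64-bit first argument in place and returns the 32-bit remainder, so both
the quotient (left in size) and the scaled remainder survive that branch. A small
stand-alone illustration (the values are made up):

    u64 n = 2500;
    u32 rem;

    rem = do_div(n, 1000);   /* do_div() modifies n in place: n == 2, rem == 500 */
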
@@ -135,12 +135,11 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 
         if (unlikely(*shadow_addr)) {
                 u16 shadow_first_bytes = *(u16 *)shadow_addr;
-                s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
 
                 if (unlikely(shadow_first_bytes))
                         return true;
 
-                if (likely(!last_byte))
+                if (likely(IS_ALIGNED(addr, 8)))
                         return false;
 
                 return memory_is_poisoned_1(addr + 15);
...
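
On the memory_is_poisoned_16() hunk above: with KASAN's one-shadow-byte-per-8-bytes
encoding, a 16-byte access touches two shadow bytes when addr is 8-byte aligned and
three when it is not, and the u16 shadow load only covers the first two, so only the
unaligned case needs the extra memory_is_poisoned_1(addr + 15) check. The old test keyed
off the alignment of addr + 15 rather than addr, so a case like addr % 8 == 1 skipped
that check. A stand-alone sketch of the counting (assumes the usual shadow scale of 8):

    #include <stdio.h>

    int main(void)
    {
            for (unsigned long addr = 0; addr < 16; addr++) {
                    unsigned long first = addr >> 3;         /* shadow byte covering the first byte */
                    unsigned long last = (addr + 15) >> 3;   /* shadow byte covering the last byte */

                    /* prints 2 shadow bytes when addr is 8-byte aligned, 3 otherwise */
                    printf("addr mod 8 = %lu -> %lu shadow bytes\n",
                           addr & 7, last - first + 1);
            }
            return 0;
    }
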
@@ -612,8 +612,6 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct rb_node **rb_link, struct rb_node *rb_parent)
 {
-        WARN_ONCE(vma->vm_file && !vma->vm_ops, "missing vma->vm_ops");
-
         /* Update tracking information for the gap following the new vma. */
         if (vma->vm_next)
                 vma_gap_update(vma->vm_next);
@@ -1638,12 +1636,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                  */
                 WARN_ON_ONCE(addr != vma->vm_start);
 
-                /* All file mapping must have ->vm_ops set */
-                if (!vma->vm_ops) {
-                        static const struct vm_operations_struct dummy_ops = {};
-                        vma->vm_ops = &dummy_ops;
-                }
-
                 addr = vma->vm_start;
                 vm_flags = vma->vm_flags;
         } else if (vm_flags & VM_SHARED) {
...
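
Since the revert above means a file-backed VMA may once again carry a NULL ->vm_ops,
callers keep the usual defensive check before dereferencing the ops table. A one-line
sketch of that pattern (the ->close hook is just an example):

    if (vma->vm_ops && vma->vm_ops->close)
            vma->vm_ops->close(vma);
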