Commit 7a752478 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:

 - "fork: unconditionally clear stack on fork" is a non-bugfix which got
   lost during the merge window - performance concerns appear to have
   been adequately addressed.

 - and a bunch of fixes

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/filemap.c: fix NULL pointer in page_cache_tree_insert()
  mm: memcg: add __GFP_NOWARN in __memcg_schedule_kmem_cache_create()
  fs, elf: don't complain MAP_FIXED_NOREPLACE unless -EEXIST error
  kexec_file: do not add extra alignment to efi memmap
  proc: fix /proc/loadavg regression
  proc: revalidate kernel thread inodes to root:root
  autofs: mount point create should honour passed in mode
  MAINTAINERS: add personal addresses for Sascha and Uwe
  kasan: add no_sanitize attribute for clang builds
  rapidio: fix rio_dma_transfer error handling
  mm: enable thp migration for shmem thp
  writeback: safer lock nesting
  mm, pagemap: fix swap offset value for PMD migration entry
  mm: fix do_pages_move status handling
  fork: unconditionally clear stack on fork
parents 83beed7b abc1be13
@@ -1373,7 +1373,8 @@ F: arch/arm/mach-ebsa110/
 F: drivers/net/ethernet/amd/am79c961a.*
 
 ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
-M: Uwe Kleine-König <kernel@pengutronix.de>
+M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 N: efm32
@@ -1401,7 +1402,8 @@ F: arch/arm/mach-footbridge/
 
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M: Shawn Guo <shawnguo@kernel.org>
-M: Sascha Hauer <kernel@pengutronix.de>
+M: Sascha Hauer <s.hauer@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 R: Fabio Estevam <fabio.estevam@nxp.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -1416,7 +1418,8 @@ F: include/soc/imx/
 
 ARM/FREESCALE VYBRID ARM ARCHITECTURE
 M: Shawn Guo <shawnguo@kernel.org>
-M: Sascha Hauer <kernel@pengutronix.de>
+M: Sascha Hauer <s.hauer@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 R: Stefan Agner <stefan@agner.ch>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -5652,7 +5655,8 @@ F: drivers/net/ethernet/freescale/fec.h
 F: Documentation/devicetree/bindings/net/fsl-fec.txt
 
 FREESCALE IMX / MXC FRAMEBUFFER DRIVER
-M: Sascha Hauer <kernel@pengutronix.de>
+M: Sascha Hauer <s.hauer@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 L: linux-fbdev@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -12825,7 +12829,8 @@ F: include/linux/siphash.h
 
 SIOX
 M: Gavin Schenk <g.schenk@eckelmann.de>
-M: Uwe Kleine-König <kernel@pengutronix.de>
+M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
 S: Supported
 F: drivers/siox/*
 F: include/trace/events/siox.h

@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
                LOAD_INT(c), LOAD_FRAC(c),
                count_active_contexts(),
                atomic_read(&nr_spu_contexts),
-               idr_get_cursor(&task_active_pid_ns(current)->idr));
+               idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 
        return 0;
 }

@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
         * little bit simple
         */
        efi_map_sz = efi_get_runtime_map_size();
-       efi_map_sz = ALIGN(efi_map_sz, 16);
        params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
                                MAX_ELFCOREHDR_STR_LEN;
        params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
-       kbuf.bufsz = params_cmdline_sz + efi_map_sz +
+       kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
                        sizeof(struct setup_data) +
                        sizeof(struct efi_setup_data);
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
        if (!params)
                return ERR_PTR(-ENOMEM);
        efi_map_offset = params_cmdline_sz;
-       efi_setup_data_offset = efi_map_offset + efi_map_sz;
+       efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
 
        /* Copy setup header onto bootparams. Documentation/x86/boot.txt */
        setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;

@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
        tx->callback = dma_xfer_callback;
        tx->callback_param = req;
 
-       req->dmach = chan;
-       req->sync = sync;
        req->status = DMA_IN_PROGRESS;
-       init_completion(&req->req_comp);
        kref_get(&req->refcount);
 
        cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
        if (!req)
                return -ENOMEM;
 
-       kref_init(&req->refcount);
-
        ret = get_dma_channel(priv);
        if (ret) {
                kfree(req);
                return ret;
        }
+       chan = priv->dmach;
+
+       kref_init(&req->refcount);
+       init_completion(&req->req_comp);
+       req->dir = dir;
+       req->filp = filp;
+       req->priv = priv;
+       req->dmach = chan;
+       req->sync = sync;
 
        /*
         * If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
                        xfer->offset, xfer->length);
        }
 
-       req->dir = dir;
-       req->filp = filp;
-       req->priv = priv;
-       chan = priv->dmach;
-
        nents = dma_map_sg(chan->device->dev,
                           req->sgt.sgl, req->sgt.nents, dir);
        if (nents == 0) {

@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
        autofs4_del_active(dentry);
 
-       inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
+       inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
        if (!inode)
                return -ENOMEM;
        d_add(dentry, inode);

@@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        } else
                map_addr = vm_mmap(filep, addr, size, prot, type, off);
 
-       if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr))
-               pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n",
-                               task_pid_nr(current), current->comm,
-                               (void *)addr);
+       if ((type & MAP_FIXED_NOREPLACE) &&
+           PTR_ERR((void *)map_addr) == -EEXIST)
+               pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
+                       task_pid_nr(current), current->comm, (void *)addr);
 
        return(map_addr);
 }

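For context on the hunk above: MAP_FIXED_NOREPLACE makes mmap() fail with -EEXIST instead of silently replacing an existing mapping, so the ELF loader now warns only when that specific error comes back rather than on every BAD_ADDR() result. A minimal userspace sketch of the semantics (the addresses and the fallback #define are illustrative assumptions, not taken from this patch):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000  /* uapi value; only needed with old headers */
    #endif

    int main(void)
    {
        /* Map one anonymous page, then ask for the same address again. */
        void *first = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *again = mmap(first, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                           -1, 0);

        if (again == MAP_FAILED && errno == EEXIST)
            printf("range already mapped, nothing was clobbered\n");
        return 0;
    }
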
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
         */
        if (inode && inode_to_wb_is_valid(inode)) {
                struct bdi_writeback *wb;
-               bool locked, congested;
+               struct wb_lock_cookie lock_cookie = {};
+               bool congested;
 
-               wb = unlocked_inode_to_wb_begin(inode, &locked);
+               wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
                congested = wb_congested(wb, cong_bits);
-               unlocked_inode_to_wb_end(inode, locked);
+               unlocked_inode_to_wb_end(inode, &lock_cookie);
                return congested;
        }

@@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
        kuid_t uid;
        kgid_t gid;
 
+       if (unlikely(task->flags & PF_KTHREAD)) {
+               *ruid = GLOBAL_ROOT_UID;
+               *rgid = GLOBAL_ROOT_GID;
+               return;
+       }
+
        /* Default to the tasks effective ownership */
        rcu_read_lock();
        cred = __task_cred(task);

@@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
                LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
                LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
                nr_running(), nr_threads,
-               idr_get_cursor(&task_active_pid_ns(current)->idr));
+               idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 
        return 0;
 }

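The regression being fixed here is in the last field of /proc/loadavg: the IDR cursor points at the next PID to be allocated, so printing it directly was off by one compared with the old "most recently allocated PID" behaviour. A small sketch of a reader that depends on that field (the sample values in the comment are made up):

    #include <stdio.h>

    int main(void)
    {
        double a1, a5, a15;
        int running, total, last_pid;
        FILE *f = fopen("/proc/loadavg", "r");

        /* Format: "0.20 0.18 0.12 1/80 11206" - the final number is the PID
         * most recently handed out, which idr_get_cursor() - 1 restores. */
        if (f && fscanf(f, "%lf %lf %lf %d/%d %d",
                        &a1, &a5, &a15, &running, &total, &last_pid) == 6)
            printf("last allocated pid: %d\n", last_pid);
        if (f)
            fclose(f);
        return 0;
    }
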
@@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
        else if (is_swap_pmd(pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(pmd);
+               unsigned long offset = swp_offset(entry);
 
+               offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
                frame = swp_type(entry) |
-                       (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+                       (offset << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
                if (pmd_swp_soft_dirty(pmd))
                        flags |= PM_SOFT_DIRTY;
@@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                                break;
                        if (pm->show_pfn && (flags & PM_PRESENT))
                                frame++;
+                       else if (flags & PM_SWAP)
+                               frame += (1 << MAX_SWAPFILES_SHIFT);
                }
                spin_unlock(ptl);
                return err;

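What changes for userspace: before this fix, every 4 KiB entry covered by a PMD migration entry reported the head page's swap offset; now each entry's offset advances page by page, matching the PTE case. A hedged sketch of decoding such a pagemap entry, using the field layout described in Documentation/admin-guide/mm/pagemap.rst (the constants are restated from memory here, so verify them against your kernel; the macro and entry values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* pagemap entry layout: bits 0-4 swap type, bits 5-54 swap offset,
     * bit 62 "page is swapped", bit 63 "page is present". */
    #define PM_SWAPPED  (1ULL << 62)
    #define PM_PRESENT  (1ULL << 63)

    static void decode(uint64_t e)
    {
        if (e & PM_SWAPPED)
            printf("swapped: type=%llu offset=%llu\n",
                   (unsigned long long)(e & 0x1f),
                   (unsigned long long)((e >> 5) & ((1ULL << 50) - 1)));
        else if (e & PM_PRESENT)
            printf("present: pfn=%llu\n",
                   (unsigned long long)(e & ((1ULL << 55) - 1)));
    }

    int main(void)
    {
        /* Two consecutive pages under one migrated PMD should now differ
         * by 1 in their swap offset (entries fabricated for illustration). */
        decode(PM_SWAPPED | 1 | (42ULL << 5));
        decode(PM_SWAPPED | 1 | (43ULL << 5));
        return 0;
    }
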
@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
        set_wb_congested(bdi->wb.congested, sync);
 }
 
+struct wb_lock_cookie {
+       bool locked;
+       unsigned long flags;
+};
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**

@@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 /**
  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
  * @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
  *
  * The caller wants to access the wb associated with @inode but isn't
  * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
@@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
  * association doesn't change until the transaction is finished with
  * unlocked_inode_to_wb_end().
  *
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction.  IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction.  IRQs may or may not be disabled on
+ * return.
  */
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
        rcu_read_lock();
@@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
         * Paired with store_release in inode_switch_wb_work_fn() and
         * ensures that we see the new wb if we see cleared I_WB_SWITCH.
         */
-       *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+       cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
 
-       if (unlikely(*lockedp))
-               xa_lock_irq(&inode->i_mapping->i_pages);
+       if (unlikely(cookie->locked))
+               xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
 
        /*
         * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
@@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
 /**
  * unlocked_inode_to_wb_end - end inode wb access transaction
  * @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
  */
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+                                            struct wb_lock_cookie *cookie)
 {
-       if (unlikely(locked))
-               xa_unlock_irq(&inode->i_mapping->i_pages);
+       if (unlikely(cookie->locked))
+               xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
 
        rcu_read_unlock();
 }
@@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
 }
 
 static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
 {
        return inode_to_wb(inode);
 }
 
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+                                            struct wb_lock_cookie *cookie)
 {
 }

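With the switch from a bare bool to struct wb_lock_cookie, every unlocked wb transaction follows the same shape, as the fs-writeback.c and page-writeback.c hunks in this series show. A condensed sketch of that calling convention (sample_wb_query() is an invented name; the body simply mirrors the real inode_congested() caller above):

    /* Sketch only - mirrors the pattern used by inode_congested() above. */
    static bool sample_wb_query(struct inode *inode, int cong_bits)
    {
        struct bdi_writeback *wb;
        struct wb_lock_cookie cookie = {};  /* must start out zeroed */
        bool ret;

        wb = unlocked_inode_to_wb_begin(inode, &cookie);
        /* No sleeping in here: i_pages may be locked with IRQs saved in cookie. */
        ret = wb_congested(wb, cong_bits);
        unlocked_inode_to_wb_end(inode, &cookie);

        return ret;
    }
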
@@ -25,6 +25,9 @@
 #define __SANITIZE_ADDRESS__
 #endif
 
+#undef __no_sanitize_address
+#define __no_sanitize_address __attribute__((no_sanitize("address")))
+
 /* Clang doesn't have a way to turn it off per-function, yet. */
 #ifdef __noretpoline
 #undef __noretpoline
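After this change __no_sanitize_address expands to clang's no_sanitize("address") function attribute instead of being a no-op, so KASAN instrumentation can be suppressed per function under clang just as it already is with gcc. A short sketch of the usual annotation style (the helper name is made up):

    /* Deliberately touches memory KASAN considers poisoned (e.g. a redzone
     * probe), so instrumentation is disabled for just this helper. */
    static __no_sanitize_address unsigned long read_word_unchecked(const unsigned long *p)
    {
        return *p;
    }
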
@@ -43,11 +43,7 @@ enum {
 #define THREAD_ALIGN   THREAD_SIZE
 #endif
 
-#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
-#else
-# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT)
-#endif
+#define THREADINFO_GFP         (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 
 /*
  * flag set/clear/test wrappers

@@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
                if (!s)
                        continue;
 
-#ifdef CONFIG_DEBUG_KMEMLEAK
                /* Clear stale pointers from reused stack. */
                memset(s->addr, 0, THREAD_SIZE);
-#endif
+
                tsk->stack_vm_area = s;
                return s->addr;
        }

@@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
        VM_BUG_ON_PAGE(!PageLocked(new), new);
        VM_BUG_ON_PAGE(new->mapping, new);
 
-       error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+       error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
        if (!error) {
                struct address_space *mapping = old->mapping;
                void (*freepage)(struct page *);
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page,
                        return error;
        }
 
-       error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
+       error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
        if (error) {
                if (!huge)
                        mem_cgroup_cancel_charge(page, memcg, false);
@@ -1585,8 +1585,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                if (fgp_flags & FGP_ACCESSED)
                        __SetPageReferenced(page);
 
-               err = add_to_page_cache_lru(page, mapping, offset,
-                               gfp_mask & GFP_RECLAIM_MASK);
+               err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
                if (unlikely(err)) {
                        put_page(page);
                        page = NULL;
@@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
        if (!page)
                return -ENOMEM;
 
-       ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
+       ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
        if (ret == 0)
                ret = mapping->a_ops->readpage(file, page);
        else if (ret == -EEXIST)

@@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
                pmde = maybe_pmd_mkwrite(pmde, vma);
 
        flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
-       page_add_anon_rmap(new, vma, mmun_start, true);
+       if (PageAnon(new))
+               page_add_anon_rmap(new, vma, mmun_start, true);
+       else
+               page_add_file_rmap(new, true);
        set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
        if (vma->vm_flags & VM_LOCKED)
                mlock_vma_page(new);

@@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 {
        struct memcg_kmem_cache_create_work *cw;
 
-       cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
+       cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
        if (!cw)
                return;

@@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        pslot = radix_tree_lookup_slot(&mapping->i_pages,
                                        page_index(page));
 
-       expected_count += 1 + page_has_private(page);
+       expected_count += hpage_nr_pages(page) + page_has_private(page);
        if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot,
                                        &mapping->i_pages.xa_lock) != page) {
@@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       get_page(newpage);      /* add cache reference */
+       page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping,
        }
 
-       radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+       if (PageTransHuge(page)) {
+               int i;
+               int index = page_index(page);
+
+               for (i = 0; i < HPAGE_PMD_NR; i++) {
+                       pslot = radix_tree_lookup_slot(&mapping->i_pages,
+                                                      index + i);
+                       radix_tree_replace_slot(&mapping->i_pages, pslot,
+                                               newpage + i);
+               }
+       } else {
+               radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+       }
 
        /*
         * Drop cache reference from old page by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - 1);
+       page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
 
        xa_unlock(&mapping->i_pages);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -1622,6 +1635,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                        current_node = NUMA_NO_NODE;
                }
        }
 out_flush:
+       if (list_empty(&pagelist))
+               return err;
+
        /* Make sure we do not overwrite the existing error */
        err1 = do_move_pages_to_node(mm, &pagelist, current_node);
        if (!err1)

@@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page)
        if (mapping && mapping_cap_account_dirty(mapping)) {
                struct inode *inode = mapping->host;
                struct bdi_writeback *wb;
-               bool locked;
+               struct wb_lock_cookie cookie = {};
 
-               wb = unlocked_inode_to_wb_begin(inode, &locked);
+               wb = unlocked_inode_to_wb_begin(inode, &cookie);
                current->nr_dirtied--;
                dec_node_page_state(page, NR_DIRTIED);
                dec_wb_stat(wb, WB_DIRTIED);
-               unlocked_inode_to_wb_end(inode, locked);
+               unlocked_inode_to_wb_end(inode, &cookie);
        }
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page)
        if (mapping_cap_account_dirty(mapping)) {
                struct inode *inode = mapping->host;
                struct bdi_writeback *wb;
-               bool locked;
+               struct wb_lock_cookie cookie = {};
 
                lock_page_memcg(page);
-               wb = unlocked_inode_to_wb_begin(inode, &locked);
+               wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
                if (TestClearPageDirty(page))
                        account_page_cleaned(page, mapping, wb);
 
-               unlocked_inode_to_wb_end(inode, locked);
+               unlocked_inode_to_wb_end(inode, &cookie);
                unlock_page_memcg(page);
        } else {
                ClearPageDirty(page);
@@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page)
        if (mapping && mapping_cap_account_dirty(mapping)) {
                struct inode *inode = mapping->host;
                struct bdi_writeback *wb;
-               bool locked;
+               struct wb_lock_cookie cookie = {};
 
                /*
                 * Yes, Virginia, this is indeed insane.
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page)
                 * always locked coming in here, so we get the desired
                 * exclusion.
                 */
-               wb = unlocked_inode_to_wb_begin(inode, &locked);
+               wb = unlocked_inode_to_wb_begin(inode, &cookie);
                if (TestClearPageDirty(page)) {
                        dec_lruvec_page_state(page, NR_FILE_DIRTY);
                        dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                        dec_wb_stat(wb, WB_RECLAIMABLE);
                        ret = 1;
                }
-               unlocked_inode_to_wb_end(inode, locked);
+               unlocked_inode_to_wb_end(inode, &cookie);
                return ret;
        }
        return TestClearPageDirty(page);

@@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                if (!pvmw.pte && (flags & TTU_MIGRATION)) {
                        VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
 
-                       if (!PageAnon(page))
-                               continue;
-
                        set_pmd_migration_entry(&pvmw, page);
                        continue;
                }