Commit e4a9bc58 authored by Joe Perches, committed by Linus Torvalds
parent e46b893d
...@@ -1102,7 +1102,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, ...@@ -1102,7 +1102,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto retry; goto retry;
case -EBUSY: case -EBUSY:
ret = 0; ret = 0;
/* FALLTHRU */ fallthrough;
case -EFAULT: case -EFAULT:
case -ENOMEM: case -ENOMEM:
case -EHWPOISON: case -EHWPOISON:
......
...@@ -467,14 +467,14 @@ static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v) ...@@ -467,14 +467,14 @@ static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
switch (MEMFILE_ATTR(cft->private)) { switch (MEMFILE_ATTR(cft->private)) {
case RES_RSVD_USAGE: case RES_RSVD_USAGE:
counter = &h_cg->rsvd_hugepage[idx]; counter = &h_cg->rsvd_hugepage[idx];
/* Fall through. */ fallthrough;
case RES_USAGE: case RES_USAGE:
val = (u64)page_counter_read(counter); val = (u64)page_counter_read(counter);
seq_printf(seq, "%llu\n", val * PAGE_SIZE); seq_printf(seq, "%llu\n", val * PAGE_SIZE);
break; break;
case RES_RSVD_LIMIT: case RES_RSVD_LIMIT:
counter = &h_cg->rsvd_hugepage[idx]; counter = &h_cg->rsvd_hugepage[idx];
/* Fall through. */ fallthrough;
case RES_LIMIT: case RES_LIMIT:
val = (u64)counter->max; val = (u64)counter->max;
if (val == limit) if (val == limit)
...@@ -514,7 +514,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of, ...@@ -514,7 +514,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
switch (MEMFILE_ATTR(of_cft(of)->private)) { switch (MEMFILE_ATTR(of_cft(of)->private)) {
case RES_RSVD_LIMIT: case RES_RSVD_LIMIT:
rsvd = true; rsvd = true;
/* Fall through. */ fallthrough;
case RES_LIMIT: case RES_LIMIT:
mutex_lock(&hugetlb_limit_mutex); mutex_lock(&hugetlb_limit_mutex);
ret = page_counter_set_max( ret = page_counter_set_max(
......
...@@ -2813,8 +2813,7 @@ static int ksm_memory_callback(struct notifier_block *self, ...@@ -2813,8 +2813,7 @@ static int ksm_memory_callback(struct notifier_block *self,
*/ */
ksm_check_stable_tree(mn->start_pfn, ksm_check_stable_tree(mn->start_pfn,
mn->start_pfn + mn->nr_pages); mn->start_pfn + mn->nr_pages);
/* fallthrough */ fallthrough;
case MEM_CANCEL_OFFLINE: case MEM_CANCEL_OFFLINE:
mutex_lock(&ksm_thread_mutex); mutex_lock(&ksm_thread_mutex);
ksm_run &= ~KSM_RUN_OFFLINE; ksm_run &= ~KSM_RUN_OFFLINE;
......
...@@ -223,7 +223,7 @@ __list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx, ...@@ -223,7 +223,7 @@ __list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
switch (ret) { switch (ret) {
case LRU_REMOVED_RETRY: case LRU_REMOVED_RETRY:
assert_spin_locked(&nlru->lock); assert_spin_locked(&nlru->lock);
/* fall through */ fallthrough;
case LRU_REMOVED: case LRU_REMOVED:
isolated++; isolated++;
nlru->nr_items--; nlru->nr_items--;
......
...@@ -5813,7 +5813,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, ...@@ -5813,7 +5813,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
switch (get_mctgt_type(vma, addr, ptent, &target)) { switch (get_mctgt_type(vma, addr, ptent, &target)) {
case MC_TARGET_DEVICE: case MC_TARGET_DEVICE:
device = true; device = true;
/* fall through */ fallthrough;
case MC_TARGET_PAGE: case MC_TARGET_PAGE:
page = target.page; page = target.page;
/* /*
......
...@@ -881,7 +881,6 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) ...@@ -881,7 +881,6 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
switch (p->mode) { switch (p->mode) {
case MPOL_BIND: case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE: case MPOL_INTERLEAVE:
*nodes = p->v.nodes; *nodes = p->v.nodes;
break; break;
...@@ -2066,7 +2065,6 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask) ...@@ -2066,7 +2065,6 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
break; break;
case MPOL_BIND: case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE: case MPOL_INTERLEAVE:
*mask = mempolicy->v.nodes; *mask = mempolicy->v.nodes;
break; break;
...@@ -2333,7 +2331,6 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) ...@@ -2333,7 +2331,6 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
switch (a->mode) { switch (a->mode) {
case MPOL_BIND: case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE: case MPOL_INTERLEAVE:
return !!nodes_equal(a->v.nodes, b->v.nodes); return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED: case MPOL_PREFERRED:
......
...@@ -1460,7 +1460,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, ...@@ -1460,7 +1460,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
* with MAP_SHARED to preserve backward compatibility. * with MAP_SHARED to preserve backward compatibility.
*/ */
flags &= LEGACY_MAP_MASK; flags &= LEGACY_MAP_MASK;
/* fall through */ fallthrough;
case MAP_SHARED_VALIDATE: case MAP_SHARED_VALIDATE:
if (flags & ~flags_mask) if (flags & ~flags_mask)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -1487,8 +1487,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, ...@@ -1487,8 +1487,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= VM_SHARED | VM_MAYSHARE; vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE)) if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED); vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
fallthrough;
/* fall through */
case MAP_PRIVATE: case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ)) if (!(file->f_mode & FMODE_READ))
return -EACCES; return -EACCES;
......
...@@ -3996,7 +3996,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma) ...@@ -3996,7 +3996,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
if (i_size >= HPAGE_PMD_SIZE && if (i_size >= HPAGE_PMD_SIZE &&
i_size >> PAGE_SHIFT >= off) i_size >> PAGE_SHIFT >= off)
return true; return true;
/* fall through */ fallthrough;
case SHMEM_HUGE_ADVISE: case SHMEM_HUGE_ADVISE:
/* TODO: implement fadvise() hints */ /* TODO: implement fadvise() hints */
return (vma->vm_flags & VM_HUGEPAGE); return (vma->vm_flags & VM_HUGEPAGE);
......
...@@ -424,7 +424,7 @@ static void *zs_zpool_map(void *pool, unsigned long handle, ...@@ -424,7 +424,7 @@ static void *zs_zpool_map(void *pool, unsigned long handle,
case ZPOOL_MM_WO: case ZPOOL_MM_WO:
zs_mm = ZS_MM_WO; zs_mm = ZS_MM_WO;
break; break;
case ZPOOL_MM_RW: /* fall through */ case ZPOOL_MM_RW:
default: default:
zs_mm = ZS_MM_RW; zs_mm = ZS_MM_RW;
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment