Commit 46b1ee38 authored by Ralph Campbell's avatar Ralph Campbell Committed by Linus Torvalds

mm/mremap_pages: fix static key devmap_managed_key updates

commit 6f42193f ("memremap: don't use a separate devm action for
devmap_managed_enable_get") changed the static key updates such that we
now call devmap_managed_enable_put() without doing the equivalent
devmap_managed_enable_get().

devmap_managed_enable_get() is only called for MEMORY_DEVICE_PRIVATE and
MEMORY_DEVICE_FS_DAX, but memunmap_pages() gets called for other pgmap
types too.  This results in the warning below when switching between
system-ram and devdax mode for a devdax namespace.

   jump label: negative count!
   WARNING: CPU: 52 PID: 1335 at kernel/jump_label.c:235 static_key_slow_try_dec+0x88/0xa0
   Modules linked in:
   ....

   NIP static_key_slow_try_dec+0x88/0xa0
   LR static_key_slow_try_dec+0x84/0xa0
   Call Trace:
     static_key_slow_try_dec+0x84/0xa0
     __static_key_slow_dec_cpuslocked+0x34/0xd0
     static_key_slow_dec+0x54/0xf0
     memunmap_pages+0x36c/0x500
     devm_action_release+0x30/0x50
     release_nodes+0x2f4/0x3e0
     device_release_driver_internal+0x17c/0x280
     bus_remove_device+0x124/0x210
     device_del+0x1d4/0x530
     unregister_dev_dax+0x48/0xe0
     devm_action_release+0x30/0x50
     release_nodes+0x2f4/0x3e0
     device_release_driver_internal+0x17c/0x280
     unbind_store+0x130/0x170
     drv_attr_store+0x40/0x60
     sysfs_kf_write+0x6c/0xb0
     kernfs_fop_write+0x118/0x280
     vfs_write+0xe8/0x2a0
     ksys_write+0x84/0x140
     system_call_exception+0x120/0x270
     system_call_common+0xf0/0x27c
Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Link: https://lkml.kernel.org/r/20201023183222.13186-1-rcampbell@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3cea11cd
...@@ -41,28 +41,24 @@ EXPORT_SYMBOL_GPL(memremap_compat_align); ...@@ -41,28 +41,24 @@ EXPORT_SYMBOL_GPL(memremap_compat_align);
DEFINE_STATIC_KEY_FALSE(devmap_managed_key); DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key); EXPORT_SYMBOL(devmap_managed_key);
static void devmap_managed_enable_put(void) static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{ {
if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
pgmap->type == MEMORY_DEVICE_FS_DAX)
static_branch_dec(&devmap_managed_key); static_branch_dec(&devmap_managed_key);
} }
static int devmap_managed_enable_get(struct dev_pagemap *pgmap) static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{ {
if (pgmap->type == MEMORY_DEVICE_PRIVATE && if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
(!pgmap->ops || !pgmap->ops->page_free)) { pgmap->type == MEMORY_DEVICE_FS_DAX)
WARN(1, "Missing page_free method\n");
return -EINVAL;
}
static_branch_inc(&devmap_managed_key); static_branch_inc(&devmap_managed_key);
return 0;
} }
#else #else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap) static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{ {
return -EINVAL;
} }
static void devmap_managed_enable_put(void) static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{ {
} }
#endif /* CONFIG_DEV_PAGEMAP_OPS */ #endif /* CONFIG_DEV_PAGEMAP_OPS */
...@@ -169,7 +165,7 @@ void memunmap_pages(struct dev_pagemap *pgmap) ...@@ -169,7 +165,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
pageunmap_range(pgmap, i); pageunmap_range(pgmap, i);
WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
devmap_managed_enable_put(); devmap_managed_enable_put(pgmap);
} }
EXPORT_SYMBOL_GPL(memunmap_pages); EXPORT_SYMBOL_GPL(memunmap_pages);
...@@ -307,7 +303,6 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -307,7 +303,6 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
.pgprot = PAGE_KERNEL, .pgprot = PAGE_KERNEL,
}; };
const int nr_range = pgmap->nr_range; const int nr_range = pgmap->nr_range;
bool need_devmap_managed = true;
int error, i; int error, i;
if (WARN_ONCE(!nr_range, "nr_range must be specified\n")) if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
...@@ -323,6 +318,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -323,6 +318,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
WARN(1, "Missing migrate_to_ram method\n"); WARN(1, "Missing migrate_to_ram method\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
if (!pgmap->ops->page_free) {
WARN(1, "Missing page_free method\n");
return ERR_PTR(-EINVAL);
}
if (!pgmap->owner) { if (!pgmap->owner) {
WARN(1, "Missing owner\n"); WARN(1, "Missing owner\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
...@@ -336,11 +335,9 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -336,11 +335,9 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
} }
break; break;
case MEMORY_DEVICE_GENERIC: case MEMORY_DEVICE_GENERIC:
need_devmap_managed = false;
break; break;
case MEMORY_DEVICE_PCI_P2PDMA: case MEMORY_DEVICE_PCI_P2PDMA:
params.pgprot = pgprot_noncached(params.pgprot); params.pgprot = pgprot_noncached(params.pgprot);
need_devmap_managed = false;
break; break;
default: default:
WARN(1, "Invalid pgmap type %d\n", pgmap->type); WARN(1, "Invalid pgmap type %d\n", pgmap->type);
...@@ -364,11 +361,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -364,11 +361,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
} }
} }
if (need_devmap_managed) { devmap_managed_enable_get(pgmap);
error = devmap_managed_enable_get(pgmap);
if (error)
return ERR_PTR(error);
}
/* /*
* Clear the pgmap nr_range as it will be incremented for each * Clear the pgmap nr_range as it will be incremented for each
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment