Commit 65a2aa5f authored by David Hildenbrand's avatar David Hildenbrand Committed by Linus Torvalds

mm/memory_hotplug: remove nid parameter from arch_remove_memory()

The parameter is unused, let's remove it.

Link: https://lkml.kernel.org/r/20210712124052.26491-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Acked-by: Heiko Carstens <hca@linux.ibm.com>	[s390]
Reviewed-by: Pankaj Gupta <pankaj.gupta@ionos.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Sergei Trofimovich <slyfox@gentoo.org>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Michel Lespinasse <michel@lespinasse.org>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Cc: Joe Perches <joe@perches.com>
Cc: Pierre Morel <pmorel@linux.ibm.com>
Cc: Jia He <justin.he@arm.com>
Cc: Anton Blanchard <anton@ozlabs.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Len Brown <lenb@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Nathan Lynch <nathanl@linux.ibm.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Scott Cheloha <cheloha@linux.ibm.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7cf209ba
...@@ -1502,8 +1502,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -1502,8 +1502,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret; return ret;
} }
void arch_remove_memory(int nid, u64 start, u64 size, void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -484,8 +484,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -484,8 +484,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret; return ret;
} }
void arch_remove_memory(int nid, u64 start, u64 size, void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -119,8 +119,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, ...@@ -119,8 +119,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
return rc; return rc;
} }
void __ref arch_remove_memory(int nid, u64 start, u64 size, void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -306,8 +306,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -306,8 +306,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return rc; return rc;
} }
void arch_remove_memory(int nid, u64 start, u64 size, void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -414,8 +414,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -414,8 +414,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret; return ret;
} }
void arch_remove_memory(int nid, u64 start, u64 size, void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = PFN_DOWN(start); unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -801,8 +801,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -801,8 +801,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return __add_pages(nid, start_pfn, nr_pages, params); return __add_pages(nid, start_pfn, nr_pages, params);
} }
void arch_remove_memory(int nid, u64 start, u64 size, void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -1255,8 +1255,7 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end) ...@@ -1255,8 +1255,7 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
remove_pagetable(start, end, true, NULL); remove_pagetable(start, end, true, NULL);
} }
void __ref arch_remove_memory(int nid, u64 start, u64 size, void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
struct vmem_altmap *altmap)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
......
...@@ -130,8 +130,7 @@ static inline bool movable_node_is_enabled(void) ...@@ -130,8 +130,7 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled; return movable_node_enabled;
} }
extern void arch_remove_memory(int nid, u64 start, u64 size, extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
struct vmem_altmap *altmap); struct vmem_altmap *altmap);
......
...@@ -1106,7 +1106,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) ...@@ -1106,7 +1106,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
/* create memory block devices after memory was added */ /* create memory block devices after memory was added */
ret = create_memory_block_devices(start, size, mhp_altmap.alloc); ret = create_memory_block_devices(start, size, mhp_altmap.alloc);
if (ret) { if (ret) {
arch_remove_memory(nid, start, size, NULL); arch_remove_memory(start, size, NULL);
goto error; goto error;
} }
...@@ -1886,7 +1886,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) ...@@ -1886,7 +1886,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
mem_hotplug_begin(); mem_hotplug_begin();
arch_remove_memory(nid, start, size, altmap); arch_remove_memory(start, size, altmap);
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
memblock_free(start, size); memblock_free(start, size);
......
...@@ -140,14 +140,11 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) ...@@ -140,14 +140,11 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{ {
struct range *range = &pgmap->ranges[range_id]; struct range *range = &pgmap->ranges[range_id];
struct page *first_page; struct page *first_page;
int nid;
/* make sure to access a memmap that was actually initialized */ /* make sure to access a memmap that was actually initialized */
first_page = pfn_to_page(pfn_first(pgmap, range_id)); first_page = pfn_to_page(pfn_first(pgmap, range_id));
/* pages are dead and unused, undo the arch mapping */ /* pages are dead and unused, undo the arch mapping */
nid = page_to_nid(first_page);
mem_hotplug_begin(); mem_hotplug_begin();
remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start), remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
PHYS_PFN(range_len(range))); PHYS_PFN(range_len(range)));
...@@ -155,7 +152,7 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) ...@@ -155,7 +152,7 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
__remove_pages(PHYS_PFN(range->start), __remove_pages(PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), NULL); PHYS_PFN(range_len(range)), NULL);
} else { } else {
arch_remove_memory(nid, range->start, range_len(range), arch_remove_memory(range->start, range_len(range),
pgmap_altmap(pgmap)); pgmap_altmap(pgmap));
kasan_remove_zero_shadow(__va(range->start), range_len(range)); kasan_remove_zero_shadow(__va(range->start), range_len(range));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment