Commit dd6fa0b6 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: call free_huge_page() directly

Indirect calls are expensive, thanks to Spectre.  Call free_huge_page()
directly if the folio belongs to hugetlb.

Link: https://lkml.kernel.org/r/20230816151201.3655946-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 99a9e0b8
...@@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t; ...@@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) }) #define __hugepd(x) ((hugepd_t) { (x) })
#endif #endif
void free_huge_page(struct page *page);
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h> #include <linux/mempolicy.h>
...@@ -165,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags, ...@@ -165,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared); bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio); void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode); void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table; extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
......
...@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = { ...@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
[NULL_COMPOUND_DTOR] = NULL, [NULL_COMPOUND_DTOR] = NULL,
[COMPOUND_PAGE_DTOR] = free_compound_page, [COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
[TRANSHUGE_PAGE_DTOR] = free_transhuge_page, [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif #endif
...@@ -612,6 +609,11 @@ void destroy_large_folio(struct folio *folio) ...@@ -612,6 +609,11 @@ void destroy_large_folio(struct folio *folio)
{ {
enum compound_dtor_id dtor = folio->_folio_dtor; enum compound_dtor_id dtor = folio->_folio_dtor;
if (folio_test_hugetlb(folio)) {
free_huge_page(&folio->page);
return;
}
VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio); VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
compound_page_dtors[dtor](&folio->page); compound_page_dtors[dtor](&folio->page);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment