Commit 6c26d310 authored by Miaohe Lin, committed by Linus Torvalds

mm/hugetlb: fix some comment typos

Fix typos sasitfy to satisfy, reservtion to reservation, hugegpage to
hugepage and uniprocesor to uniprocessor in comments.

Link: https://lkml.kernel.org/r/20210128112028.64831-1-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 82e5d378
...@@ -37,7 +37,7 @@ struct hugepage_subpool { ...@@ -37,7 +37,7 @@ struct hugepage_subpool {
struct hstate *hstate; struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */ long rsv_hpages; /* Pages reserved against global pool to */
/* sasitfy minimum size. */ /* satisfy minimum size. */
}; };
struct resv_map { struct resv_map {
......
...@@ -1434,7 +1434,7 @@ static void __free_huge_page(struct page *page) ...@@ -1434,7 +1434,7 @@ static void __free_huge_page(struct page *page)
* reservation. If the page was associated with a subpool, there * reservation. If the page was associated with a subpool, there
* would have been a page reserved in the subpool before allocation * would have been a page reserved in the subpool before allocation
* via hugepage_subpool_get_pages(). Since we are 'restoring' the * via hugepage_subpool_get_pages(). Since we are 'restoring' the
* reservtion, do not call hugepage_subpool_put_pages() as this will * reservation, do not call hugepage_subpool_put_pages() as this will
* remove the reserved page from the subpool. * remove the reserved page from the subpool.
*/ */
if (!restore_reserve) { if (!restore_reserve) {
...@@ -3707,7 +3707,7 @@ static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) ...@@ -3707,7 +3707,7 @@ static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
/* /*
* We cannot handle pagefaults against hugetlb pages at all. They cause * We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the * handle_mm_fault() to try to instantiate regular-sized pages in the
* hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get * hugepage VMA. do_page_fault() is supposed to trap this, so BUG is we get
* this far. * this far.
*/ */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
...@@ -4491,7 +4491,7 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) ...@@ -4491,7 +4491,7 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
} }
#else #else
/* /*
* For uniprocesor systems we always use a single mutex, so just * For uniprocessor systems we always use a single mutex, so just
* return 0 and avoid the hashing overhead. * return 0 and avoid the hashing overhead.
*/ */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment