Commit 391aab11 authored by Jérôme Glisse's avatar Jérôme Glisse Committed by Linus Torvalds

mm/hmm: convert various hmm_pfn_* to device_entry which is a better name

Convert hmm_pfn_* to device_entry_* as here we are dealing with a device
driver specific entry format and HMM provides helpers to allow different
components (including HMM) to create/parse device entries.

We keep wrappers with the old names so that we can convert drivers to use the
new API in stages in each device driver tree.  These will get removed once
all drivers are converted.

Link: http://lkml.kernel.org/r/20190403193318.16478-13-jglisse@redhat.com
Signed-off-by: default avatarJérôme Glisse <jglisse@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 55c0ece8
...@@ -239,36 +239,36 @@ static inline bool hmm_range_valid(struct hmm_range *range) ...@@ -239,36 +239,36 @@ static inline bool hmm_range_valid(struct hmm_range *range)
} }
/* /*
* hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn * hmm_device_entry_to_page() - return struct page pointed to by a device entry
* @range: range use to decode HMM pfn value * @range: range use to decode device entry value
* @pfn: HMM pfn value to get corresponding struct page from * @entry: device entry value to get corresponding struct page from
* Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise * Returns: struct page pointer if entry is a valid, NULL otherwise
* *
* If the HMM pfn is valid (ie valid flag set) then return the struct page * If the device entry is valid (ie valid flag set) then return the struct page
* matching the pfn value stored in the HMM pfn. Otherwise return NULL. * matching the entry value. Otherwise return NULL.
*/ */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
uint64_t pfn) uint64_t entry)
{ {
if (pfn == range->values[HMM_PFN_NONE]) if (entry == range->values[HMM_PFN_NONE])
return NULL; return NULL;
if (pfn == range->values[HMM_PFN_ERROR]) if (entry == range->values[HMM_PFN_ERROR])
return NULL; return NULL;
if (pfn == range->values[HMM_PFN_SPECIAL]) if (entry == range->values[HMM_PFN_SPECIAL])
return NULL; return NULL;
if (!(pfn & range->flags[HMM_PFN_VALID])) if (!(entry & range->flags[HMM_PFN_VALID]))
return NULL; return NULL;
return pfn_to_page(pfn >> range->pfn_shift); return pfn_to_page(entry >> range->pfn_shift);
} }
/* /*
* hmm_pfn_to_pfn() - return pfn value store in a HMM pfn * hmm_device_entry_to_pfn() - return pfn value store in a device entry
* @range: range use to decode HMM pfn value * @range: range use to decode device entry value
* @pfn: HMM pfn value to extract pfn from * @entry: device entry to extract pfn from
* Returns: pfn value if HMM pfn is valid, -1UL otherwise * Returns: pfn value if device entry is valid, -1UL otherwise
*/ */
static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, static inline unsigned long
uint64_t pfn) hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{ {
if (pfn == range->values[HMM_PFN_NONE]) if (pfn == range->values[HMM_PFN_NONE])
return -1UL; return -1UL;
...@@ -282,31 +282,66 @@ static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, ...@@ -282,31 +282,66 @@ static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
} }
/* /*
* hmm_pfn_from_page() - create a valid HMM pfn value from struct page * hmm_device_entry_from_page() - create a valid device entry for a page
* @range: range use to encode HMM pfn value * @range: range use to encode HMM pfn value
* @page: struct page pointer for which to create the HMM pfn * @page: page for which to create the device entry
* Returns: valid HMM pfn for the page * Returns: valid device entry for the page
*/ */
static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
struct page *page) struct page *page)
{ {
return (page_to_pfn(page) << range->pfn_shift) | return (page_to_pfn(page) << range->pfn_shift) |
range->flags[HMM_PFN_VALID]; range->flags[HMM_PFN_VALID];
} }
/* /*
* hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
* @range: range use to encode HMM pfn value * @range: range use to encode HMM pfn value
* @pfn: pfn value for which to create the HMM pfn * @pfn: pfn value for which to create the device entry
* Returns: valid HMM pfn for the pfn * Returns: valid device entry for the pfn
*/ */
static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
unsigned long pfn) unsigned long pfn)
{ {
return (pfn << range->pfn_shift) | return (pfn << range->pfn_shift) |
range->flags[HMM_PFN_VALID]; range->flags[HMM_PFN_VALID];
} }
/*
* Old API:
* hmm_pfn_to_page()
* hmm_pfn_to_pfn()
* hmm_pfn_from_page()
* hmm_pfn_from_pfn()
*
* This are the OLD API please use new API, it is here to avoid cross-tree
* merge painfullness ie we convert things to new API in stages.
*/
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
uint64_t pfn)
{
return hmm_device_entry_to_page(range, pfn);
}
static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
uint64_t pfn)
{
return hmm_device_entry_to_pfn(range, pfn);
}
static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
struct page *page)
{
return hmm_device_entry_from_page(range, page);
}
static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
unsigned long pfn)
{
return hmm_device_entry_from_pfn(range, pfn);
}
#if IS_ENABLED(CONFIG_HMM_MIRROR) #if IS_ENABLED(CONFIG_HMM_MIRROR)
/* /*
......
...@@ -543,7 +543,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, ...@@ -543,7 +543,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
if (unlikely(!hmm_vma_walk->pgmap)) if (unlikely(!hmm_vma_walk->pgmap))
return -EBUSY; return -EBUSY;
} }
pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
} }
if (hmm_vma_walk->pgmap) { if (hmm_vma_walk->pgmap) {
put_dev_pagemap(hmm_vma_walk->pgmap); put_dev_pagemap(hmm_vma_walk->pgmap);
...@@ -611,7 +611,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, ...@@ -611,7 +611,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
&fault, &write_fault); &fault, &write_fault);
if (fault || write_fault) if (fault || write_fault)
goto fault; goto fault;
*pfn = hmm_pfn_from_pfn(range, swp_offset(entry)); *pfn = hmm_device_entry_from_pfn(range,
swp_offset(entry));
*pfn |= cpu_flags; *pfn |= cpu_flags;
return 0; return 0;
} }
...@@ -649,7 +650,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, ...@@ -649,7 +650,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
return -EFAULT; return -EFAULT;
} }
*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags; *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
return 0; return 0;
fault: fault:
...@@ -803,7 +804,8 @@ static int hmm_vma_walk_pud(pud_t *pudp, ...@@ -803,7 +804,8 @@ static int hmm_vma_walk_pud(pud_t *pudp,
hmm_vma_walk->pgmap); hmm_vma_walk->pgmap);
if (unlikely(!hmm_vma_walk->pgmap)) if (unlikely(!hmm_vma_walk->pgmap))
return -EBUSY; return -EBUSY;
pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
cpu_flags;
} }
if (hmm_vma_walk->pgmap) { if (hmm_vma_walk->pgmap) {
put_dev_pagemap(hmm_vma_walk->pgmap); put_dev_pagemap(hmm_vma_walk->pgmap);
...@@ -879,7 +881,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask, ...@@ -879,7 +881,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift); pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
for (; addr < end; addr += size, i++, pfn += pfn_inc) for (; addr < end; addr += size, i++, pfn += pfn_inc)
range->pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
cpu_flags;
hmm_vma_walk->last = end; hmm_vma_walk->last = end;
unlock: unlock:
...@@ -1222,7 +1225,7 @@ long hmm_range_dma_map(struct hmm_range *range, ...@@ -1222,7 +1225,7 @@ long hmm_range_dma_map(struct hmm_range *range,
*/ */
daddrs[i] = 0; daddrs[i] = 0;
page = hmm_pfn_to_page(range, range->pfns[i]); page = hmm_device_entry_to_page(range, range->pfns[i]);
if (page == NULL) if (page == NULL)
continue; continue;
...@@ -1252,7 +1255,7 @@ long hmm_range_dma_map(struct hmm_range *range, ...@@ -1252,7 +1255,7 @@ long hmm_range_dma_map(struct hmm_range *range,
enum dma_data_direction dir = DMA_TO_DEVICE; enum dma_data_direction dir = DMA_TO_DEVICE;
struct page *page; struct page *page;
page = hmm_pfn_to_page(range, range->pfns[i]); page = hmm_device_entry_to_page(range, range->pfns[i]);
if (page == NULL) if (page == NULL)
continue; continue;
...@@ -1307,7 +1310,7 @@ long hmm_range_dma_unmap(struct hmm_range *range, ...@@ -1307,7 +1310,7 @@ long hmm_range_dma_unmap(struct hmm_range *range,
enum dma_data_direction dir = DMA_TO_DEVICE; enum dma_data_direction dir = DMA_TO_DEVICE;
struct page *page; struct page *page;
page = hmm_pfn_to_page(range, range->pfns[i]); page = hmm_device_entry_to_page(range, range->pfns[i]);
if (page == NULL) if (page == NULL)
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment