Commit 06ab838c authored by Linus Torvalds

Merge tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen terminology fixes from David Vrabel:
 "Use the correct GFN/BFN terms more consistently"

* tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/xenbus: Rename the variable xen_store_mfn to xen_store_gfn
  xen/privcmd: Further s/MFN/GFN/ clean-up
  hvc/xen: Further s/MFN/GFN clean-up
  video/xen-fbfront: Further s/MFN/GFN clean-up
  xen/tmem: Use xen_page_to_gfn rather than pfn_to_gfn
  xen: Use correctly the Xen memory terminologies
  arm/xen: implement correctly pfn_to_mfn
  xen: Make clear that swiotlb and biomerge are dealing with DMA address
parents 573c577a 5f51042f
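For readers who don't have the Xen memory-terminology cheat sheet in their head, the frame-number spaces this series distinguishes can be summarised as follows. This is an editorial gloss on the commit message, not part of the commit; the identities listed fall out of the helper definitions in the diffs below.

/*
 * PFN - pseudo-physical frame number: the guest kernel's own view of
 *       its memory (what page_to_pfn() returns).
 * MFN - machine frame number: a real host frame; only PV guests and
 *       the p2m/m2p tables deal in these.
 * GFN - guest frame number: the unit the hypervisor ABI (grant tables,
 *       xenbus, most hypercalls) expects.
 *         auto-translated guests (HVM/PVH, and all ARM): GFN == PFN
 *         PV guests:                                     GFN == MFN
 * BFN - bus frame number: the frame a device sees when doing DMA;
 *       used by swiotlb-xen and bio merging.
 */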
@@ -34,7 +34,19 @@ typedef struct xpaddr {
 unsigned long __pfn_to_mfn(unsigned long pfn);
 extern struct rb_root phys_to_mach;
 
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+	return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+	return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
 {
 	unsigned long mfn;
 
@@ -47,16 +59,16 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 	return pfn;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
 {
-	return mfn;
+	return bfn;
 }
 
-#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
 
-/* VIRT <-> MACHINE conversion */
-#define virt_to_mfn(v)	(pfn_to_mfn(virt_to_pfn(v)))
-#define mfn_to_virt(m)	(__va(mfn_to_pfn(m) << PAGE_SHIFT))
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)	(pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(m)	(__va(gfn_to_pfn(m) << PAGE_SHIFT))
 
 /* Only used in PV code. But ARM guests are always HVM. */
 static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
@@ -96,7 +108,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
 			   unsigned long pfn,
-			   unsigned long mfn);
+			   unsigned long bfn);
 unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
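As a usage sketch (not part of the commit): after this rename, a frontend sharing a page with its backend converts the address with virt_to_gfn(), since grant tables speak GFNs. The wrapper below is hypothetical; gnttab_grant_foreign_access() and virt_to_gfn() are the real helpers used in the driver hunks further down.

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <asm/xen/page.h>

/* Hypothetical helper: grant a backend read-write access to one
 * shared ring page. virt_to_gfn() does the right thing on both PV
 * and auto-translated guests, so no xen_pv_domain() branch is needed. */
static int share_ring_page(struct xenbus_device *dev, void *ring)
{
	return gnttab_grant_foreign_access(dev->otherend_id,
					   virt_to_gfn(ring),
					   0 /* read-write */);
}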
@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq;
 static __initdata struct device_node *xen_node;
 
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
+			       xen_pfn_t *gfn, int nr,
 			       int *err_ptr, pgprot_t prot,
 			       unsigned domid,
 			       struct page **pages)
 {
-	return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+	return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
 					 prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 /* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 			       unsigned long addr,
-			       xen_pfn_t mfn, int nr,
+			       xen_pfn_t gfn, int nr,
 			       pgprot_t prot, unsigned domid,
 			       struct page **pages)
 {
 	return -ENOSYS;
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
 {
 	return xen_xlate_unmap_gfn_range(vma, nr, pages);
 }
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
 static void xen_percpu_init(void)
 {
...
@@ -139,9 +139,9 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
 
 bool xen_arch_need_swiotlb(struct device *dev,
 			   unsigned long pfn,
-			   unsigned long mfn)
+			   unsigned long bfn)
 {
-	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+	return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
...
@@ -101,6 +101,11 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;
 
+	/*
+	 * Some x86 code is still using pfn_to_mfn instead of
+	 * pfn_to_gfn. This will have to be removed once we figure
+	 * out which call each caller really wants.
+	 */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return pfn;
@@ -147,6 +152,11 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
 	unsigned long pfn;
 
+	/*
+	 * Some x86 code is still using mfn_to_pfn instead of
+	 * gfn_to_pfn. This will have to be removed once we figure
+	 * out which call each caller really wants.
+	 */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
@@ -176,6 +186,27 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
 }
 
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return pfn;
+	else
+		return pfn_to_mfn(pfn);
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return gfn;
+	else
+		return mfn_to_pfn(gfn);
+}
+
+/* Pseudo-physical <-> Bus conversion */
+#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
+#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)
+
 /*
  * We detect special mappings in one of two ways:
  *  1. If the MFN is an I/O page then Xen will set the m2p entry
@@ -196,7 +227,7 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
  * require. In all the cases we care about, the FOREIGN_FRAME bit is
  * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
  */
-static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
 {
 	unsigned long pfn;
 
@@ -215,6 +246,10 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 #define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
+
 static inline unsigned long pte_mfn(pte_t pte)
 {
 	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
@@ -262,7 +297,7 @@ void make_lowmem_page_readwrite(void *vaddr);
 static inline bool xen_arch_need_swiotlb(struct device *dev,
 					 unsigned long pfn,
-					 unsigned long mfn)
+					 unsigned long bfn)
 {
 	return false;
 }
...
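Putting the x86 pieces together (an illustrative restatement of the definitions just added, not new code in the commit): pfn_to_gfn() is the identity for auto-translated guests and falls back to the p2m lookup for PV, and the bus-frame helpers are plain aliases of the guest-frame ones on x86.

/* What the new x86 helpers reduce to, per guest type:
 *
 *   auto-translated (HVM/PVH): pfn_to_gfn(pfn) == pfn
 *   PV:                        pfn_to_gfn(pfn) == pfn_to_mfn(pfn)
 *
 *   pfn_to_bfn(pfn) == pfn_to_gfn(pfn)   (alias, see the #define above)
 */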
@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	return 0;
 }
 
-static int do_remap_mfn(struct vm_area_struct *vma,
+static int do_remap_gfn(struct vm_area_struct *vma,
 			unsigned long addr,
-			xen_pfn_t *mfn, int nr,
+			xen_pfn_t *gfn, int nr,
 			int *err_ptr, pgprot_t prot,
 			unsigned domid,
 			struct page **pages)
@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma,
 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
 #ifdef CONFIG_XEN_PVH
 		/* We need to update the local page tables and the xen HAP */
-		return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
 						 prot, domid, pages);
#else
 		return -EINVAL;
 #endif
 	}
 
-	rmd.mfn = mfn;
+	rmd.mfn = gfn;
 	rmd.prot = prot;
 	/* We use the err_ptr to indicate whether we are doing a contiguous
 	 * mapping or a discontiguous mapping. */
@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma,
 						    batch_left, &done, domid);
 
 			/*
-			 * @err_ptr may be the same buffer as @mfn, so
-			 * only clear it after each chunk of @mfn is
+			 * @err_ptr may be the same buffer as @gfn, so
+			 * only clear it after each chunk of @gfn is
 			 * used.
 			 */
 			if (err_ptr) {
@@ -2896,19 +2896,19 @@ static int do_remap_mfn(struct vm_area_struct *vma,
 	return err < 0 ? err : mapped;
 }
 
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 			       unsigned long addr,
-			       xen_pfn_t mfn, int nr,
+			       xen_pfn_t gfn, int nr,
 			       pgprot_t prot, unsigned domid,
 			       struct page **pages)
 {
-	return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
+	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
+			       xen_pfn_t *gfn, int nr,
 			       int *err_ptr, pgprot_t prot,
 			       unsigned domid, struct page **pages)
 {
@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
 	 * cause of "wrong memory was mapped in".
 	 */
 	BUG_ON(err_ptr == NULL);
-	return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 /* Returns: 0 success */
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int numpgs, struct page **pages)
 {
 	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 	return -EINVAL;
 #endif
 }
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
@@ -453,7 +453,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	}
 #endif
 	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
-	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
 	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
 		BUG();
...
@@ -249,7 +249,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 			       struct blkfront_info *info)
 {
 	struct grant *gnt_list_entry;
-	unsigned long buffer_mfn;
+	unsigned long buffer_gfn;
 
 	BUG_ON(list_empty(&info->grants));
 	gnt_list_entry = list_first_entry(&info->grants, struct grant,
@@ -268,10 +268,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 		BUG_ON(!pfn);
 		gnt_list_entry->pfn = pfn;
 	}
-	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+	buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
 	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
 					info->xbdev->otherend_id,
-					buffer_mfn, 0);
+					buffer_gfn, 0);
 	return gnt_list_entry;
 }
...
@@ -232,7 +232,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
 	struct xenbus_transaction xbt;
 
 	ret = gnttab_grant_foreign_access(dev->otherend_id,
-					  virt_to_mfn(info->page), 0);
+					  virt_to_gfn(info->page), 0);
 	if (ret < 0)
 		return ret;
 	info->gref = ret;
@@ -255,7 +255,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
 		goto error_irqh;
 	}
 	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-			    virt_to_mfn(info->page));
+			    virt_to_gfn(info->page));
 	if (ret)
 		goto error_xenbus;
 	ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
...
@@ -325,7 +325,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		} else {
 			copy_gop->source.domid = DOMID_SELF;
 			copy_gop->source.u.gmfn =
-				virt_to_mfn(page_address(page));
+				virt_to_gfn(page_address(page));
 		}
 		copy_gop->source.offset = offset;
@@ -1406,7 +1406,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
 
 		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_mfn(skb->data);
+			virt_to_gfn(skb->data);
 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
 		queue->tx_copy_ops[*copy_ops].dest.offset =
 			offset_in_page(skb->data);
...
@@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		struct sk_buff *skb;
 		unsigned short id;
 		grant_ref_t ref;
-		unsigned long pfn;
+		unsigned long gfn;
 		struct xen_netif_rx_request *req;
 
 		skb = xennet_alloc_one_rx_buffer(queue);
@@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		BUG_ON((signed short)ref < 0);
 		queue->grant_rx_ref[id] = ref;
 
-		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+		gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
 		req = RING_GET_REQUEST(&queue->rx, req_prod);
 		gnttab_grant_foreign_access_ref(ref,
 						queue->info->xbdev->otherend_id,
-						pfn_to_mfn(pfn),
+						gfn,
 						0);
 
 		req->id = id;
@@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	BUG_ON((signed short)ref < 0);
 
-	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-					page_to_mfn(page), GNTMAP_readonly);
+	gnttab_grant_foreign_access_ref(ref,
+					queue->info->xbdev->otherend_id,
+					xen_page_to_gfn(page),
+					GNTMAP_readonly);
 
 	queue->tx_skbs[id].skb = skb;
 	queue->grant_tx_page[id] = page;
...
@@ -377,7 +377,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	unsigned int data_len = scsi_bufflen(sc);
 	unsigned int data_grants = 0, seg_grants = 0;
 	struct scatterlist *sg;
-	unsigned long mfn;
 	struct scsiif_request_segment *seg;
 
 	ring_req->nr_segments = 0;
@@ -420,9 +419,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 			ref = gnttab_claim_grant_reference(&gref_head);
 			BUG_ON(ref == -ENOSPC);
 
-			mfn = pfn_to_mfn(page_to_pfn(page));
 			gnttab_grant_foreign_access_ref(ref,
-				info->dev->otherend_id, mfn, 1);
+				info->dev->otherend_id,
+				xen_page_to_gfn(page), 1);
 			shadow->gref[ref_cnt] = ref;
 			ring_req->seg[ref_cnt].gref   = ref;
 			ring_req->seg[ref_cnt].offset = (uint16_t)off;
@@ -454,9 +453,10 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 		ref = gnttab_claim_grant_reference(&gref_head);
 		BUG_ON(ref == -ENOSPC);
 
-		mfn = pfn_to_mfn(page_to_pfn(page));
 		gnttab_grant_foreign_access_ref(ref,
-			info->dev->otherend_id, mfn, grant_ro);
+			info->dev->otherend_id,
+			xen_page_to_gfn(page),
+			grant_ro);
 
 		shadow->gref[ref_cnt] = ref;
 		seg->gref = ref;
...
@@ -200,7 +200,7 @@ static int xen_hvm_console_init(void)
 {
 	int r;
 	uint64_t v = 0;
-	unsigned long mfn;
+	unsigned long gfn;
 	struct xencons_info *info;
 
 	if (!xen_hvm_domain())
@@ -217,7 +217,7 @@ static int xen_hvm_console_init(void)
 	}
 	/*
 	 * If the toolstack (or the hypervisor) hasn't set these values, the
-	 * default value is 0. Even though mfn = 0 and evtchn = 0 are
+	 * default value is 0. Even though gfn = 0 and evtchn = 0 are
 	 * theoretically correct values, in practice they never are and they
 	 * mean that a legacy toolstack hasn't initialized the pv console correctly.
 	 */
@@ -229,8 +229,8 @@ static int xen_hvm_console_init(void)
 	r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
 	if (r < 0 || v == 0)
 		goto err;
-	mfn = v;
-	info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
+	gfn = v;
+	info->intf = xen_remap(gfn << PAGE_SHIFT, PAGE_SIZE);
 	if (info->intf == NULL)
 		goto err;
 	info->vtermno = HVC_COOKIE;
@@ -265,7 +265,8 @@ static int xen_pv_console_init(void)
 		return 0;
 	}
 	info->evtchn = xen_start_info->console.domU.evtchn;
-	info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
+	/* GFN == MFN for PV guest */
+	info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
 	info->vtermno = HVC_COOKIE;
 
 	spin_lock(&xencons_lock);
@@ -374,7 +375,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
 	int ret, evtchn, devid, ref, irq;
 	struct xenbus_transaction xbt;
 	grant_ref_t gref_head;
-	unsigned long mfn;
 
 	ret = xenbus_alloc_evtchn(dev, &evtchn);
 	if (ret)
@@ -389,10 +389,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
 			irq, &domU_hvc_ops, 256);
 	if (IS_ERR(info->hvc))
 		return PTR_ERR(info->hvc);
-	if (xen_pv_domain())
-		mfn = virt_to_mfn(info->intf);
-	else
-		mfn = __pa(info->intf) >> PAGE_SHIFT;
 	ret = gnttab_alloc_grant_references(1, &gref_head);
 	if (ret < 0)
 		return ret;
@@ -401,7 +397,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
 	if (ref < 0)
 		return ref;
 	gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
-					mfn, 0);
+					virt_to_gfn(info->intf), 0);
 
 again:
 	ret = xenbus_transaction_start(&xbt);
...
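The xencons_connect_backend() hunk above can delete its open-coded PV/HVM branch because virt_to_gfn() now encapsulates it. A sketch of the equivalence, assuming the x86 definitions shown earlier on this page:

/*
 * Deleted open-coded branch:           Equivalent new call:
 *   if (xen_pv_domain())                 virt_to_gfn(info->intf)
 *       mfn = virt_to_mfn(info->intf);     == pfn_to_gfn(virt_to_pfn(info->intf)),
 *   else                                 which returns the PFN for
 *       mfn = __pa(info->intf)           auto-translated guests and
 *             >> PAGE_SHIFT;             pfn_to_mfn(pfn) for PV.
 */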
@@ -46,7 +46,7 @@ struct xenfb_info {
 	int			nr_pages;
 	int			irq;
 	struct xenfb_page	*page;
-	unsigned long		*mfns;
+	unsigned long		*gfns;
 	int			update_wanted; /* XENFB_TYPE_UPDATE wanted */
 	int			feature_resize; /* XENFB_TYPE_RESIZE ok */
 	struct xenfb_resize	resize;		/* protected by resize_lock */
@@ -402,8 +402,8 @@ static int xenfb_probe(struct xenbus_device *dev,
 
 	info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
-	if (!info->mfns)
+	info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+	if (!info->gfns)
 		goto error_nomem;
 
 	/* set up shared page */
@@ -530,29 +530,29 @@ static int xenfb_remove(struct xenbus_device *dev)
 		framebuffer_release(info->fb_info);
 	}
 	free_page((unsigned long)info->page);
-	vfree(info->mfns);
+	vfree(info->gfns);
 	vfree(info->fb);
 	kfree(info);
 
 	return 0;
 }
 
-static unsigned long vmalloc_to_mfn(void *address)
+static unsigned long vmalloc_to_gfn(void *address)
 {
-	return pfn_to_mfn(vmalloc_to_pfn(address));
+	return xen_page_to_gfn(vmalloc_to_page(address));
 }
 
 static void xenfb_init_shared_page(struct xenfb_info *info,
 				   struct fb_info *fb_info)
 {
 	int i;
-	int epd = PAGE_SIZE / sizeof(info->mfns[0]);
+	int epd = PAGE_SIZE / sizeof(info->gfns[0]);
 
 	for (i = 0; i < info->nr_pages; i++)
-		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
+		info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);
 
 	for (i = 0; i * epd < info->nr_pages; i++)
-		info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
+		info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);
 
 	info->page->width = fb_info->var.xres;
 	info->page->height = fb_info->var.yres;
@@ -586,7 +586,7 @@ static int xenfb_connect_backend(struct xenbus_device *dev,
 		goto unbind_irq;
 	}
 	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-			    virt_to_mfn(info->page));
+			    virt_to_gfn(info->page));
 	if (ret)
 		goto error_xenbus;
 	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
...
@@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	/* Update direct mapping, invalidate P2M, and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
 		pfn = frame_list[i];
-		frame_list[i] = pfn_to_mfn(pfn);
+		frame_list[i] = pfn_to_gfn(pfn);
 		page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
...
@@ -6,10 +6,10 @@
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 			       const struct bio_vec *vec2)
 {
-	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
-	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
+	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
 	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
+		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
 }
 EXPORT_SYMBOL(xen_biovec_phys_mergeable);
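Why the merge test must use bus frames: two pages that are adjacent in the guest's pseudo-physical space can be backed by non-adjacent machine frames, and a device DMAs to bus addresses, so merging them into one bio segment would hand the device a hole. A minimal sketch of the property being checked, assuming the pfn_to_bfn() helper introduced above (the function name here is illustrative):

/* Two guest-adjacent pages may share a DMA segment only if they are
 * also adjacent in bus-frame space. */
static bool pages_bus_adjacent(unsigned long pfn1, unsigned long pfn2)
{
	return pfn_to_bfn(pfn1) + 1 == pfn_to_bfn(pfn2);
}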
@@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void)
 		struct physdev_pirq_eoi_gmfn eoi_gmfn;
 
 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-		eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
 		/* TODO: No PVH support for PIRQ EOI */
 		if (rc != 0) {
...
@@ -111,7 +111,7 @@ static int init_control_block(int cpu,
 	for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
 		q->head[i] = 0;
 
-	init_control.control_gfn = virt_to_mfn(control_block);
+	init_control.control_gfn = virt_to_gfn(control_block);
 	init_control.offset      = 0;
 	init_control.vcpu        = cpu;
 
@@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
 		/* Mask all events in this page before adding it. */
 		init_array_page(array_page);
 
-		expand_array.array_gfn = virt_to_mfn(array_page);
+		expand_array.array_gfn = virt_to_gfn(array_page);
 
 		ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
 		if (ret < 0)
...
@@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 		/* Grant foreign access to the page. */
 		rc = gnttab_grant_foreign_access(op->domid,
-				pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+						 xen_page_to_gfn(gref->page),
+						 readonly);
 		if (rc < 0)
 			goto undo;
 		gref_ids[i] = gref->gref_id = rc;
...
@@ -80,7 +80,7 @@ static int xen_suspend(void *data)
 	 * is resuming in a new domain.
 	 */
 	si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
-					   ? virt_to_mfn(xen_start_info)
+					   ? virt_to_gfn(xen_start_info)
 					   : 0);
 
 	xen_arch_post_suspend(si->cancelled);
...
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
 	domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
 	struct privcmd_mmap_entry *msg = data;
-	struct mmap_mfn_state *st = state;
+	struct mmap_gfn_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 		return -EINVAL;
 
-	rc = xen_remap_domain_mfn_range(vma,
+	rc = xen_remap_domain_gfn_range(vma,
 					msg->va & PAGE_MASK,
 					msg->mfn, msg->npages,
 					vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	struct vm_area_struct *vma;
 	int rc;
 	LIST_HEAD(pagelist);
-	struct mmap_mfn_state state;
+	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 			    &pagelist,
-			    mmap_mfn_range, &state);
+			    mmap_gfn_range, &state);
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
 	int global_error;
 	int version;
 
-	/* User-space mfn array to store errors in the second pass for V1. */
-	xen_pfn_t __user *user_mfn;
+	/* User-space gfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_gfn;
 	/* User-space int array to store errors in the second pass for V2. */
 	int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-	xen_pfn_t *mfnp = data;
+	xen_pfn_t *gfnp = data;
 	struct mmap_batch_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
-	ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-					 (int *)mfnp, st->vma->vm_page_prot,
+	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
 					 st->domain, cur_pages);
 
 	/* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
 	if (st->version == 1) {
 		if (err) {
-			xen_pfn_t mfn;
+			xen_pfn_t gfn;
 
-			ret = get_user(mfn, st->user_mfn);
+			ret = get_user(gfn, st->user_gfn);
 			if (ret < 0)
 				return ret;
 			/*
 			 * V1 encodes the error codes in the 32bit top
-			 * nibble of the mfn (with its known
+			 * nibble of the gfn (with its known
 			 * limitations vis-a-vis 64 bit callers).
 			 */
-			mfn |= (err == -ENOENT) ?
+			gfn |= (err == -ENOENT) ?
 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 				PRIVCMD_MMAPBATCH_MFN_ERROR;
-			return __put_user(mfn, st->user_mfn++);
+			return __put_user(gfn, st->user_gfn++);
 		} else
-			st->user_mfn++;
+			st->user_gfn++;
 	} else { /* st->version == 2 */
 		if (err)
 			return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
 	return 0;
 }
 
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	if (state.global_error) {
 		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_gfn = (xen_pfn_t *)m.arr;
 		state.user_err = m.err;
 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 					   &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
...
@@ -82,8 +82,8 @@ static u64 start_dma_addr;
 */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
-	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+	unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
+	dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
 
 	dma |= paddr & ~PAGE_MASK;
 
@@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+	unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
 	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
 	phys_addr_t paddr = dma;
 
@@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn,
 					     unsigned int offset,
 					     size_t length)
 {
-	unsigned long next_mfn;
+	unsigned long next_bfn;
 	int i;
 	int nr_pages;
 
-	next_mfn = pfn_to_mfn(pfn);
+	next_bfn = pfn_to_bfn(pfn);
 	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
 
 	for (i = 1; i < nr_pages; i++) {
-		if (pfn_to_mfn(++pfn) != ++next_mfn)
+		if (pfn_to_bfn(++pfn) != ++next_bfn)
 			return 0;
 	}
 	return 1;
@@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
 {
-	unsigned long mfn = PFN_DOWN(dma_addr);
-	unsigned long pfn = mfn_to_local_pfn(mfn);
+	unsigned long bfn = PFN_DOWN(dma_addr);
+	unsigned long pfn = bfn_to_local_pfn(bfn);
 	phys_addr_t paddr;
 
 	/* If the address is outside our domain, it CAN
...
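A worked example of the xen_phys_to_bus() arithmetic above (values picked purely for illustration, PAGE_SHIFT assumed to be 12):

/*
 * paddr           = 0x12345678
 * PFN_DOWN(paddr) = 0x12345               (frame part)
 * bfn             = pfn_to_bfn(0x12345)   (identity for auto-translated
 *                                          guests; p2m lookup for PV)
 * dma             = (bfn << 12) | 0x678   (the offset within the page
 *                                          is carried over unchanged)
 */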
@@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
 /* xen generic tmem ops */
 
 static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
-			     u32 index, unsigned long pfn)
+			     u32 index, struct page *page)
 {
-	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
 	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
-			   gmfn, 0, 0, 0);
+			   xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
-			     u32 index, unsigned long pfn)
+			     u32 index, struct page *page)
 {
-	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
 	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
-			   gmfn, 0, 0, 0);
+			   xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
@@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
 {
 	u32 ind = (u32) index;
 	struct tmem_oid oid = *(struct tmem_oid *)&key;
-	unsigned long pfn = page_to_pfn(page);
 
 	if (pool < 0)
 		return;
 	if (ind != index)
 		return;
 	mb(); /* ensure page is quiescent; tmem may address it with an alias */
-	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+	(void)xen_tmem_put_page((u32)pool, oid, ind, page);
 }
 
 static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
@@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 {
 	u32 ind = (u32) index;
 	struct tmem_oid oid = *(struct tmem_oid *)&key;
-	unsigned long pfn = page_to_pfn(page);
 	int ret;
 
 	/* translate return values to linux semantics */
@@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 		return -1;
 	if (ind != index)
 		return -1;
-	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+	ret = xen_tmem_get_page((u32)pool, oid, ind, page);
 	if (ret == 1)
 		return 0;
 	else
@@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 {
 	u64 ind64 = (u64)offset;
 	u32 ind = (u32)offset;
-	unsigned long pfn = page_to_pfn(page);
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
@@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 	if (ind64 != ind)
 		return -1;
 	mb(); /* ensure page is quiescent; tmem may address it with an alias */
-	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
 	/* translate Xen tmem return values to linux semantics */
 	if (ret == 1)
 		return 0;
@@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 {
 	u64 ind64 = (u64)offset;
 	u32 ind = (u32)offset;
-	unsigned long pfn = page_to_pfn(page);
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
@@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 		return -1;
 	if (ind64 != ind)
 		return -1;
-	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
 	/* translate Xen tmem return values to linux semantics */
 	if (ret == 1)
 		return 0;
...
@@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
 
 	for (i = 0; i < nr_pages; i++) {
 		err = gnttab_grant_foreign_access(dev->otherend_id,
-						  virt_to_mfn(vaddr), 0);
+						  virt_to_gfn(vaddr), 0);
 		if (err < 0) {
 			xenbus_dev_fatal(dev, err,
 					 "granting access to ring page");
...
@@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
 		goto out_err;
 
 	gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
-			virt_to_mfn(xen_store_interface), 0 /* writable */);
+			virt_to_gfn(xen_store_interface), 0 /* writable */);
 
 	arg.dom = DOMID_SELF;
 	arg.remote_dom = domid;
...
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface);
 enum xenstore_init xen_store_domain_type;
 EXPORT_SYMBOL_GPL(xen_store_domain_type);
 
-static unsigned long xen_store_mfn;
+static unsigned long xen_store_gfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
@@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
 	if (!page)
 		goto out_err;
 
-	xen_store_mfn = xen_start_info->store_mfn =
-		pfn_to_mfn(virt_to_phys((void *)page) >>
-			   PAGE_SHIFT);
+	xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
 
 	/* Next allocate a local port which xenstored can bind to */
 	alloc_unbound.dom = DOMID_SELF;
@@ -787,12 +785,12 @@ static int __init xenbus_init(void)
 		err = xenstored_local_init();
 		if (err)
 			goto out_error;
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_interface = gfn_to_virt(xen_store_gfn);
 		break;
 	case XS_PV:
 		xen_store_evtchn = xen_start_info->store_evtchn;
-		xen_store_mfn = xen_start_info->store_mfn;
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_gfn = xen_start_info->store_mfn;
+		xen_store_interface = gfn_to_virt(xen_store_gfn);
 		break;
 	case XS_HVM:
 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
@@ -802,9 +800,9 @@ static int __init xenbus_init(void)
 		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
 		if (err)
 			goto out_error;
-		xen_store_mfn = (unsigned long)v;
+		xen_store_gfn = (unsigned long)v;
 		xen_store_interface =
-			xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+			xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
 		break;
 	default:
 		pr_warn("Xenstore state unknown\n");
...
@@ -38,8 +38,8 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
 			    unsigned int domid)
 {
 	int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 		.size = 1,
 		.space = XENMAPSPACE_gmfn_foreign,
 	};
-	xen_ulong_t idx = fgmfn;
+	xen_ulong_t idx = fgfn;
 	xen_pfn_t gpfn = lpfn;
 	int err = 0;
 
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 }
 
 struct remap_data {
-	xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+	xen_pfn_t *fgfn; /* foreign domain's gfn */
 	pgprot_t prot;
 	domid_t  domid;
 	struct vm_area_struct *vma;
 	int index;
 	struct page **pages;
-	struct xen_remap_mfn_info *info;
+	struct xen_remap_gfn_info *info;
 	int *err_ptr;
 	int mapped;
 };
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 	int rc;
 
-	rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+	rc = map_foreign_page(pfn, *info->fgfn, info->domid);
 	*info->err_ptr++ = rc;
 	if (!rc) {
 		set_pte_at(info->vma->vm_mm, addr, ptep, pte);
 		info->mapped++;
 	}
-	info->fgmfn++;
+	info->fgfn++;
 
 	return 0;
 }
 
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 			      unsigned long addr,
-			      xen_pfn_t *mfn, int nr,
+			      xen_pfn_t *gfn, int nr,
 			      int *err_ptr, pgprot_t prot,
 			      unsigned domid,
 			      struct page **pages)
@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 	   x86 PVOPS */
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-	data.fgmfn = mfn;
+	data.fgfn = gfn;
 	data.prot  = prot;
 	data.domid = domid;
 	data.vma   = vma;
...
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
 
 struct privcmd_mmap_entry {
 	__u64 va;
+	/*
+	 * This should be a GFN. It's not possible to change the name
+	 * because it is exposed to user-space.
+	 */
 	__u64 mfn;
 	__u64 npages;
 };
...
@@ -3,9 +3,9 @@
 
 #include <asm/xen/page.h>
 
-static inline unsigned long page_to_mfn(struct page *page)
+static inline unsigned long xen_page_to_gfn(struct page *page)
 {
-	return pfn_to_mfn(page_to_pfn(page));
+	return pfn_to_gfn(page_to_pfn(page));
 }
 
 struct xen_memory_region {
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 struct vm_area_struct;
 
 /*
- * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames
  * @vma:     VMA to map the pages into
  * @addr:    Address at which to map the pages
  * @gfn:     Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       xen_pfn_t *gfn, int nr,
 			       int *err_ptr, pgprot_t prot,
 			       unsigned domid,
 			       struct page **pages);
 
-/* xen_remap_domain_mfn_range() - map a range of foreign frames
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
 * @vma:   VMA to map the pages into
 * @addr:  Address at which to map the pages
 * @gfn:   First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       xen_pfn_t gfn, int nr,
 			       pgprot_t prot, unsigned domid,
 			       struct page **pages);
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int numpgs, struct page **pages);
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 			      unsigned long addr,
...