Commit 06d38148 authored by Linus Torvalds

Merge branch 'stable/vmalloc-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/vmalloc-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  net: xen-netback: use API provided by xenbus module to map rings
  block: xen-blkback: use API provided by xenbus module to map rings
  xen: use generic functions instead of xen_{alloc, free}_vm_area()
parents 5d5a8d2d c9d63699
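
In short: xen-blkback and xen-netback used to grant-map their frontends' shared rings by hand (alloc_vm_area() plus a raw GNTTABOP_map_grant_ref hypercall, with each driver tracking its own grant handles), and the arch headers papered over ia64/x86 differences with xen_{alloc, free}_vm_area() wrappers. Both backends now call the xenbus module's existing helpers instead, and the wrappers are dropped in favour of the generic alloc_vm_area()/free_vm_area(). A minimal sketch of the pattern the backends switch to, assuming only the two helper signatures visible in the hunks below (the example_* names are illustrative, not from this commit):

	#include <xen/xenbus.h>

	static void *ring;	/* virtual address of the mapped ring page */

	static int example_connect(struct xenbus_device *dev, int ring_ref)
	{
		/* Allocates a VA range and grant-maps the frontend's page
		 * into it; returns 0 on success, a negative value on error. */
		return xenbus_map_ring_valloc(dev, ring_ref, &ring);
	}

	static void example_disconnect(struct xenbus_device *dev)
	{
		/* Tears down the grant mapping and frees the VA range in one call. */
		xenbus_unmap_ring_vfree(dev, ring);
	}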

arch/ia64/include/asm/xen/grant_table.h (deleted)
-/******************************************************************************
- * arch/ia64/include/asm/xen/grant_table.h
- *
- * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef _ASM_IA64_XEN_GRANT_TABLE_H
-#define _ASM_IA64_XEN_GRANT_TABLE_H
-
-struct vm_struct *xen_alloc_vm_area(unsigned long size);
-void xen_free_vm_area(struct vm_struct *area);
-
-#endif /* _ASM_IA64_XEN_GRANT_TABLE_H */

arch/ia64/xen/grant-table.c
@@ -31,68 +31,6 @@
 #include <asm/xen/hypervisor.h>
 
-struct vm_struct *xen_alloc_vm_area(unsigned long size)
-{
-	int order;
-	unsigned long virt;
-	unsigned long nr_pages;
-	struct vm_struct *area;
-
-	order = get_order(size);
-	virt = __get_free_pages(GFP_KERNEL, order);
-	if (virt == 0)
-		goto err0;
-
-	nr_pages = 1 << order;
-	scrub_pages(virt, nr_pages);
-
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
-	if (area == NULL)
-		goto err1;
-
-	area->flags = VM_IOREMAP;
-	area->addr = (void *)virt;
-	area->size = size;
-	area->pages = NULL;
-	area->nr_pages = nr_pages;
-	area->phys_addr = 0;	/* xenbus_map_ring_valloc uses this field!  */
-
-	return area;
-
-err1:
-	free_pages(virt, order);
-err0:
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(xen_alloc_vm_area);
-
-void xen_free_vm_area(struct vm_struct *area)
-{
-	unsigned int order = get_order(area->size);
-	unsigned long i;
-	unsigned long phys_addr = __pa(area->addr);
-
-	/* This area is used for foreign page mappping.
-	 * So underlying machine page may not be assigned. */
-	for (i = 0; i < (1 << order); i++) {
-		unsigned long ret;
-		unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
-		struct xen_memory_reservation reservation = {
-			.nr_extents   = 1,
-			.address_bits = 0,
-			.extent_order = 0,
-			.domid        = DOMID_SELF
-		};
-		set_xen_guest_handle(reservation.extent_start, &gpfn);
-		ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
-					   &reservation);
-		BUG_ON(ret != 1);
-	}
-	free_pages((unsigned long)area->addr, order);
-	kfree(area);
-}
-EXPORT_SYMBOL_GPL(xen_free_vm_area);
-
 /****************************************************************************
  * grant table hack
  * cmd: GNTTABOP_xxx

arch/x86/include/asm/xen/grant_table.h (deleted)
-#ifndef _ASM_X86_XEN_GRANT_TABLE_H
-#define _ASM_X86_XEN_GRANT_TABLE_H
-
-#define xen_alloc_vm_area(size)	alloc_vm_area(size)
-#define xen_free_vm_area(area)	free_vm_area(area)
-
-#endif /* _ASM_X86_XEN_GRANT_TABLE_H */

arch/x86/xen/grant-table.c
@@ -71,7 +71,7 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 	if (shared == NULL) {
 		struct vm_struct *area =
-			xen_alloc_vm_area(PAGE_SIZE * max_nr_gframes);
+			alloc_vm_area(PAGE_SIZE * max_nr_gframes);
 		BUG_ON(area == NULL);
 		shared = area->addr;
 		*__shared = shared;

drivers/block/xen-blkback/common.h
@@ -170,7 +170,7 @@ struct xen_blkif {
 	enum blkif_protocol	blk_protocol;
 	enum blkif_backend_type	blk_backend_type;
 	union blkif_back_rings	blk_rings;
-	struct vm_struct	*blk_ring_area;
+	void			*blk_ring;
 	/* The VBD attached to this interface. */
 	struct xen_vbd		vbd;
 	/* Back pointer to the backend_info. */
@@ -198,9 +198,6 @@ struct xen_blkif {
 	int			st_wr_sect;
 
 	wait_queue_head_t	waiting_to_free;
-
-	grant_handle_t		shmem_handle;
-	grant_ref_t		shmem_ref;
 };

drivers/block/xen-blkback/xenbus.c
@@ -122,38 +122,6 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	return blkif;
 }
 
-static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
-{
-	struct gnttab_map_grant_ref op;
-
-	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-			  GNTMAP_host_map, shared_page, blkif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		DPRINTK("Grant table operation failure !\n");
-		return op.status;
-	}
-
-	blkif->shmem_ref = shared_page;
-	blkif->shmem_handle = op.handle;
-
-	return 0;
-}
-
-static void unmap_frontend_page(struct xen_blkif *blkif)
-{
-	struct gnttab_unmap_grant_ref op;
-
-	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-			    GNTMAP_host_map, blkif->shmem_handle);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-		BUG();
-}
-
 static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
 			 unsigned int evtchn)
 {
@@ -163,35 +131,29 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
 	if (blkif->irq)
 		return 0;
 
-	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
-	if (!blkif->blk_ring_area)
-		return -ENOMEM;
-
-	err = map_frontend_page(blkif, shared_page);
-	if (err) {
-		free_vm_area(blkif->blk_ring_area);
+	err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
+	if (err < 0)
 		return err;
-	}
 
 	switch (blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
 	{
 		struct blkif_sring *sring;
-		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
+		sring = (struct blkif_sring *)blkif->blk_ring;
 		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_32:
 	{
 		struct blkif_x86_32_sring *sring_x86_32;
-		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
+		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
 		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
 		break;
 	}
	case BLKIF_PROTOCOL_X86_64:
 	{
 		struct blkif_x86_64_sring *sring_x86_64;
-		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
+		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
 		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
 		break;
 	}
@@ -203,8 +165,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
 						    xen_blkif_be_int, 0,
 						    "blkif-backend", blkif);
 	if (err < 0) {
-		unmap_frontend_page(blkif);
-		free_vm_area(blkif->blk_ring_area);
+		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
 		blkif->blk_rings.common.sring = NULL;
 		return err;
 	}
@@ -230,8 +191,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
 	}
 
 	if (blkif->blk_rings.common.sring) {
-		unmap_frontend_page(blkif);
-		free_vm_area(blkif->blk_ring_area);
+		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
 		blkif->blk_rings.common.sring = NULL;
 	}
 }
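
The net effect on blkback: the vm_struct and grant-handle bookkeeping (blk_ring_area, shmem_handle, shmem_ref) moves into the xenbus layer, and connect/teardown collapse to a symmetric pair of calls around blkif->blk_ring. Condensed from the hunks above, purely as an illustration:

	/* connect */
	err = xenbus_map_ring_valloc(blkif->be->dev, shared_page,
				     &blkif->blk_ring);

	/* disconnect, and the request_irq error path */
	xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
	blkif->blk_rings.common.sring = NULL;

Note the `if (err < 0)` check: xenbus_map_ring_valloc() reports failure as a negative value, either an errno or a grant status code, so testing for a negative return covers both.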

drivers/net/xen-netback/common.h
@@ -58,10 +58,6 @@ struct xenvif {
 	u8               fe_dev_addr[6];
 
 	/* Physical parameters of the comms window. */
-	grant_handle_t   tx_shmem_handle;
-	grant_ref_t      tx_shmem_ref;
-	grant_handle_t   rx_shmem_handle;
-	grant_ref_t      rx_shmem_ref;
 	unsigned int     irq;
 
 	/* List of frontends to notify after a batch of frames sent. */
@@ -70,8 +66,6 @@ struct xenvif {
 	/* The shared rings and indexes. */
 	struct xen_netif_tx_back_ring tx;
 	struct xen_netif_rx_back_ring rx;
-	struct vm_struct *tx_comms_area;
-	struct vm_struct *rx_comms_area;
 
 	/* Frontend feature information. */
 	u8 can_sg:1;
@@ -106,6 +100,11 @@ struct xenvif {
 	wait_queue_head_t waiting_to_free;
 };
 
+static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
+{
+	return to_xenbus_device(vif->dev->dev.parent);
+}
+
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
 #define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
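
The inline helper added above is what makes the conversion possible on the netback side: the vif's net_device is created with the backend's xenbus device as its parent, so walking one step up the device tree recovers the struct xenbus_device that the generic mapping API needs. The netback.c hunks below then use it like so:

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);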

drivers/net/xen-netback/netback.c
@@ -1589,88 +1589,42 @@ static int xen_netbk_kthread(void *data)
 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
 {
-	struct gnttab_unmap_grant_ref op;
-
-	if (vif->tx.sring) {
-		gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
-				    GNTMAP_host_map, vif->tx_shmem_handle);
-
-		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-			BUG();
-	}
-
-	if (vif->rx.sring) {
-		gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
-				    GNTMAP_host_map, vif->rx_shmem_handle);
-
-		if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
-			BUG();
-	}
-
-	if (vif->rx_comms_area)
-		free_vm_area(vif->rx_comms_area);
-	if (vif->tx_comms_area)
-		free_vm_area(vif->tx_comms_area);
+	if (vif->tx.sring)
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->tx.sring);
+	if (vif->rx.sring)
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->rx.sring);
 }
 
 int xen_netbk_map_frontend_rings(struct xenvif *vif,
 				 grant_ref_t tx_ring_ref,
 				 grant_ref_t rx_ring_ref)
 {
-	struct gnttab_map_grant_ref op;
+	void *addr;
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
 
 	int err = -ENOMEM;
 
-	vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-	if (vif->tx_comms_area == NULL)
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     tx_ring_ref, &addr);
+	if (err)
 		goto err;
 
-	vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-	if (vif->rx_comms_area == NULL)
-		goto err;
-
-	gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
-			  GNTMAP_host_map, tx_ring_ref, vif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		netdev_warn(vif->dev,
-			    "failed to map tx ring. err=%d status=%d\n",
-			    err, op.status);
-		err = op.status;
-		goto err;
-	}
-
-	vif->tx_shmem_ref    = tx_ring_ref;
-	vif->tx_shmem_handle = op.handle;
-
-	txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+	txs = (struct xen_netif_tx_sring *)addr;
 	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
 
-	gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
-			  GNTMAP_host_map, rx_ring_ref, vif->domid);
-
-	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-		BUG();
-
-	if (op.status) {
-		netdev_warn(vif->dev,
-			    "failed to map rx ring. err=%d status=%d\n",
-			    err, op.status);
-		err = op.status;
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     rx_ring_ref, &addr);
+	if (err)
 		goto err;
-	}
-
-	vif->rx_shmem_ref     = rx_ring_ref;
-	vif->rx_shmem_handle  = op.handle;
-	vif->rx_req_cons_peek = 0;
 
-	rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+	rxs = (struct xen_netif_rx_sring *)addr;
 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
+	vif->rx_req_cons_peek = 0;
+
 	return 0;
 
 err:

drivers/xen/xenbus/xenbus_client.c
@@ -443,7 +443,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 	*vaddr = NULL;
 
-	area = xen_alloc_vm_area(PAGE_SIZE);
+	area = alloc_vm_area(PAGE_SIZE);
 	if (!area)
 		return -ENOMEM;
@@ -453,7 +453,7 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 		BUG();
 
 	if (op.status != GNTST_okay) {
-		xen_free_vm_area(area);
+		free_vm_area(area);
 		xenbus_dev_fatal(dev, op.status,
 				 "mapping in shared page %d from domain %d",
 				 gnt_ref, dev->otherend_id);
@@ -552,7 +552,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 		BUG();
 
 	if (op.status == GNTST_okay)
-		xen_free_vm_area(area);
+		free_vm_area(area);
 	else
 		xenbus_dev_error(dev, op.status,
 				 "unmapping page at handle %d error %d",
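
A detail that ties this file back to the deleted ia64 code: the comment there ("xenbus_map_ring_valloc uses this field!") refers to xenbus_client.c stashing the grant handle in the otherwise unused vm_struct::phys_addr, which is how xenbus_unmap_ring_vfree() later recovers the handle from nothing but the virtual address. Roughly, from the 3.2-era xenbus_client.c (a paraphrase for context, not part of this diff):

	gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
			  gnt_ref, dev->otherend_id);
	/* ... hypercall and status check ... */

	/* Stuff the handle in an unused field. */
	area->phys_addr = (unsigned long)op.handle;
	*vaddr = area->addr;

This is also why the ia64 xen_alloc_vm_area() above was careful to zero phys_addr.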

include/xen/grant_table.h
@@ -43,7 +43,6 @@
 #include <xen/interface/grant_table.h>
 
 #include <asm/xen/hypervisor.h>
-#include <asm/xen/grant_table.h>
 
 #include <xen/features.h>