Commit a4751306 authored by Zhu Lingshan, committed by Michael S. Tsirkin

vDPA/ifcvf: virt queue ops take immediate actions

In this commit, the virtqueue operations set_vq_num(),
set_vq_address(), set_vq_ready() and get_vq_ready()
access the PCI registers directly, so each call takes
immediate effect on the device.
Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20230526145254.39537-2-lingshan.zhu@intel.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 6995e2de
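For context on what "immediate actions" means here: before this patch the vDPA ops only cached the queue parameters in struct vring_info and ifcvf_hw_enable() flushed them to the virtio-pci common configuration space when the device was started; after it, each op selects the queue and writes the corresponding registers right away. Below is a minimal user-space model of that select-then-write pattern, not driver code: plain struct fields and assignments stand in for the MMIO registers and vp_iowrite16(), and the fake_common_cfg/toy_* names are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy model of the virtio-pci common config registers the patch touches.
 * Plain memory stands in for MMIO; the driver uses vp_iowrite16/64_twopart. */
struct fake_common_cfg {
	uint16_t queue_select;     /* selects which queue the fields below address */
	uint16_t queue_size[2];    /* per-queue size, indexed by queue_select */
	uint16_t queue_enable[2];  /* per-queue enable, indexed by queue_select */
};

/* Write-through helper in the style of the new ifcvf_set_vq_num():
 * select the queue, then write the register; nothing is cached for later. */
static void toy_set_vq_num(struct fake_common_cfg *cfg, uint16_t qid, uint16_t num)
{
	cfg->queue_select = qid;
	cfg->queue_size[cfg->queue_select] = num;
}

static void toy_set_vq_ready(struct fake_common_cfg *cfg, uint16_t qid, uint16_t ready)
{
	cfg->queue_select = qid;
	cfg->queue_enable[cfg->queue_select] = ready;
}

int main(void)
{
	struct fake_common_cfg cfg = {0};

	toy_set_vq_num(&cfg, 0, 256);   /* takes effect immediately */
	toy_set_vq_ready(&cfg, 0, 1);
	printf("q0: size=%u enable=%u\n", cfg.queue_size[0], cfg.queue_enable[0]);
	return 0;
}
```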
@@ -329,31 +329,49 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 	return 0;
 }
 
-static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
 {
-	struct virtio_pci_common_cfg __iomem *cfg;
-	u32 i;
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-	cfg = hw->common_cfg;
-	for (i = 0; i < hw->nr_vring; i++) {
-		if (!hw->vring[i].ready)
-			break;
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite16(num, &cfg->queue_size);
+}
+
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+			 u64 driver_area, u64 device_area)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-		vp_iowrite16(i, &cfg->queue_select);
-		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
-				     &cfg->queue_desc_hi);
-		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
-				     &cfg->queue_avail_hi);
-		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
-				     &cfg->queue_used_hi);
-		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
-		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-		vp_iowrite16(1, &cfg->queue_enable);
-	}
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
+			     &cfg->queue_desc_hi);
+	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
+			     &cfg->queue_avail_hi);
+	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
+			     &cfg->queue_used_hi);
 
 	return 0;
 }
 
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+	u16 queue_enable;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	queue_enable = vp_ioread16(&cfg->queue_enable);
+
+	return (bool)queue_enable;
+}
+
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite16(ready, &cfg->queue_enable);
+}
+
 static void ifcvf_hw_disable(struct ifcvf_hw *hw)
 {
 	u32 i;
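Note the recurring pattern in the new helpers: each one writes cfg->queue_select before touching queue_size, queue_desc_*/queue_avail_*/queue_used_* or queue_enable, because the virtio-pci common configuration space exposes a single window of per-queue registers that queue_select switches between. As a hedged sketch only (example_enable_two_queues is not part of the patch, and hw is assumed to be an already-mapped struct ifcvf_hw from the surrounding driver), configuring two queues back to back with the new write-through helpers could look like this:

```c
/*
 * Illustrative only: configure and enable two queues with the write-through
 * helpers added above. Error handling is minimal.
 */
static int example_enable_two_queues(struct ifcvf_hw *hw,
				     u64 desc[2], u64 avail[2], u64 used[2])
{
	u16 qid;
	int ret;

	for (qid = 0; qid < 2; qid++) {
		/* Each helper reselects the queue via queue_select first,
		 * so the shared per-queue register window points at qid. */
		ifcvf_set_vq_num(hw, qid, 256);
		ret = ifcvf_set_vq_address(hw, qid, desc[qid], avail[qid], used[qid]);
		if (ret)
			return ret;
		ifcvf_set_vq_ready(hw, qid, true);
	}

	return 0;
}
```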
@@ -366,16 +384,12 @@ static void ifcvf_hw_disable(struct ifcvf_hw *hw)
 
 int ifcvf_start_hw(struct ifcvf_hw *hw)
 {
-	ifcvf_reset(hw);
 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
 
 	if (ifcvf_config_features(hw) < 0)
 		return -EINVAL;
 
-	if (ifcvf_hw_enable(hw) < 0)
-		return -EINVAL;
-
 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
 
 	return 0;
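With the per-queue programming moved into the individual ops, ifcvf_start_hw() shrinks to the virtio status handshake: ACKNOWLEDGE, DRIVER, feature negotiation, then DRIVER_OK, with no ifcvf_hw_enable() flush of cached state. As a reading aid, here is a small user-space model of how the status bits accumulate; the VIRTIO_CONFIG_S_* values are from the virtio spec, while the assumption that ifcvf_add_status() read-modify-writes the status register (modelled by add_status below) and that ifcvf_config_features() covers the FEATURES_OK step is mine, not stated in this diff.

```c
#include <stdint.h>
#include <stdio.h>

/* Virtio device status bits (VIRTIO_CONFIG_S_* in the virtio spec). */
#define S_ACKNOWLEDGE 1
#define S_DRIVER      2
#define S_DRIVER_OK   4
#define S_FEATURES_OK 8

static uint8_t dev_status;	/* toy stand-in for the device status register */

/* Model of an add_status-style helper: OR one more bit into the status. */
static void add_status(uint8_t bit)
{
	dev_status |= bit;
}

int main(void)
{
	add_status(S_ACKNOWLEDGE);  /* guest has noticed the device */
	add_status(S_DRIVER);       /* guest knows how to drive it */
	add_status(S_FEATURES_OK);  /* feature negotiation step completed */
	add_status(S_DRIVER_OK);    /* device live; queues were already programmed */
	printf("status = 0x%x\n", dev_status);
	return 0;
}
```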
@@ -47,12 +47,7 @@
 #define MSIX_VECTOR_DEV_SHARED		3
 
 struct vring_info {
-	u64 desc;
-	u64 avail;
-	u64 used;
-	u16 size;
 	u16 last_avail_idx;
-	bool ready;
 	void __iomem *notify_addr;
 	phys_addr_t notify_pa;
 	u32 irq;
@@ -137,4 +132,9 @@ int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
 u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+			 u64 driver_area, u64 device_area);
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
 #endif /* _IFCVF_H_ */
@@ -382,10 +382,6 @@ static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
 
 	for (i = 0; i < vf->nr_vring; i++) {
 		vf->vring[i].last_avail_idx = 0;
-		vf->vring[i].desc = 0;
-		vf->vring[i].avail = 0;
-		vf->vring[i].used = 0;
-		vf->vring[i].ready = 0;
 		vf->vring[i].cb.callback = NULL;
 		vf->vring[i].cb.private = NULL;
 	}
@@ -542,14 +538,14 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].ready = ready;
+	ifcvf_set_vq_ready(vf, qid, ready);
 }
 
 static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	return vf->vring[qid].ready;
+	return ifcvf_get_vq_ready(vf, qid);
 }
 
 static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
@@ -557,7 +553,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].size = num;
+	ifcvf_set_vq_num(vf, qid, num);
 }
 
 static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
@@ -566,11 +562,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].desc = desc_area;
-	vf->vring[qid].avail = driver_area;
-	vf->vring[qid].used = device_area;
-
-	return 0;
+	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
 }
 
 static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
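Putting the pieces together from the caller's side: a vDPA bus driver configures each virtqueue through the vdpa ops shown above, and with this patch every call reaches the hardware immediately instead of being staged in vring_info until the device is started. The sketch below only illustrates that order of operations; toy_vdpa_ops, toy_bring_up and TOY_DRIVER_OK are made-up names, not the kernel's struct vdpa_config_ops or its real entry points.

```c
#include <stdint.h>

/* Toy shape of the calls a vDPA bus driver makes; illustrative types only. */
struct toy_vdpa_ops {
	void (*set_vq_num)(void *dev, uint16_t qid, uint32_t num);
	int  (*set_vq_address)(void *dev, uint16_t qid,
			       uint64_t desc, uint64_t driver, uint64_t device);
	void (*set_vq_ready)(void *dev, uint16_t qid, int ready);
	void (*set_status)(void *dev, uint8_t status);
};

#define TOY_DRIVER_OK 4	/* corresponds to VIRTIO_CONFIG_S_DRIVER_OK */

static int toy_bring_up(const struct toy_vdpa_ops *ops, void *dev,
			uint16_t nr_vring, const uint64_t area[][3])
{
	uint16_t qid;

	for (qid = 0; qid < nr_vring; qid++) {
		/* With this patch each call below hits the device registers
		 * at once; nothing waits for a later ifcvf_hw_enable() flush. */
		ops->set_vq_num(dev, qid, 256);
		if (ops->set_vq_address(dev, qid, area[qid][0],
					area[qid][1], area[qid][2]))
			return -1;
		ops->set_vq_ready(dev, qid, 1);
	}

	ops->set_status(dev, TOY_DRIVER_OK);
	return 0;
}
```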