Commit f4f5d7cf authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - vdpa generic device type support

 - more virtio hardening for broken devices (but on the same theme,
   revert the virtio-pci interrupt hardening patches - they misused the
   IRQF_NO_AUTOEN interrupt flag)

 - RSS support in virtio-net

 - max device MTU support in mlx5 vdpa

 - akcipher support in virtio-crypto

 - shared IRQ support in ifcvf vdpa

 - a minor performance improvement in vhost

 - enable virtio mem for ARM64

 - beginnings of advanced DMA support

 - cleanups, fixes all over the place

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (33 commits)
  vdpa/mlx5: Avoid processing works if workqueue was destroyed
  vhost: handle error while adding split ranges to iotlb
  vdpa: support exposing the count of vqs to userspace
  vdpa: change the type of nvqs to u32
  vdpa: support exposing the config size to userspace
  vdpa/mlx5: re-create forwarding rules after mac modified
  virtio: pci: check bar values read from virtio config space
  Revert "virtio_pci: harden MSI-X interrupts"
  Revert "virtio-pci: harden INTX interrupts"
  drivers/net/virtio_net: Added RSS hash report control.
  drivers/net/virtio_net: Added RSS hash report.
  drivers/net/virtio_net: Added basic RSS support.
  drivers/net/virtio_net: Fixed padded vheader to use v1 with hash.
  virtio: use virtio_device_ready() in virtio_device_restore()
  tools/virtio: compile with -pthread
  tools/virtio: fix after premapped buf support
  virtio_ring: remove flags check for unmap packed indirect desc
  virtio_ring: remove flags check for unmap split indirect desc
  virtio_ring: rename vring_unmap_state_packed() to vring_unmap_extra_packed()
  net/mlx5: Add support for configuring max device MTU
  ...
parents e729dbe8 ad6dc1da
@@ -3,8 +3,11 @@ config CRYPTO_DEV_VIRTIO
 	tristate "VirtIO crypto driver"
 	depends on VIRTIO
 	select CRYPTO_AEAD
+	select CRYPTO_AKCIPHER2
 	select CRYPTO_SKCIPHER
 	select CRYPTO_ENGINE
+	select CRYPTO_RSA
+	select MPILIB
 	help
 	  This driver provides support for virtio crypto device. If you
 	  choose 'M' here, this module will be called virtio_crypto.
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
 virtio_crypto-objs := \
-	virtio_crypto_algs.o \
+	virtio_crypto_skcipher_algs.o \
+	virtio_crypto_akcipher_algs.o \
 	virtio_crypto_mgr.o \
 	virtio_crypto_core.o
This diff is collapsed.
@@ -56,6 +56,7 @@ struct virtio_crypto {
 	u32 mac_algo_l;
 	u32 mac_algo_h;
 	u32 aead_algo;
+	u32 akcipher_algo;
 
 	/* Maximum length of cipher key */
 	u32 max_cipher_key_len;
@@ -129,7 +130,9 @@ static inline int virtio_crypto_get_current_node(void)
 	return node;
 }
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
 
 #endif /* _VIRTIO_CRYPTO_COMMON_H */
@@ -297,6 +297,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
 	u32 mac_algo_l = 0;
 	u32 mac_algo_h = 0;
 	u32 aead_algo = 0;
+	u32 akcipher_algo = 0;
 	u32 crypto_services = 0;
 
 	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
@@ -348,6 +349,9 @@ static int virtcrypto_probe(struct virtio_device *vdev)
 			mac_algo_h, &mac_algo_h);
 	virtio_cread_le(vdev, struct virtio_crypto_config,
 			aead_algo, &aead_algo);
+	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
+		virtio_cread_le(vdev, struct virtio_crypto_config,
+				akcipher_algo, &akcipher_algo);
 
 	/* Add virtio crypto device to global table */
 	err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -374,7 +378,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
 	vcrypto->mac_algo_h = mac_algo_h;
 	vcrypto->hash_algo = hash_algo;
 	vcrypto->aead_algo = aead_algo;
+	vcrypto->akcipher_algo = akcipher_algo;
 	dev_info(&vdev->dev,
 		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
...
@@ -237,8 +237,14 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
 */
 int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
 {
-	if (virtio_crypto_algs_register(vcrypto)) {
-		pr_err("virtio_crypto: Failed to register crypto algs\n");
+	if (virtio_crypto_skcipher_algs_register(vcrypto)) {
+		pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
+		return -EFAULT;
+	}
+
+	if (virtio_crypto_akcipher_algs_register(vcrypto)) {
+		pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
+		virtio_crypto_skcipher_algs_unregister(vcrypto);
 		return -EFAULT;
 	}
@@ -257,7 +263,8 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
 */
 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
 {
-	virtio_crypto_algs_unregister(vcrypto);
+	virtio_crypto_skcipher_algs_unregister(vcrypto);
+	virtio_crypto_akcipher_algs_unregister(vcrypto);
 }
@@ -312,6 +319,10 @@ bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
 	case VIRTIO_CRYPTO_SERVICE_AEAD:
 		algo_mask = vcrypto->aead_algo;
 		break;
+
+	case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
+		algo_mask = vcrypto->akcipher_algo;
+		break;
 	}
 
 	if (!(algo_mask & (1u << algo)))
...
@@ -613,7 +613,7 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
 	},
 } };
 
-int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
+int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
 {
 	int ret = 0;
 	int i = 0;
@@ -644,7 +644,7 @@ int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
 	return ret;
 }
 
-void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
+void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
 {
 	int i = 0;
...
This diff is collapsed.
@@ -10,45 +10,29 @@
 
 #include "ifcvf_base.h"
 
-static inline u8 ifc_ioread8(u8 __iomem *addr)
-{
-	return ioread8(addr);
-}
-
-static inline u16 ifc_ioread16(__le16 __iomem *addr)
-{
-	return ioread16(addr);
-}
-
-static inline u32 ifc_ioread32(__le32 __iomem *addr)
-{
-	return ioread32(addr);
-}
-
-static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
-{
-	iowrite8(value, addr);
-}
-
-static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
-{
-	iowrite16(value, addr);
-}
-
-static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
-{
-	iowrite32(value, addr);
-}
-
-static void ifc_iowrite64_twopart(u64 val,
-				  __le32 __iomem *lo, __le32 __iomem *hi)
-{
-	ifc_iowrite32((u32)val, lo);
-	ifc_iowrite32(val >> 32, hi);
-}
-
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
 {
 	return container_of(hw, struct ifcvf_adapter, vf);
 }
 
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite16(vector, &cfg->queue_msix_vector);
+
+	return vp_ioread16(&cfg->queue_msix_vector);
+}
+
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+	cfg = hw->common_cfg;
+	vp_iowrite16(vector, &cfg->msix_config);
+
+	return vp_ioread16(&cfg->msix_config);
+}
+
 static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
@@ -158,15 +142,16 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
 		return -EIO;
 	}
 
-	hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+	hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
 
 	for (i = 0; i < hw->nr_vring; i++) {
-		ifc_iowrite16(i, &hw->common_cfg->queue_select);
-		notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
+		vp_iowrite16(i, &hw->common_cfg->queue_select);
+		notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
 		hw->vring[i].notify_addr = hw->notify_base +
 			notify_off * hw->notify_off_multiplier;
 		hw->vring[i].notify_pa = hw->notify_base_pa +
 			notify_off * hw->notify_off_multiplier;
+		hw->vring[i].irq = -EINVAL;
 	}
 
 	hw->lm_cfg = hw->base[IFCVF_LM_BAR];
@@ -176,17 +161,20 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
 		  hw->common_cfg, hw->notify_base, hw->isr,
 		  hw->dev_cfg, hw->notify_off_multiplier);
 
+	hw->vqs_reused_irq = -EINVAL;
+	hw->config_irq = -EINVAL;
+
 	return 0;
 }
 
 u8 ifcvf_get_status(struct ifcvf_hw *hw)
 {
-	return ifc_ioread8(&hw->common_cfg->device_status);
+	return vp_ioread8(&hw->common_cfg->device_status);
 }
 
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 {
-	ifc_iowrite8(status, &hw->common_cfg->device_status);
+	vp_iowrite8(status, &hw->common_cfg->device_status);
 }
 
 void ifcvf_reset(struct ifcvf_hw *hw)
@@ -214,11 +202,11 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
 	u32 features_lo, features_hi;
 	u64 features;
 
-	ifc_iowrite32(0, &cfg->device_feature_select);
-	features_lo = ifc_ioread32(&cfg->device_feature);
+	vp_iowrite32(0, &cfg->device_feature_select);
+	features_lo = vp_ioread32(&cfg->device_feature);
 
-	ifc_iowrite32(1, &cfg->device_feature_select);
-	features_hi = ifc_ioread32(&cfg->device_feature);
+	vp_iowrite32(1, &cfg->device_feature_select);
+	features_hi = vp_ioread32(&cfg->device_feature);
 
 	features = ((u64)features_hi << 32) | features_lo;
@@ -271,12 +259,12 @@ void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
 	WARN_ON(offset + length > hw->config_size);
 
 	do {
-		old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
 		p = dst;
 		for (i = 0; i < length; i++)
-			*p++ = ifc_ioread8(hw->dev_cfg + offset + i);
+			*p++ = vp_ioread8(hw->dev_cfg + offset + i);
 
-		new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
 	} while (old_gen != new_gen);
 }
@@ -289,18 +277,18 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
 	p = src;
 	WARN_ON(offset + length > hw->config_size);
 
 	for (i = 0; i < length; i++)
-		ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
+		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
 }
 
 static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
 {
 	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-	ifc_iowrite32(0, &cfg->guest_feature_select);
-	ifc_iowrite32((u32)features, &cfg->guest_feature);
-	ifc_iowrite32(1, &cfg->guest_feature_select);
-	ifc_iowrite32(features >> 32, &cfg->guest_feature);
+	vp_iowrite32(0, &cfg->guest_feature_select);
+	vp_iowrite32((u32)features, &cfg->guest_feature);
+	vp_iowrite32(1, &cfg->guest_feature_select);
+	vp_iowrite32(features >> 32, &cfg->guest_feature);
 }
 
 static int ifcvf_config_features(struct ifcvf_hw *hw)
@@ -329,7 +317,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
 	q_pair_id = qid / hw->nr_vring;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
-	last_avail_idx = ifc_ioread16(avail_idx_addr);
+	last_avail_idx = vp_ioread16(avail_idx_addr);
 
 	return last_avail_idx;
 }
@@ -344,7 +332,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 	q_pair_id = qid / hw->nr_vring;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
 	hw->vring[qid].last_avail_idx = num;
-	ifc_iowrite16(num, avail_idx_addr);
+	vp_iowrite16(num, avail_idx_addr);
 
 	return 0;
 }
@@ -352,41 +340,23 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
 	struct virtio_pci_common_cfg __iomem *cfg;
-	struct ifcvf_adapter *ifcvf;
 	u32 i;
 
-	ifcvf = vf_to_adapter(hw);
 	cfg = hw->common_cfg;
-	ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
-
-	if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
-		IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
-		return -EINVAL;
-	}
-
 	for (i = 0; i < hw->nr_vring; i++) {
 		if (!hw->vring[i].ready)
 			break;
 
-		ifc_iowrite16(i, &cfg->queue_select);
-		ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
-				      &cfg->queue_desc_hi);
-		ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
-				      &cfg->queue_avail_hi);
-		ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
-				      &cfg->queue_used_hi);
-		ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
-		ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
-
-		if (ifc_ioread16(&cfg->queue_msix_vector) ==
-		    VIRTIO_MSI_NO_VECTOR) {
-			IFCVF_ERR(ifcvf->pdev,
-				  "No msix vector for queue %u\n", i);
-			return -EINVAL;
-		}
-
+		vp_iowrite16(i, &cfg->queue_select);
+		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+				     &cfg->queue_desc_hi);
+		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+				     &cfg->queue_avail_hi);
+		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+				     &cfg->queue_used_hi);
+		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
 		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-		ifc_iowrite16(1, &cfg->queue_enable);
+		vp_iowrite16(1, &cfg->queue_enable);
 	}
 
 	return 0;
@@ -394,18 +364,12 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw)
 static void ifcvf_hw_disable(struct ifcvf_hw *hw)
 {
-	struct virtio_pci_common_cfg __iomem *cfg;
 	u32 i;
 
-	cfg = hw->common_cfg;
-	ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);
+	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
 	for (i = 0; i < hw->nr_vring; i++) {
-		ifc_iowrite16(i, &cfg->queue_select);
-		ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
 	}
-
-	ifc_ioread16(&cfg->queue_msix_vector);
 }
 
 int ifcvf_start_hw(struct ifcvf_hw *hw)
@@ -433,5 +397,5 @@ void ifcvf_stop_hw(struct ifcvf_hw *hw)
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
 {
-	ifc_iowrite16(qid, hw->vring[qid].notify_addr);
+	vp_iowrite16(qid, hw->vring[qid].notify_addr);
 }
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/vdpa.h>
+#include <linux/virtio_pci_modern.h>
 #include <uapi/linux/virtio_net.h>
 #include <uapi/linux/virtio_blk.h>
 #include <uapi/linux/virtio_config.h>
@@ -27,8 +28,6 @@
 #define IFCVF_QUEUE_ALIGNMENT	PAGE_SIZE
 #define IFCVF_QUEUE_MAX		32768
-#define IFCVF_MSI_CONFIG_OFF	0
-#define IFCVF_MSI_QUEUE_OFF	1
 #define IFCVF_PCI_MAX_RESOURCE	6
 
 #define IFCVF_LM_CFG_SIZE	0x40
@@ -42,6 +41,13 @@
 #define ifcvf_private_to_vf(adapter) \
	(&((struct ifcvf_adapter *)adapter)->vf)
 
+/* all vqs and config interrupt has its own vector */
+#define MSIX_VECTOR_PER_VQ_AND_CONFIG		1
+/* all vqs share a vector, and config interrupt has a separate vector */
+#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG	2
+/* all vqs and config interrupt share a vector */
+#define MSIX_VECTOR_DEV_SHARED			3
+
 struct vring_info {
	u64 desc;
	u64 avail;
@@ -60,25 +66,27 @@ struct ifcvf_hw {
 	u8 __iomem *isr;
 	/* Live migration */
 	u8 __iomem *lm_cfg;
-	u16 nr_vring;
 	/* Notification bar number */
 	u8 notify_bar;
+	u8 msix_vector_status;
+	/* virtio-net or virtio-blk device config size */
+	u32 config_size;
 	/* Notificaiton bar address */
 	void __iomem *notify_base;
 	phys_addr_t notify_base_pa;
 	u32 notify_off_multiplier;
+	u32 dev_type;
 	u64 req_features;
 	u64 hw_features;
-	u32 dev_type;
 	struct virtio_pci_common_cfg __iomem *common_cfg;
 	void __iomem *dev_cfg;
 	struct vring_info vring[IFCVF_MAX_QUEUES];
 	void __iomem * const *base;
 	char config_msix_name[256];
 	struct vdpa_callback config_cb;
-	unsigned int config_irq;
-	/* virtio-net or virtio-blk device config size */
-	u32 config_size;
+	int config_irq;
+	int vqs_reused_irq;
+	u16 nr_vring;
 };
 
 struct ifcvf_adapter {
@@ -123,4 +131,6 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
+u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
 
 #endif /* _IFCVF_H_ */
This diff is collapsed.
@@ -1475,7 +1475,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
 	struct mlx5_core_dev *pfmdev;
 	size_t read;
-	u8 mac[ETH_ALEN];
+	u8 mac[ETH_ALEN], mac_back[ETH_ALEN];
 
 	pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
 	switch (cmd) {
@@ -1489,6 +1489,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 			break;
 		}
 
+		if (is_zero_ether_addr(mac))
+			break;
+
 		if (!is_zero_ether_addr(ndev->config.mac)) {
 			if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
 				mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
@@ -1503,7 +1506,47 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 			break;
 		}
 
+		/* backup the original mac address so that if failed to add the forward rules
+		 * we could restore it
+		 */
+		memcpy(mac_back, ndev->config.mac, ETH_ALEN);
+
 		memcpy(ndev->config.mac, mac, ETH_ALEN);
+
+		/* Need recreate the flow table entry, so that the packet could forward back
+		 */
+		remove_fwd_to_tir(ndev);
+
+		if (add_fwd_to_tir(ndev)) {
+			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
+
+			/* Although it hardly run here, we still need double check */
+			if (is_zero_ether_addr(mac_back)) {
+				mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n");
+				break;
+			}
+
+			/* Try to restore original mac address to MFPS table, and try to restore
+			 * the forward rule entry.
+			 */
+			if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+				mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n",
+					       ndev->config.mac);
+			}
+
+			if (mlx5_mpfs_add_mac(pfmdev, mac_back)) {
+				mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n",
+					       mac_back);
+			}
+
+			memcpy(ndev->config.mac, mac_back, ETH_ALEN);
+
+			if (add_fwd_to_tir(ndev))
+				mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
+
+			break;
+		}
+
 		status = VIRTIO_NET_OK;
 		break;
@@ -1669,7 +1712,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 		return;
 
 	if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
-		if (!mvdev->cvq.ready)
+		if (!mvdev->wq || !mvdev->cvq.ready)
 			return;
 
 		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
@@ -2565,6 +2608,28 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
 	return ret;
 }
 
+static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *in;
+	int err;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu,
+		 mtu + MLX5V_ETH_HARD_MTU);
+	MLX5_SET(modify_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+
+	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
+
+	kvfree(in);
+	return err;
+}
+
 static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
			     const struct vdpa_dev_set_config *add_config)
 {
@@ -2624,6 +2689,13 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	init_mvqs(ndev);
 	mutex_init(&ndev->reslock);
 	config = &ndev->config;
+
+	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) {
+		err = config_func_mtu(mdev, add_config->net.mtu);
+		if (err)
+			goto err_mtu;
+	}
+
 	err = query_mtu(mdev, &mtu);
 	if (err)
 		goto err_mtu;
@@ -2707,9 +2779,12 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
 	struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+	struct workqueue_struct *wq;
 
 	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
-	destroy_workqueue(mvdev->wq);
+	wq = mvdev->wq;
+	mvdev->wq = NULL;
+	destroy_workqueue(wq);
 	_vdpa_unregister_device(dev);
 	mgtdev->ndev = NULL;
 }
@@ -2741,7 +2816,8 @@ static int mlx5v_probe(struct auxiliary_device *adev,
 	mgtdev->mgtdev.device = mdev->device;
 	mgtdev->mgtdev.id_table = id_table;
 	mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
-					  BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+					  BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP) |
+					  BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
 	mgtdev->mgtdev.max_supported_vqs =
 		MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
 	mgtdev->mgtdev.supported_features = get_supported_features(mdev);
...
@@ -232,7 +232,7 @@ static int vdpa_name_match(struct device *dev, const void *data)
 	return (strcmp(dev_name(&vdev->dev), data) == 0);
 }
 
-static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
 	struct device *dev;
@@ -257,7 +257,7 @@ static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
 *
 * Return: Returns an error when fail to add device to vDPA bus
 */
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
 	if (!vdev->mdev)
 		return -EINVAL;
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
 *
 * Return: Returns an error when fail to add to vDPA bus
 */
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
 {
 	int err;
...
@@ -62,8 +62,12 @@ int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
 	 */
 	if (start == 0 && last == ULONG_MAX) {
 		u64 mid = last / 2;
+		int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
+				perm, opaque);
+
+		if (err)
+			return err;
 
-		vhost_iotlb_add_range_ctx(iotlb, start, mid, addr, perm, opaque);
 		addr += mid + 1;
 		start = mid + 1;
 	}
...
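For reference, the (start == 0 && last == ULONG_MAX) special case above exists because the size of a range spanning the whole 64-bit space does not fit in a u64, so the mapping is inserted as two halves; the fix simply stops ignoring a failure of the first half. A minimal illustration (plain C, assuming a 64-bit unsigned long; the variable names here are ours, not the kernel's):

	unsigned long start = 0, last = ULONG_MAX;
	unsigned long size = last - start + 1;	/* wraps around to 0: one interval cannot describe the range */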
@@ -42,7 +42,7 @@ struct vhost_vdpa {
 	struct device dev;
 	struct cdev cdev;
 	atomic_t opened;
-	int nvqs;
+	u32 nvqs;
 	int virtio_id;
 	int minor;
 	struct eventfd_ctx *config_ctx;
@@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 		return;
 
 	irq = ops->get_vq_irq(vdpa, qid);
+	if (irq < 0)
+		return;
+
 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
-	if (!vq->call_ctx.ctx || irq < 0)
+	if (!vq->call_ctx.ctx)
 		return;
 
 	vq->call_ctx.producer.token = vq->call_ctx.ctx;
@@ -158,7 +161,8 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 	u8 status, status_old;
-	int ret, nvqs = v->nvqs;
+	u32 nvqs = v->nvqs;
+	int ret;
 	u16 i;
 
 	if (copy_from_user(&status, statusp, sizeof(status)))
@@ -355,6 +359,30 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
 	return 0;
 }
 
+static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+	u32 size;
+
+	size = ops->get_config_size(vdpa);
+
+	if (copy_to_user(argp, &size, sizeof(size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+
+	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
 {
@@ -492,6 +520,12 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	case VHOST_VDPA_GET_IOVA_RANGE:
 		r = vhost_vdpa_get_iova_range(v, argp);
 		break;
+	case VHOST_VDPA_GET_CONFIG_SIZE:
+		r = vhost_vdpa_get_config_size(v, argp);
+		break;
+	case VHOST_VDPA_GET_VQS_COUNT:
+		r = vhost_vdpa_get_vqs_count(v, argp);
+		break;
 	default:
 		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
 		if (r == -ENOIOCTLCMD)
@@ -948,7 +982,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 	struct vhost_vdpa *v;
 	struct vhost_dev *dev;
 	struct vhost_virtqueue **vqs;
-	int nvqs, i, r, opened;
+	int r, opened;
+	u32 i, nvqs;
 
 	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
@@ -1001,7 +1036,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
 {
-	int i;
+	u32 i;
 
 	for (i = 0; i < v->nvqs; i++)
 		vhost_vdpa_unsetup_vq_irq(v, i);
...
@@ -2550,8 +2550,9 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 			       &vq->avail->idx, r);
 		return false;
 	}
+	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
-	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
+	return vq->avail_idx != vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
...
@@ -105,7 +105,7 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
 	tristate "Virtio mem driver"
-	depends on X86_64
+	depends on X86_64 || ARM64
 	depends on VIRTIO
 	depends on MEMORY_HOTPLUG
 	depends on MEMORY_HOTREMOVE
@@ -115,8 +115,9 @@ config VIRTIO_MEM
 	  This driver provides access to virtio-mem paravirtualized memory
 	  devices, allowing to hotplug and hotunplug memory.
 
-	  This driver was only tested under x86-64, but should theoretically
-	  work on all architectures that support memory hotplug and hotremove.
+	  This driver was only tested under x86-64 and arm64, but should
+	  theoretically work on all architectures that support memory hotplug
+	  and hotremove.
 
 	  If unsure, say M.
...
@@ -526,8 +526,9 @@ int virtio_device_restore(struct virtio_device *dev)
 		goto err;
 	}
 
-	/* Finally, tell the device we're all set */
-	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+	/* If restore didn't do it, mark device DRIVER_OK ourselves. */
+	if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
+		virtio_device_ready(dev);
 
 	virtio_config_enable(dev);
...
@@ -24,46 +24,17 @@ MODULE_PARM_DESC(force_legacy,
	 "Force legacy mode for transitional virtio 1 devices");
 #endif
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev)
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i;
 
-	if (vp_dev->intx_enabled) {
-		/*
-		 * The below synchronize() guarantees that any
-		 * interrupt for this line arriving after
-		 * synchronize_irq() has completed is guaranteed to see
-		 * intx_soft_enabled == false.
-		 */
-		WRITE_ONCE(vp_dev->intx_soft_enabled, false);
+	if (vp_dev->intx_enabled)
 		synchronize_irq(vp_dev->pci_dev->irq);
-	}
-
-	for (i = 0; i < vp_dev->msix_vectors; ++i)
-		disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
-}
-
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	int i;
-
-	if (vp_dev->intx_enabled) {
-		disable_irq(vp_dev->pci_dev->irq);
-		/*
-		 * The above disable_irq() provides TSO ordering and
-		 * as such promotes the below store to store-release.
-		 */
-		WRITE_ONCE(vp_dev->intx_soft_enabled, true);
-		enable_irq(vp_dev->pci_dev->irq);
-		return;
-	}
 
 	for (i = 0; i < vp_dev->msix_vectors; ++i)
-		enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
+		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
 /* the notify function used when creating a virt queue */
@@ -113,9 +84,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	struct virtio_pci_device *vp_dev = opaque;
 	u8 isr;
 
-	if (!READ_ONCE(vp_dev->intx_soft_enabled))
-		return IRQ_NONE;
-
 	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
 	isr = ioread8(vp_dev->isr);
@@ -173,8 +141,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
 	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-			  vp_config_changed, IRQF_NO_AUTOEN,
-			  vp_dev->msix_names[v],
+			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
 	if (err)
		goto error;
@@ -193,8 +160,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-virtqueues", name);
 	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
-			  vp_vring_interrupt, IRQF_NO_AUTOEN,
-			  vp_dev->msix_names[v],
+			  vp_vring_interrupt, 0, vp_dev->msix_names[v],
			  vp_dev);
 	if (err)
		goto error;
@@ -371,7 +337,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-				  vring_interrupt, IRQF_NO_AUTOEN,
+				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
...
@@ -63,7 +63,6 @@ struct virtio_pci_device {
 	/* MSI-X support */
 	int msix_enabled;
 	int intx_enabled;
-	bool intx_soft_enabled;
 	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
@@ -102,10 +101,8 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 	return container_of(vdev, struct virtio_pci_device, vdev);
 }
 
-/* disable irq handlers */
-void vp_disable_cbs(struct virtio_device *vdev);
-/* enable irq handlers */
-void vp_enable_cbs(struct virtio_device *vdev);
+/* wait for pending irq handlers */
+void vp_synchronize_vectors(struct virtio_device *vdev);
 /* the notify function used when creating a virt queue */
 bool vp_notify(struct virtqueue *vq);
 /* the config->del_vqs() implementation */
...
@@ -98,8 +98,8 @@ static void vp_reset(struct virtio_device *vdev)
 	/* Flush out the status write, and flush in device writes,
	 * including MSi-X interrupts, if any. */
 	vp_legacy_get_status(&vp_dev->ldev);
-	/* Disable VQ/configuration callbacks. */
-	vp_disable_cbs(vdev);
+	/* Flush pending VQ/configuration callbacks. */
+	vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -185,7 +185,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
 }
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-	.enable_cbs	= vp_enable_cbs,
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
...
@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
	 */
 	while (vp_modern_get_status(mdev))
		msleep(1);
-	/* Disable VQ/configuration callbacks. */
-	vp_disable_cbs(vdev);
+	/* Flush pending VQ/configuration callbacks. */
+	vp_synchronize_vectors(vdev);
 }
 
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -293,7 +293,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
 	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
-		u8 type, cap_len, id;
+		u8 type, cap_len, id, res_bar;
		u32 tmp32;
		u64 res_offset, res_length;
@@ -315,9 +315,14 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
 		if (id != required_id)
			continue;
 
-		/* Type, and ID match, looks good */
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
-						 bar), bar);
+						 bar), &res_bar);
+		if (res_bar >= PCI_STD_NUM_BARS)
+			continue;
+
+		/* Type and ID match, and the BAR value isn't reserved.
+		 * Looks good.
+		 */
 
		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
@@ -337,6 +342,7 @@ static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
					 length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;
 
+		*bar = res_bar;
		*offset = res_offset;
		*len = res_length;
@@ -380,7 +386,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
 }
 
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
-	.enable_cbs	= vp_enable_cbs,
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
@@ -398,7 +403,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
-	.enable_cbs	= vp_enable_cbs,
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
...
@@ -35,6 +35,13 @@ vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
 	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);
 
+	/* Check if the BAR may have changed since we requested the region. */
+	if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
+		dev_err(&dev->dev,
+			"virtio_pci: bar unexpectedly changed to %u\n", bar);
+		return NULL;
+	}
+
 	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
@@ -120,7 +127,7 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
			     &bar);
 
		/* Ignore structures with reserved BAR values */
-		if (bar > 0x5)
+		if (bar >= PCI_STD_NUM_BARS)
			continue;
 
		if (type == cfg_type) {
...
@@ -379,19 +379,11 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
 
-	if (flags & VRING_DESC_F_INDIRECT) {
-		dma_unmap_single(vring_dma_dev(vq),
-				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
-				 virtio32_to_cpu(vq->vq.vdev, desc->len),
-				 (flags & VRING_DESC_F_WRITE) ?
-				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	} else {
-		dma_unmap_page(vring_dma_dev(vq),
-			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
-			       virtio32_to_cpu(vq->vq.vdev, desc->len),
-			       (flags & VRING_DESC_F_WRITE) ?
-			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	}
+	dma_unmap_page(vring_dma_dev(vq),
+		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
+		       virtio32_to_cpu(vq->vq.vdev, desc->len),
+		       (flags & VRING_DESC_F_WRITE) ?
+		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
@@ -984,24 +976,24 @@ static struct virtqueue *vring_create_virtqueue_split(
 * Packed ring specific functions - *_packed().
 */
 
-static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
-				     struct vring_desc_extra *state)
+static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
+				     struct vring_desc_extra *extra)
 {
 	u16 flags;
 
 	if (!vq->use_dma_api)
		return;
 
-	flags = state->flags;
+	flags = extra->flags;
 
 	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
-				 state->addr, state->len,
+				 extra->addr, extra->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
-			       state->addr, state->len,
+			       extra->addr, extra->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
@@ -1017,19 +1009,11 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 	flags = le16_to_cpu(desc->flags);
 
-	if (flags & VRING_DESC_F_INDIRECT) {
-		dma_unmap_single(vring_dma_dev(vq),
-				 le64_to_cpu(desc->addr),
-				 le32_to_cpu(desc->len),
-				 (flags & VRING_DESC_F_WRITE) ?
-				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	} else {
-		dma_unmap_page(vring_dma_dev(vq),
-			       le64_to_cpu(desc->addr),
-			       le32_to_cpu(desc->len),
-			       (flags & VRING_DESC_F_WRITE) ?
-			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
-	}
+	dma_unmap_page(vring_dma_dev(vq),
+		       le64_to_cpu(desc->addr),
+		       le32_to_cpu(desc->len),
+		       (flags & VRING_DESC_F_WRITE) ?
+		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 
 static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -1303,8 +1287,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
-		vring_unmap_state_packed(vq,
-					 &vq->packed.desc_extra[curr]);
+		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		i++;
		if (i >= vq->packed.vring.num)
@@ -1383,8 +1366,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
-			vring_unmap_state_packed(vq,
-						 &vq->packed.desc_extra[curr]);
+			vring_unmap_extra_packed(vq,
+						 &vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;
		}
	}
...
@@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
 
 #ifdef CONFIG_BALLOON_COMPACTION
 extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
-				isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
-				struct page *newpage,
-				struct page *page, enum migrate_mode mode);
 
 /*
 * balloon_page_insert - insert a page into the balloon's page list and make
@@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page)
 	list_del(&page->lru);
 }
 
-static inline bool balloon_page_isolate(struct page *page)
-{
-	return false;
-}
-
-static inline void balloon_page_putback(struct page *page)
-{
-	return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
-				struct page *page, enum migrate_mode mode)
-{
-	return 0;
-}
-
 static inline gfp_t balloon_mapping_gfp_mask(void)
 {
 	return GFP_HIGHUSER;
@@ -83,7 +83,7 @@ struct vdpa_device {
 	unsigned int index;
 	bool features_valid;
 	bool use_va;
-	int nvqs;
+	u32 nvqs;
 	struct vdpa_mgmt_dev *mdev;
 };
@@ -207,7 +208,8 @@ struct vdpa_map_file {
  * @reset:			Reset device
  *				@vdev: vdpa device
  *				Returns integer: success (0) or error (< 0)
- * @get_config_size:		Get the size of the configuration space
+ * @get_config_size:		Get the size of the configuration space includes
+ *				fields that are conditional on feature bits.
  *				@vdev: vdpa device
  *				Returns size_t: configuration size
  * @get_config:			Read from device specific configuration space
@@ -337,10 +338,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
			dev_struct, member)), name, use_va), \
		       dev_struct, member)
 
-int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
-int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
+int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
 void _vdpa_unregister_device(struct vdpa_device *vdev);
 
 /**
...
@@ -150,4 +150,11 @@
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE	_IOR(VHOST_VIRTIO, 0x78, \
					     struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE	_IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT	_IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
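The two new ioctls follow the pattern of VHOST_VDPA_GET_IOVA_RANGE above: userspace issues them on an open vhost-vdpa character device and reads back a __u32. A minimal usage sketch (plain C; /dev/vhost-vdpa-0 is a placeholder device node and error handling is trimmed to the essentials):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	int main(void)
	{
		__u32 config_size, vqs_count;
		int fd = open("/dev/vhost-vdpa-0", O_RDWR);	/* placeholder path */

		if (fd < 0)
			return 1;

		/* both ioctls just copy a __u32 back to userspace */
		if (ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, &config_size) < 0 ||
		    ioctl(fd, VHOST_VDPA_GET_VQS_COUNT, &vqs_count) < 0)
			return 1;

		printf("config size: %u, vqs: %u\n", config_size, vqs_count);
		return 0;
	}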
@@ -82,6 +82,12 @@
 /* This feature indicates support for the packed virtqueue layout. */
 #define VIRTIO_F_RING_PACKED		34
 
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER		35
+
 /*
 * This feature indicates that memory accesses by the driver and the
 * device are ordered in a way described by the platform.
...
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#define VIRTIO_CRYPTO_SERVICE_HASH 1 #define VIRTIO_CRYPTO_SERVICE_HASH 1
#define VIRTIO_CRYPTO_SERVICE_MAC 2 #define VIRTIO_CRYPTO_SERVICE_MAC 2
#define VIRTIO_CRYPTO_SERVICE_AEAD 3 #define VIRTIO_CRYPTO_SERVICE_AEAD 3
#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op)) #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
...@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header { ...@@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \ #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
__le32 opcode; __le32 opcode;
__le32 algo; __le32 algo;
__le32 flag; __le32 flag;
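For reference, VIRTIO_CRYPTO_OPCODE() packs the service ID into bits 15..8 and the operation into bits 7..0, so with VIRTIO_CRYPTO_SERVICE_AKCIPHER == 4 the new control opcodes expand to:

	VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION	/* (4 << 8) | 0x04 == 0x0404 */
	VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION	/* (4 << 8) | 0x05 == 0x0405 */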
...@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req { ...@@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req {
__u8 padding[32]; __u8 padding[32];
}; };
struct virtio_crypto_rsa_session_para {
#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0
#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
__le32 padding_algo;
#define VIRTIO_CRYPTO_RSA_NO_HASH 0
#define VIRTIO_CRYPTO_RSA_MD2 1
#define VIRTIO_CRYPTO_RSA_MD3 2
#define VIRTIO_CRYPTO_RSA_MD4 3
#define VIRTIO_CRYPTO_RSA_MD5 4
#define VIRTIO_CRYPTO_RSA_SHA1 5
#define VIRTIO_CRYPTO_RSA_SHA256 6
#define VIRTIO_CRYPTO_RSA_SHA384 7
#define VIRTIO_CRYPTO_RSA_SHA512 8
#define VIRTIO_CRYPTO_RSA_SHA224 9
__le32 hash_algo;
};
struct virtio_crypto_ecdsa_session_para {
#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0
#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
__le32 curve_id;
__le32 padding;
};
struct virtio_crypto_akcipher_session_para {
#define VIRTIO_CRYPTO_NO_AKCIPHER 0
#define VIRTIO_CRYPTO_AKCIPHER_RSA 1
#define VIRTIO_CRYPTO_AKCIPHER_DSA 2
#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
__le32 algo;
#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1
#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
__le32 keytype;
__le32 keylen;
union {
struct virtio_crypto_rsa_session_para rsa;
struct virtio_crypto_ecdsa_session_para ecdsa;
} u;
};
struct virtio_crypto_akcipher_create_session_req {
struct virtio_crypto_akcipher_session_para para;
__u8 padding[36];
};
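As an illustration only, a create-session request for PKCS#1 RSA signing with SHA-256 would be filled roughly as below; key_len is a placeholder for the length of the key material, and each __le32 field must be stored little endian (cpu_to_le32() in kernel code):

	struct virtio_crypto_akcipher_create_session_req req = {};

	req.para.algo    = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	req.para.keytype = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE);
	req.para.keylen  = cpu_to_le32(key_len);	/* placeholder */
	req.para.u.rsa.padding_algo = cpu_to_le32(VIRTIO_CRYPTO_RSA_PKCS1_PADDING);
	req.para.u.rsa.hash_algo    = cpu_to_le32(VIRTIO_CRYPTO_RSA_SHA256);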
struct virtio_crypto_alg_chain_session_para { struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
...@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req { ...@@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req {
mac_create_session; mac_create_session;
struct virtio_crypto_aead_create_session_req struct virtio_crypto_aead_create_session_req
aead_create_session; aead_create_session;
struct virtio_crypto_akcipher_create_session_req
akcipher_create_session;
struct virtio_crypto_destroy_session_req struct virtio_crypto_destroy_session_req
destroy_session; destroy_session;
__u8 padding[56]; __u8 padding[56];
...@@ -266,6 +325,14 @@ struct virtio_crypto_op_header { ...@@ -266,6 +325,14 @@ struct virtio_crypto_op_header {
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \ #define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
__le32 opcode; __le32 opcode;
/* algo should be service-specific algorithms */ /* algo should be service-specific algorithms */
__le32 algo; __le32 algo;
...@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req { ...@@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req {
__u8 padding[32]; __u8 padding[32];
}; };
struct virtio_crypto_akcipher_para {
__le32 src_data_len;
__le32 dst_data_len;
};
struct virtio_crypto_akcipher_data_req {
struct virtio_crypto_akcipher_para para;
__u8 padding[40];
};
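A per-operation request on the data queue then carries just the source and destination lengths; a sign operation might be set up like this (session_id comes back from session creation, the lengths are illustrative):

	struct virtio_crypto_op_data_req dreq = {};

	dreq.header.opcode     = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_SIGN);
	dreq.header.session_id = cpu_to_le64(session_id);
	dreq.u.akcipher_req.para.src_data_len = cpu_to_le32(digest_len);	/* data to sign */
	dreq.u.akcipher_req.para.dst_data_len = cpu_to_le32(sig_len);	/* signature buffer */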
/* The request of the data virtqueue's packet */ /* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req { struct virtio_crypto_op_data_req {
struct virtio_crypto_op_header header; struct virtio_crypto_op_header header;
...@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req { ...@@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req {
struct virtio_crypto_hash_data_req hash_req; struct virtio_crypto_hash_data_req hash_req;
struct virtio_crypto_mac_data_req mac_req; struct virtio_crypto_mac_data_req mac_req;
struct virtio_crypto_aead_data_req aead_req; struct virtio_crypto_aead_data_req aead_req;
struct virtio_crypto_akcipher_data_req akcipher_req;
__u8 padding[48]; __u8 padding[48];
} u; } u;
}; };
...@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req { ...@@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req {
#define VIRTIO_CRYPTO_BADMSG 2 #define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3 #define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ #define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */
#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
/* The accelerator hardware is ready */ /* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0) #define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
...@@ -438,7 +518,7 @@ struct virtio_crypto_config { ...@@ -438,7 +518,7 @@ struct virtio_crypto_config {
__le32 max_cipher_key_len; __le32 max_cipher_key_len;
/* Maximum length of authenticated key */ /* Maximum length of authenticated key */
__le32 max_auth_key_len; __le32 max_auth_key_len;
__le32 reserve; __le32 akcipher_algo;
/* Maximum size of each crypto request's content */ /* Maximum size of each crypto request's content */
__le64 max_size; __le64 max_size;
}; };
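The formerly reserved word now advertises asymmetric algorithm support as a bit mask indexed by the VIRTIO_CRYPTO_AKCIPHER_* values; a probe-time sketch using the standard little-endian config helper (the registration hook is hypothetical):

	u32 akcipher_algo;

	virtio_cread_le(vdev, struct virtio_crypto_config, akcipher_algo, &akcipher_algo);
	if (akcipher_algo & (1u << VIRTIO_CRYPTO_AKCIPHER_RSA))
		register_rsa_algs(vcrypto);	/* hypothetical: device offers the RSA service */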
......
...@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue); ...@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
#ifdef CONFIG_BALLOON_COMPACTION #ifdef CONFIG_BALLOON_COMPACTION
bool balloon_page_isolate(struct page *page, isolate_mode_t mode) static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{ {
struct balloon_dev_info *b_dev_info = balloon_page_device(page); struct balloon_dev_info *b_dev_info = balloon_page_device(page);
...@@ -217,7 +217,7 @@ bool balloon_page_isolate(struct page *page, isolate_mode_t mode) ...@@ -217,7 +217,7 @@ bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
return true; return true;
} }
void balloon_page_putback(struct page *page) static void balloon_page_putback(struct page *page)
{ {
struct balloon_dev_info *b_dev_info = balloon_page_device(page); struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags; unsigned long flags;
...@@ -230,7 +230,7 @@ void balloon_page_putback(struct page *page) ...@@ -230,7 +230,7 @@ void balloon_page_putback(struct page *page)
/* move_to_new_page() counterpart for a ballooned page */ /* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping, static int balloon_page_migrate(struct address_space *mapping,
struct page *newpage, struct page *page, struct page *newpage, struct page *page,
enum migrate_mode mode) enum migrate_mode mode)
{ {
......
...@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o ...@@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o vringh_test: vringh_test.o vringh.o virtio_ring.o
CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
LDFLAGS += -lpthread CFLAGS += -pthread
LDFLAGS += -pthread
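(Passing -pthread at both compile and link time, rather than bare -lpthread at link only, lets the toolchain set its threading-related defines such as _REENTRANT and link the threading runtime consistently.)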
vpath %.c ../../drivers/virtio ../../drivers/vhost vpath %.c ../../drivers/virtio ../../drivers/vhost
mod: mod:
${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V} ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
......
...@@ -26,8 +26,8 @@ enum dma_data_direction { ...@@ -26,8 +26,8 @@ enum dma_data_direction {
#define dma_map_single(d, p, s, dir) (virt_to_phys(p)) #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
#define dma_mapping_error(...) (0) #define dma_mapping_error(...) (0)
#define dma_unmap_single(...) do { } while (0) #define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
#define dma_unmap_page(...) do { } while (0) #define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
#define dma_max_mapping_size(...) SIZE_MAX #define dma_max_mapping_size(...) SIZE_MAX
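The stubs now evaluate every argument rather than discarding the argument list; a minimal sketch of why this matters under the tools' -Wall -Werror build (tx_cleanup is illustrative):

	/* With the old variadic no-op, 'dir' was never evaluated, so
	 * -Wunused-but-set-variable could break the -Werror build; the
	 * (void) casts above consume each argument instead.
	 */
	void tx_cleanup(struct device *d, dma_addr_t addr, size_t len)
	{
		enum dma_data_direction dir = DMA_TO_DEVICE;

		dma_unmap_single(d, addr, len, dir);
	}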
......