Commit 0530a683 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'vsock-virtio-add-support-for-device-suspend-resume'

Stefano Garzarella says:

====================
vsock/virtio: add support for device suspend/resume

Vilas reported that virtio-vsock no longer worked properly after
suspend/resume (echo mem >/sys/power/state).
It was impossible to connect to the host and vice versa.

Indeed, the support has never been implemented.

This series implements the .freeze and .restore callbacks of struct virtio_driver
to support device suspend/resume.

The first patch factors out the code to initialize and delete VQs.
The second patch uses that code to support device suspend/resume.
Signed-off-by: default avatarStefano Garzarella <sgarzare@redhat.com>
====================

Link: https://lore.kernel.org/r/20220428132241.152679-1-sgarzare@redhat.com
Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parents 954f46d2 bd50c5dc
...@@ -566,67 +566,28 @@ static void virtio_transport_rx_work(struct work_struct *work) ...@@ -566,67 +566,28 @@ static void virtio_transport_rx_work(struct work_struct *work)
mutex_unlock(&vsock->rx_lock); mutex_unlock(&vsock->rx_lock);
} }
static int virtio_vsock_probe(struct virtio_device *vdev) static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{ {
vq_callback_t *callbacks[] = { struct virtio_device *vdev = vsock->vdev;
virtio_vsock_rx_done,
virtio_vsock_tx_done,
virtio_vsock_event_done,
};
static const char * const names[] = { static const char * const names[] = {
"rx", "rx",
"tx", "tx",
"event", "event",
}; };
struct virtio_vsock *vsock = NULL; vq_callback_t *callbacks[] = {
virtio_vsock_rx_done,
virtio_vsock_tx_done,
virtio_vsock_event_done,
};
int ret; int ret;
ret = mutex_lock_interruptible(&the_virtio_vsock_mutex); ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
if (ret)
return ret;
/* Only one virtio-vsock device per guest is supported */
if (rcu_dereference_protected(the_virtio_vsock,
lockdep_is_held(&the_virtio_vsock_mutex))) {
ret = -EBUSY;
goto out;
}
vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
if (!vsock) {
ret = -ENOMEM;
goto out;
}
vsock->vdev = vdev;
ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
vsock->vqs, callbacks, names,
NULL); NULL);
if (ret < 0) if (ret < 0)
goto out; return ret;
virtio_vsock_update_guest_cid(vsock); virtio_vsock_update_guest_cid(vsock);
vsock->rx_buf_nr = 0;
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
mutex_init(&vsock->event_lock);
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
INIT_WORK(&vsock->event_work, virtio_transport_event_work);
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
vsock->seqpacket_allow = true;
vdev->priv = vsock;
virtio_device_ready(vdev); virtio_device_ready(vdev);
mutex_lock(&vsock->tx_lock); mutex_lock(&vsock->tx_lock);
...@@ -643,30 +604,15 @@ static int virtio_vsock_probe(struct virtio_device *vdev) ...@@ -643,30 +604,15 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->event_run = true; vsock->event_run = true;
mutex_unlock(&vsock->event_lock); mutex_unlock(&vsock->event_lock);
rcu_assign_pointer(the_virtio_vsock, vsock);
mutex_unlock(&the_virtio_vsock_mutex);
return 0; return 0;
out:
kfree(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
return ret;
} }
static void virtio_vsock_remove(struct virtio_device *vdev) static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{ {
struct virtio_vsock *vsock = vdev->priv; struct virtio_device *vdev = vsock->vdev;
struct virtio_vsock_pkt *pkt; struct virtio_vsock_pkt *pkt;
mutex_lock(&the_virtio_vsock_mutex); /* Reset all connected sockets when the VQs disappear */
vdev->priv = NULL;
rcu_assign_pointer(the_virtio_vsock, NULL);
synchronize_rcu();
/* Reset all connected sockets when the device disappear */
vsock_for_each_connected_socket(&virtio_transport.transport, vsock_for_each_connected_socket(&virtio_transport.transport,
virtio_vsock_reset_sock); virtio_vsock_reset_sock);
...@@ -711,6 +657,78 @@ static void virtio_vsock_remove(struct virtio_device *vdev) ...@@ -711,6 +657,78 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Delete virtqueues and flush outstanding callbacks if any */ /* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev); vdev->config->del_vqs(vdev);
}
/* Driver probe: allocate and initialize the single per-guest virtio-vsock
 * instance, set up its virtqueues, and publish it via RCU so the transport
 * can find it. Returns 0 on success or a negative errno.
 */
static int virtio_vsock_probe(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = NULL;
int ret;
/* Interruptible: probe can be cancelled by a signal while waiting */
ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
if (ret)
return ret;
/* Only one virtio-vsock device per guest is supported */
if (rcu_dereference_protected(the_virtio_vsock,
lockdep_is_held(&the_virtio_vsock_mutex))) {
ret = -EBUSY;
goto out;
}
vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
if (!vsock) {
ret = -ENOMEM;
goto out;
}
vsock->vdev = vdev;
/* Zero the RX buffer counters; buffers are replenished later */
vsock->rx_buf_nr = 0;
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
/* Per-virtqueue locks plus the send-packet list lock */
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
mutex_init(&vsock->event_lock);
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
/* Deferred work items servicing each virtqueue and the TX packet list */
INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
INIT_WORK(&vsock->event_work, virtio_transport_event_work);
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
vsock->seqpacket_allow = true;
vdev->priv = vsock;
/* Find the VQs and mark the device ready (factored-out helper so that
 * .restore can reuse it) — must happen before publishing the pointer.
 */
ret = virtio_vsock_vqs_init(vsock);
if (ret < 0)
goto out;
/* Publish only after the instance is fully initialized */
rcu_assign_pointer(the_virtio_vsock, vsock);
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
out:
kfree(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
return ret;
}
static void virtio_vsock_remove(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = vdev->priv;
mutex_lock(&the_virtio_vsock_mutex);
vdev->priv = NULL;
rcu_assign_pointer(the_virtio_vsock, NULL);
synchronize_rcu();
virtio_vsock_vqs_del(vsock);
/* Other works can be queued before 'config->del_vqs()', so we flush /* Other works can be queued before 'config->del_vqs()', so we flush
* all works before to free the vsock object to avoid use after free. * all works before to free the vsock object to avoid use after free.
...@@ -725,6 +743,49 @@ static void virtio_vsock_remove(struct virtio_device *vdev) ...@@ -725,6 +743,49 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
kfree(vsock); kfree(vsock);
} }
#ifdef CONFIG_PM_SLEEP
/* PM .freeze callback: unpublish the device and tear down its virtqueues
 * before suspend. The vsock object itself is kept so .restore can reuse it.
 * Always returns 0.
 */
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = vdev->priv;
mutex_lock(&the_virtio_vsock_mutex);
/* Hide the instance from RCU readers, then wait for in-flight readers
 * to finish before deleting the VQs they might still be using.
 */
rcu_assign_pointer(the_virtio_vsock, NULL);
synchronize_rcu();
virtio_vsock_vqs_del(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
}
/* PM .restore callback: re-create the virtqueues torn down by .freeze and
 * republish the instance. Returns 0 on success or a negative errno
 * (-EBUSY if another instance is already registered).
 */
static int virtio_vsock_restore(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = vdev->priv;
int ret;
mutex_lock(&the_virtio_vsock_mutex);
/* Only one virtio-vsock device per guest is supported */
if (rcu_dereference_protected(the_virtio_vsock,
lockdep_is_held(&the_virtio_vsock_mutex))) {
ret = -EBUSY;
goto out;
}
/* Same VQ setup helper used by probe; ret is 0 here on success */
ret = virtio_vsock_vqs_init(vsock);
if (ret < 0)
goto out;
/* Republish only after the VQs are ready; success falls through with
 * ret == 0.
 */
rcu_assign_pointer(the_virtio_vsock, vsock);
out:
mutex_unlock(&the_virtio_vsock_mutex);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
static struct virtio_device_id id_table[] = { static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID }, { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
{ 0 }, { 0 },
...@@ -742,6 +803,10 @@ static struct virtio_driver virtio_vsock_driver = { ...@@ -742,6 +803,10 @@ static struct virtio_driver virtio_vsock_driver = {
.id_table = id_table, .id_table = id_table,
.probe = virtio_vsock_probe, .probe = virtio_vsock_probe,
.remove = virtio_vsock_remove, .remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
.freeze = virtio_vsock_freeze,
.restore = virtio_vsock_restore,
#endif
}; };
static int __init virtio_vsock_init(void) static int __init virtio_vsock_init(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment