Commit fcaa0ad7 authored by Linus Torvalds

Merge tag 'for-linus-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml

Pull UML updates from Richard Weinberger:

 - Fixes for our new virtio code

 - Fix for the irqflags tracer

 - Kconfig coding style fixes

 - Allow BPF firmware loading in our vector driver (see the usage sketch below)

* tag 'for-linus-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  um: Loadable BPF "Firmware" for vector drivers
  um: Fix Kconfig indentation
  um: virtio_uml: Disallow modular build
  um: virtio: Keep reading on -EAGAIN
  um: virtio: Remove device on disconnect
  um: Don't trace irqflags during shutdown
parents e3a251e3 9807019a
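
The BPF "firmware" support merged above is driven from two places: the new bpffile=/bpfflash= options of a vector device, and the ethtool flash interface wired up to vector_net_load_bpf_flash(). A rough usage sketch follows (the device name, interface string and filter file are illustrative, not taken from this merge):

    # UML command line: preload a classic-BPF filter and allow the guest
    # to replace it later via ethtool flash
    vec0:transport=raw,ifname=eth0,bpffile=/path/to/filter.bpf,bpfflash=1

    # inside the guest: flash a new filter; the file is fetched through
    # request_firmware(), i.e. the guest's firmware loading path
    ethtool -f vec0 filter.bpf

In both cases the file is read as a flat array of struct sock_filter entries (classic BPF), not an eBPF object; bpffile= is opened on the host side when the interface is brought up, while the ethtool path loads the blob from inside the guest.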
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -337,7 +337,7 @@ config UML_NET_SLIRP
 endmenu
 
 config VIRTIO_UML
-	tristate "UML driver for virtio devices"
+	bool "UML driver for virtio devices"
 	select VIRTIO
 	help
 	  This driver provides support for virtio based paravirtual device
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 - Cambridge Greys Limited
+ * Copyright (C) 2017 - 2019 Cambridge Greys Limited
  * Copyright (C) 2011 - 2014 Cisco Systems Inc
  * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
@@ -21,6 +21,9 @@
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <uapi/linux/filter.h>
 #include <init.h>
 #include <irq_kern.h>
 #include <irq_user.h>
@@ -128,6 +131,23 @@ static int get_mtu(struct arglist *def)
 	return ETH_MAX_PACKET;
 }
 
+static char *get_bpf_file(struct arglist *def)
+{
+	return uml_vector_fetch_arg(def, "bpffile");
+}
+
+static bool get_bpf_flash(struct arglist *def)
+{
+	char *allow = uml_vector_fetch_arg(def, "bpfflash");
+	long result;
+
+	if (allow != NULL) {
+		if (kstrtoul(allow, 10, &result) == 0)
+			return (allow > 0);
+	}
+	return false;
+}
+
 static int get_depth(struct arglist *def)
 {
 	char *mtu = uml_vector_fetch_arg(def, "depth");
@@ -176,6 +196,7 @@ static int get_transport_options(struct arglist *def)
 	int vec_rx = VECTOR_RX;
 	int vec_tx = VECTOR_TX;
 	long parsed;
+	int result = 0;
 
 	if (vector != NULL) {
 		if (kstrtoul(vector, 10, &parsed) == 0) {
@@ -186,14 +207,16 @@ static int get_transport_options(struct arglist *def)
 		}
 	}
 
+	if (get_bpf_flash(def))
+		result = VECTOR_BPF_FLASH;
+
 	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
-		return 0;
+		return result;
 	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
-		return (vec_rx | VECTOR_BPF);
+		return (result | vec_rx | VECTOR_BPF);
 	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
-		return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
-	return (vec_rx | vec_tx);
+		return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
+	return (result | vec_rx | vec_tx);
 }
@@ -1139,6 +1162,8 @@ static int vector_net_close(struct net_device *dev)
 	}
 	tasklet_kill(&vp->tx_poll);
 	if (vp->fds->rx_fd > 0) {
+		if (vp->bpf)
+			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
 		os_close_file(vp->fds->rx_fd);
 		vp->fds->rx_fd = -1;
 	}
@@ -1146,7 +1171,10 @@ static int vector_net_close(struct net_device *dev)
 		os_close_file(vp->fds->tx_fd);
 		vp->fds->tx_fd = -1;
 	}
+	if (vp->bpf != NULL)
+		kfree(vp->bpf->filter);
 	kfree(vp->bpf);
+	vp->bpf = NULL;
 	kfree(vp->fds->remote_addr);
 	kfree(vp->transport_data);
 	kfree(vp->header_rxbuffer);
@@ -1181,6 +1209,7 @@ static void vector_reset_tx(struct work_struct *work)
 	netif_start_queue(vp->dev);
 	netif_wake_queue(vp->dev);
 }
+
 static int vector_net_open(struct net_device *dev)
 {
 	struct vector_private *vp = netdev_priv(dev);
@@ -1196,6 +1225,8 @@ static int vector_net_open(struct net_device *dev)
 	vp->opened = true;
 	spin_unlock_irqrestore(&vp->lock, flags);
 
+	vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
+
 	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
 
 	if (vp->fds == NULL)
@@ -1267,8 +1298,11 @@ static int vector_net_open(struct net_device *dev)
 		if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
 			vp->options |= VECTOR_BPF;
 	}
-	if ((vp->options & VECTOR_BPF) != 0)
-		vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);
+	if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
+		vp->bpf = uml_vector_default_bpf(dev->dev_addr);
+
+	if (vp->bpf != NULL)
+		uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
 
 	netif_start_queue(dev);
@@ -1347,6 +1381,65 @@ static void vector_net_get_drvinfo(struct net_device *dev,
 	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
 }
 
+static int vector_net_load_bpf_flash(struct net_device *dev,
+				struct ethtool_flash *efl)
+{
+	struct vector_private *vp = netdev_priv(dev);
+	struct vector_device *vdevice;
+	const struct firmware *fw;
+	int result = 0;
+
+	if (!(vp->options & VECTOR_BPF_FLASH)) {
+		netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
+		return -1;
+	}
+
+	spin_lock(&vp->lock);
+
+	if (vp->bpf != NULL) {
+		if (vp->opened)
+			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
+		kfree(vp->bpf->filter);
+		vp->bpf->filter = NULL;
+	} else {
+		vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+		if (vp->bpf == NULL) {
+			netdev_err(dev, "failed to allocate memory for firmware\n");
+			goto flash_fail;
+		}
+	}
+
+	vdevice = find_device(vp->unit);
+
+	if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
+		goto flash_fail;
+
+	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
+	if (!vp->bpf->filter)
+		goto free_buffer;
+
+	vp->bpf->len = fw->size / sizeof(struct sock_filter);
+	release_firmware(fw);
+
+	if (vp->opened)
+		result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
+
+	spin_unlock(&vp->lock);
+
+	return result;
+
+free_buffer:
+	release_firmware(fw);
+
+flash_fail:
+	spin_unlock(&vp->lock);
+	if (vp->bpf != NULL)
+		kfree(vp->bpf->filter);
+	kfree(vp->bpf);
+	vp->bpf = NULL;
+	return -1;
+}
+
 static void vector_get_ringparam(struct net_device *netdev,
 				struct ethtool_ringparam *ring)
 {
@@ -1424,6 +1517,7 @@ static const struct ethtool_ops vector_net_ethtool_ops = {
 	.get_ethtool_stats = vector_get_ethtool_stats,
 	.get_coalesce = vector_get_coalesce,
 	.set_coalesce = vector_set_coalesce,
+	.flash_device = vector_net_load_bpf_flash,
 };
@@ -1528,7 +1622,8 @@ static void vector_eth_configure(
 			.in_write_poll = false,
 			.coalesce = 2,
 			.req_size = get_req_size(def),
-			.in_error = false
+			.in_error = false,
+			.bpf = NULL
 		});
 
 	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -29,10 +29,13 @@
 #define VECTOR_TX (1 << 1)
 #define VECTOR_BPF (1 << 2)
 #define VECTOR_QDISC_BYPASS (1 << 3)
+#define VECTOR_BPF_FLASH (1 << 4)
 
 #define ETH_MAX_PACKET 1500
 #define ETH_HEADER_OTHER 32 /* just in case someone decides to go mad on QnQ */
 
+#define MAX_FILTER_PROG (2 << 16)
+
 struct vector_queue {
 	struct mmsghdr *mmsg_vector;
 	void **skbuff_vector;
@@ -118,10 +121,13 @@ struct vector_private {
 	bool in_write_poll;
 	bool in_error;
 
+	/* guest allowed to use ethtool flash to load bpf */
+	bool bpf_via_flash;
+
 	/* ethtool stats */
 	struct vector_estats estats;
-	void *bpf;
+	struct sock_fprog *bpf;
 
 	char user[0];
 };
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -46,7 +46,8 @@
 #define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
 #define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
 #define UNIX_BIND_FAIL "unix_open : could not bind socket err=%i"
-#define BPF_ATTACH_FAIL "Failed to attach filter size %d to %d, err %d\n"
+#define BPF_ATTACH_FAIL "Failed to attach filter size %d prog %px to %d, err %d\n"
+#define BPF_DETACH_FAIL "Failed to detach filter size %d prog %px to %d, err %d\n"
 
 #define MAX_UN_LEN 107
@@ -660,31 +661,44 @@ int uml_vector_recvmmsg(
 	else
 		return -errno;
 }
-int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len)
+int uml_vector_attach_bpf(int fd, void *bpf)
 {
-	int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, bpf_len);
+	struct sock_fprog *prog = bpf;
+
+	int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, sizeof(struct sock_fprog));
 
 	if (err < 0)
-		printk(KERN_ERR BPF_ATTACH_FAIL, bpf_len, fd, -errno);
+		printk(KERN_ERR BPF_ATTACH_FAIL, prog->len, prog->filter, fd, -errno);
 	return err;
 }
 
-#define DEFAULT_BPF_LEN 6
+int uml_vector_detach_bpf(int fd, void *bpf)
+{
+	struct sock_fprog *prog = bpf;
+
+	int err = setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, bpf, sizeof(struct sock_fprog));
 
-void *uml_vector_default_bpf(int fd, void *mac)
+	if (err < 0)
+		printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno);
+	return err;
+}
+void *uml_vector_default_bpf(void *mac)
 {
 	struct sock_filter *bpf;
 	uint32_t *mac1 = (uint32_t *)(mac + 2);
 	uint16_t *mac2 = (uint16_t *) mac;
-	struct sock_fprog bpf_prog = {
-		.len = 6,
-		.filter = NULL,
-	};
+	struct sock_fprog *bpf_prog;
 
+	bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
+	if (bpf_prog) {
+		bpf_prog->len = DEFAULT_BPF_LEN;
+		bpf_prog->filter = NULL;
+	} else {
+		return NULL;
+	}
 	bpf = uml_kmalloc(
 		sizeof(struct sock_filter) * DEFAULT_BPF_LEN, UM_GFP_KERNEL);
-	if (bpf != NULL) {
-		bpf_prog.filter = bpf;
+	if (bpf) {
+		bpf_prog->filter = bpf;
 		/* ld	[8] */
 		bpf[0] = (struct sock_filter){ 0x20, 0, 0, 0x00000008 };
 		/* jeq	#0xMAC[2-6] jt 2 jf 5*/
@@ -697,12 +711,56 @@ void *uml_vector_default_bpf(int fd, void *mac)
 		bpf[4] = (struct sock_filter){ 0x6, 0, 0, 0x00000000 };
 		/* ret	#0x40000 */
 		bpf[5] = (struct sock_filter){ 0x6, 0, 0, 0x00040000 };
-		if (uml_vector_attach_bpf(
-			fd, &bpf_prog, sizeof(struct sock_fprog)) < 0) {
-			kfree(bpf);
-			bpf = NULL;
-		}
+	} else {
+		kfree(bpf_prog);
+		bpf_prog = NULL;
 	}
-	return bpf;
+	return bpf_prog;
 }
 
+/* Note - this function requires a valid mac being passed as an arg */
+
+void *uml_vector_user_bpf(char *filename)
+{
+	struct sock_filter *bpf;
+	struct sock_fprog *bpf_prog;
+	struct stat statbuf;
+	int res, ffd = -1;
+
+	if (filename == NULL)
+		return NULL;
+
+	if (stat(filename, &statbuf) < 0) {
+		printk(KERN_ERR "Error %d reading bpf file", -errno);
+		return false;
+	}
+	bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
+	if (bpf_prog != NULL) {
+		bpf_prog->len = statbuf.st_size / sizeof(struct sock_filter);
+		bpf_prog->filter = NULL;
+	}
+	ffd = os_open_file(filename, of_read(OPENFLAGS()), 0);
+	if (ffd < 0) {
+		printk(KERN_ERR "Error %d opening bpf file", -errno);
+		goto bpf_failed;
+	}
+	bpf = uml_kmalloc(statbuf.st_size, UM_GFP_KERNEL);
+	if (bpf == NULL) {
+		printk(KERN_ERR "Failed to allocate bpf buffer");
+		goto bpf_failed;
+	}
+	bpf_prog->filter = bpf;
+	res = os_read_file(ffd, bpf, statbuf.st_size);
+	if (res < statbuf.st_size) {
+		printk(KERN_ERR "Failed to read bpf program %s, error %d", filename, res);
+		kfree(bpf);
+		goto bpf_failed;
+	}
+	os_close_file(ffd);
+	return bpf_prog;
+bpf_failed:
+	if (ffd > 0)
+		os_close_file(ffd);
+	kfree(bpf_prog);
+	return NULL;
+}
--- a/arch/um/drivers/vector_user.h
+++ b/arch/um/drivers/vector_user.h
@@ -28,6 +28,8 @@
 #define TRANS_BESS "bess"
 #define TRANS_BESS_LEN strlen(TRANS_BESS)
 
+#define DEFAULT_BPF_LEN 6
+
 #ifndef IPPROTO_GRE
 #define IPPROTO_GRE 0x2F
 #endif
@@ -95,8 +97,10 @@ extern int uml_vector_recvmmsg(
 	unsigned int vlen,
 	unsigned int flags
 );
-extern void *uml_vector_default_bpf(int fd, void *mac);
-extern int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len);
+extern void *uml_vector_default_bpf(void *mac);
+extern void *uml_vector_user_bpf(char *filename);
+extern int uml_vector_attach_bpf(int fd, void *bpf);
+extern int uml_vector_detach_bpf(int fd, void *bpf);
 extern bool uml_raw_enable_qdisc_bypass(int fd);
 extern bool uml_raw_enable_vnet_headers(int fd);
 extern bool uml_tap_enable_vnet_headers(int fd);
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -4,12 +4,12 @@
  *
  * Copyright(c) 2019 Intel Corporation
  *
- * This module allows virtio devices to be used over a vhost-user socket.
+ * This driver allows virtio devices to be used over a vhost-user socket.
  *
  * Guest devices can be instantiated by kernel module or command line
  * parameters. One device will be created for each parameter. Syntax:
- *	[virtio_uml.]device=<socket>:<virtio_id>[:<platform_id>]
+ *	virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
  * where:
  *	<socket>	:= vhost-user socket path to connect
  *	<virtio_id>	:= virtio device id (as in virtio_ids.h)
@@ -42,6 +42,13 @@
 #define to_virtio_uml_device(_vdev) \
 	container_of(_vdev, struct virtio_uml_device, vdev)
 
+struct virtio_uml_platform_data {
+	u32 virtio_device_id;
+	const char *socket_path;
+	struct work_struct conn_broken_wk;
+	struct platform_device *pdev;
+};
+
 struct virtio_uml_device {
 	struct virtio_device vdev;
 	struct platform_device *pdev;
@@ -50,6 +57,7 @@ struct virtio_uml_device {
 	u64 features;
 	u64 protocol_features;
 	u8 status;
+	u8 registered:1;
 };
 
 struct virtio_uml_vq_info {
@@ -83,7 +91,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
 	return 0;
 }
 
-static int full_read(int fd, void *buf, int len)
+static int full_read(int fd, void *buf, int len, bool abortable)
 {
 	int rc;
@@ -93,7 +101,7 @@ static int full_read(int fd, void *buf, int len)
 			buf += rc;
 			len -= rc;
 		}
-	} while (len && (rc > 0 || rc == -EINTR));
+	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
 
 	if (rc < 0)
 		return rc;
@@ -104,28 +112,37 @@ static int full_read(int fd, void *buf, int len)
 static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
 {
-	return full_read(fd, msg, sizeof(msg->header));
+	return full_read(fd, msg, sizeof(msg->header), true);
 }
 
-static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
+static int vhost_user_recv(struct virtio_uml_device *vu_dev,
+			   int fd, struct vhost_user_msg *msg,
 			   size_t max_payload_size)
 {
 	size_t size;
 	int rc = vhost_user_recv_header(fd, msg);
 
+	if (rc == -ECONNRESET && vu_dev->registered) {
+		struct virtio_uml_platform_data *pdata;
+
+		pdata = vu_dev->pdev->dev.platform_data;
+
+		virtio_break_device(&vu_dev->vdev);
+		schedule_work(&pdata->conn_broken_wk);
+	}
 	if (rc)
 		return rc;
 	size = msg->header.size;
 	if (size > max_payload_size)
 		return -EPROTO;
-	return full_read(fd, &msg->payload, size);
+	return full_read(fd, &msg->payload, size, false);
 }
 
 static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
 				struct vhost_user_msg *msg,
 				size_t max_payload_size)
 {
-	int rc = vhost_user_recv(vu_dev->sock, msg, max_payload_size);
+	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, max_payload_size);
 
 	if (rc)
 		return rc;
@@ -155,7 +172,7 @@ static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
 				struct vhost_user_msg *msg,
 				size_t max_payload_size)
 {
-	int rc = vhost_user_recv(vu_dev->req_fd, msg, max_payload_size);
+	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg, max_payload_size);
 
 	if (rc)
 		return rc;
@@ -963,11 +980,6 @@ static void virtio_uml_release_dev(struct device *d)
 
 /* Platform device */
 
-struct virtio_uml_platform_data {
-	u32 virtio_device_id;
-	const char *socket_path;
-};
-
 static int virtio_uml_probe(struct platform_device *pdev)
 {
 	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
@@ -1005,6 +1017,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
 	rc = register_virtio_device(&vu_dev->vdev);
 	if (rc)
 		put_device(&vu_dev->vdev.dev);
+	vu_dev->registered = 1;
 	return rc;
 
 error_init:
@@ -1034,13 +1047,31 @@ static struct device vu_cmdline_parent = {
 static bool vu_cmdline_parent_registered;
 static int vu_cmdline_id;
 
+static int vu_unregister_cmdline_device(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
+
+	kfree(pdata->socket_path);
+	platform_device_unregister(pdev);
+	return 0;
+}
+
+static void vu_conn_broken(struct work_struct *wk)
+{
+	struct virtio_uml_platform_data *pdata;
+
+	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
+	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
+}
+
 static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
 {
 	const char *ids = strchr(device, ':');
 	unsigned int virtio_device_id;
 	int processed, consumed, err;
 	char *socket_path;
-	struct virtio_uml_platform_data pdata;
+	struct virtio_uml_platform_data pdata, *ppdata;
 	struct platform_device *pdev;
 
 	if (!ids || ids == device)
@@ -1079,6 +1110,11 @@ static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
 	err = PTR_ERR_OR_ZERO(pdev);
 	if (err)
 		goto free;
+
+	ppdata = pdev->dev.platform_data;
+	ppdata->pdev = pdev;
+	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
+
 	return 0;
 
 free:
@@ -1121,16 +1157,6 @@ __uml_help(vu_cmdline_param_ops,
 );
 
 
-static int vu_unregister_cmdline_device(struct device *dev, void *data)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
-
-	kfree(pdata->socket_path);
-	platform_device_unregister(pdev);
-	return 0;
-}
-
 static void vu_unregister_cmdline_devices(void)
 {
 	if (vu_cmdline_parent_registered) {
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -170,7 +170,7 @@ int __init main(int argc, char **argv, char **envp)
 	 * that they won't be delivered after the exec, when
 	 * they are definitely not expected.
 	 */
-	unblock_signals_trace();
+	unblock_signals();
 
 	os_info("\n");
 
 	/* Reboot */
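
uml_vector_user_bpf() and the ethtool flash handler above both treat the filter file as nothing more than a packed array of struct sock_filter, so a suitable blob can be produced by any tool that emits classic BPF in that raw form. A minimal user-space sketch (the file name and the trivial accept-all program are examples, not part of this merge):

/* make_bpf_blob.c - write a classic-BPF program in the raw format expected
 * by the UML vector driver's bpffile=/ethtool-flash paths: a flat array of
 * struct sock_filter written byte-for-byte to a file.
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/filter.h>

int main(void)
{
	/* one instruction: ret #0x40000 - accept every packet (example only) */
	struct sock_filter prog[] = {
		{ 0x06, 0, 0, 0x00040000 },
	};
	size_t n = sizeof(prog) / sizeof(prog[0]);
	FILE *f = fopen("filter.bpf", "wb");

	if (f == NULL) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fwrite(prog, sizeof(prog[0]), n, f) != n) {
		perror("fwrite");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	return EXIT_SUCCESS;
}

A real filter can be generated by compiling a pcap expression with "tcpdump -dd" and pasting the emitted initializer lines into prog[].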