Commit d74669eb authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-x86-generic-6.5' of https://github.com/kvm-x86/linux into HEAD

Common KVM changes for 6.5:

 - Fix unprotected vcpu->pid dereference via debugfs

 - Fix KVM_BUG() and KVM_BUG_ON() macros with 64-bit conditionals

 - Refactor failure path in kvm_io_bus_unregister_dev() to simplify the code

 - Misc cleanups
parents cc744042 cc77b95a
...@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu, ...@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
: -EOPNOTSUPP; : -EOPNOTSUPP;
} }
/*
 * Run the device's optional teardown hook. Devices that need no
 * cleanup simply leave ops->destructor NULL.
 */
static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
if (!dev->ops->destructor)
return;
dev->ops->destructor(dev);
}
#endif /* __KVM_IODEV_H__ */ #endif /* __KVM_IODEV_H__ */
...@@ -849,7 +849,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm) ...@@ -849,7 +849,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
#define KVM_BUG(cond, kvm, fmt...) \ #define KVM_BUG(cond, kvm, fmt...) \
({ \ ({ \
int __ret = (cond); \ bool __ret = !!(cond); \
\ \
if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
kvm_vm_bugged(kvm); \ kvm_vm_bugged(kvm); \
...@@ -858,7 +858,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm) ...@@ -858,7 +858,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
#define KVM_BUG_ON(cond, kvm) \ #define KVM_BUG_ON(cond, kvm) \
({ \ ({ \
int __ret = (cond); \ bool __ret = !!(cond); \
\ \
if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
kvm_vm_bugged(kvm); \ kvm_vm_bugged(kvm); \
......
...@@ -1617,7 +1617,7 @@ struct kvm_s390_ucas_mapping { ...@@ -1617,7 +1617,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs) #define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs) #define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
/* /*
* vcpu version available with KVM_ENABLE_CAP * vcpu version available with KVM_CAP_ENABLE_CAP
* vm version available with KVM_CAP_ENABLE_CAP_VM * vm version available with KVM_CAP_ENABLE_CAP_VM
*/ */
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap) #define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
......
...@@ -186,15 +186,10 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, ...@@ -186,15 +186,10 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
coalesced_mmio_in_range(dev, zone->addr, zone->size)) { coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
r = kvm_io_bus_unregister_dev(kvm, r = kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
kvm_iodevice_destructor(&dev->dev);
/* /*
* On failure, unregister destroys all devices on the * On failure, unregister destroys all devices on the
* bus _except_ the target device, i.e. coalesced_zones * bus, including the target device. There's no need
* has been modified. Bail after destroying the target * to restart the walk as there aren't any zones left.
* device, there's no need to restart the walk as there
* aren't any zones left.
*/ */
if (r) if (r)
break; break;
......
...@@ -889,9 +889,9 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm, ...@@ -889,9 +889,9 @@ static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
unlock_fail: unlock_fail:
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
kfree(p);
fail: fail:
kfree(p);
eventfd_ctx_put(eventfd); eventfd_ctx_put(eventfd);
return ret; return ret;
...@@ -901,7 +901,7 @@ static int ...@@ -901,7 +901,7 @@ static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_ioeventfd *args) struct kvm_ioeventfd *args)
{ {
struct _ioeventfd *p, *tmp; struct _ioeventfd *p;
struct eventfd_ctx *eventfd; struct eventfd_ctx *eventfd;
struct kvm_io_bus *bus; struct kvm_io_bus *bus;
int ret = -ENOENT; int ret = -ENOENT;
...@@ -915,8 +915,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -915,8 +915,7 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->slots_lock);
list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { list_for_each_entry(p, &kvm->ioeventfds, list) {
if (p->bus_idx != bus_idx || if (p->bus_idx != bus_idx ||
p->eventfd != eventfd || p->eventfd != eventfd ||
p->addr != args->addr || p->addr != args->addr ||
...@@ -931,7 +930,6 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -931,7 +930,6 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
bus = kvm_get_bus(kvm, bus_idx); bus = kvm_get_bus(kvm, bus_idx);
if (bus) if (bus)
bus->ioeventfd_count--; bus->ioeventfd_count--;
ioeventfd_release(p);
ret = 0; ret = 0;
break; break;
} }
......
...@@ -3888,7 +3888,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu) ...@@ -3888,7 +3888,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
static int vcpu_get_pid(void *data, u64 *val) static int vcpu_get_pid(void *data, u64 *val)
{ {
struct kvm_vcpu *vcpu = data; struct kvm_vcpu *vcpu = data;
*val = pid_nr(rcu_access_pointer(vcpu->pid));
rcu_read_lock();
*val = pid_nr(rcu_dereference(vcpu->pid));
rcu_read_unlock();
return 0; return 0;
} }
...@@ -3990,7 +3993,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) ...@@ -3990,7 +3993,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r < 0) if (r < 0)
goto kvm_put_xa_release; goto kvm_put_xa_release;
if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) { if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
r = -EINVAL; r = -EINVAL;
goto kvm_put_xa_release; goto kvm_put_xa_release;
} }
...@@ -5313,6 +5316,12 @@ static void hardware_disable_all(void) ...@@ -5313,6 +5316,12 @@ static void hardware_disable_all(void)
} }
#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */ #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
/*
 * Invoke a device's destructor callback, if it registered one in its
 * ops table; a NULL destructor means the device needs no teardown.
 */
static void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
if (!dev->ops->destructor)
return;
dev->ops->destructor(dev);
}
static void kvm_io_bus_destroy(struct kvm_io_bus *bus) static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{ {
int i; int i;
...@@ -5536,7 +5545,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, ...@@ -5536,7 +5545,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev) struct kvm_io_device *dev)
{ {
int i, j; int i;
struct kvm_io_bus *new_bus, *bus; struct kvm_io_bus *new_bus, *bus;
lockdep_assert_held(&kvm->slots_lock); lockdep_assert_held(&kvm->slots_lock);
...@@ -5566,18 +5575,19 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ...@@ -5566,18 +5575,19 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
rcu_assign_pointer(kvm->buses[bus_idx], new_bus); rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu); synchronize_srcu_expedited(&kvm->srcu);
/* Destroy the old bus _after_ installing the (null) bus. */ /*
* If NULL bus is installed, destroy the old bus, including all the
* attached devices. Otherwise, destroy the caller's device only.
*/
if (!new_bus) { if (!new_bus) {
pr_err("kvm: failed to shrink bus, removing it completely\n"); pr_err("kvm: failed to shrink bus, removing it completely\n");
for (j = 0; j < bus->dev_count; j++) { kvm_io_bus_destroy(bus);
if (j == i) return -ENOMEM;
continue;
kvm_iodevice_destructor(bus->range[j].dev);
}
} }
kvm_iodevice_destructor(dev);
kfree(bus); kfree(bus);
return new_bus ? 0 : -ENOMEM; return 0;
} }
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment