Commit ff1ffd71 authored by Linus Torvalds

Merge tag 'hyperv-fixes-signed-20210915' of...

Merge tag 'hyperv-fixes-signed-20210915' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv fixes from Wei Liu:

 - Fix kernel crash caused by uio driver (Vitaly Kuznetsov)

 - Remove on-stack cpumask from HV APIC code (Wei Liu)

* tag 'hyperv-fixes-signed-20210915' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  x86/hyperv: remove on-stack cpumask from hv_send_ipi_mask_allbutself
  asm-generic/hyperv: provide cpumask_to_vpset_noself
  Drivers: hv: vmbus: Fix kernel crash upon unbinding a device from uio_hv_generic driver
parents 453fa43c dfb5c1e1
...@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val) ...@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
/* /*
* IPI implementation on Hyper-V. * IPI implementation on Hyper-V.
*/ */
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
bool exclude_self)
{ {
struct hv_send_ipi_ex **arg; struct hv_send_ipi_ex **arg;
struct hv_send_ipi_ex *ipi_arg; struct hv_send_ipi_ex *ipi_arg;
...@@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) ...@@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
if (!cpumask_equal(mask, cpu_present_mask)) { if (!cpumask_equal(mask, cpu_present_mask)) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); if (exclude_self)
nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
else
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
} }
if (nr_bank < 0) if (nr_bank < 0)
goto ipi_mask_ex_done; goto ipi_mask_ex_done;
...@@ -138,15 +142,25 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) ...@@ -138,15 +142,25 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
return hv_result_success(status); return hv_result_success(status);
} }
static bool __send_ipi_mask(const struct cpumask *mask, int vector) static bool __send_ipi_mask(const struct cpumask *mask, int vector,
bool exclude_self)
{ {
int cur_cpu, vcpu; int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg; struct hv_send_ipi ipi_arg;
u64 status; u64 status;
unsigned int weight;
trace_hyperv_send_ipi_mask(mask, vector); trace_hyperv_send_ipi_mask(mask, vector);
if (cpumask_empty(mask)) weight = cpumask_weight(mask);
/*
* Do nothing if
* 1. the mask is empty
* 2. the mask only contains self when exclude_self is true
*/
if (weight == 0 ||
(exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
return true; return true;
if (!hv_hypercall_pg) if (!hv_hypercall_pg)
...@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) ...@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
ipi_arg.cpu_mask = 0; ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) { for_each_cpu(cur_cpu, mask) {
if (exclude_self && cur_cpu == this_cpu)
continue;
vcpu = hv_cpu_number_to_vp_number(cur_cpu); vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL) if (vcpu == VP_INVAL)
return false; return false;
...@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) ...@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
return hv_result_success(status); return hv_result_success(status);
do_ex_hypercall: do_ex_hypercall:
return __send_ipi_mask_ex(mask, vector); return __send_ipi_mask_ex(mask, vector, exclude_self);
} }
static bool __send_ipi_one(int cpu, int vector) static bool __send_ipi_one(int cpu, int vector)
...@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector) ...@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false; return false;
if (vp >= 64) if (vp >= 64)
return __send_ipi_mask_ex(cpumask_of(cpu), vector); return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
return hv_result_success(status); return hv_result_success(status);
...@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector) ...@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
static void hv_send_ipi_mask(const struct cpumask *mask, int vector) static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{ {
if (!__send_ipi_mask(mask, vector)) if (!__send_ipi_mask(mask, vector, false))
orig_apic.send_IPI_mask(mask, vector); orig_apic.send_IPI_mask(mask, vector);
} }
static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{ {
unsigned int this_cpu = smp_processor_id(); if (!__send_ipi_mask(mask, vector, true))
struct cpumask new_mask;
const struct cpumask *local_mask;
cpumask_copy(&new_mask, mask);
cpumask_clear_cpu(this_cpu, &new_mask);
local_mask = &new_mask;
if (!__send_ipi_mask(local_mask, vector))
orig_apic.send_IPI_mask_allbutself(mask, vector); orig_apic.send_IPI_mask_allbutself(mask, vector);
} }
...@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector) ...@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
static void hv_send_ipi_all(int vector) static void hv_send_ipi_all(int vector)
{ {
if (!__send_ipi_mask(cpu_online_mask, vector)) if (!__send_ipi_mask(cpu_online_mask, vector, false))
orig_apic.send_IPI_all(vector); orig_apic.send_IPI_all(vector);
} }
......
...@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) ...@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
mutex_unlock(&ring_info->ring_buffer_mutex); mutex_unlock(&ring_info->ring_buffer_mutex);
kfree(ring_info->pkt_buffer); kfree(ring_info->pkt_buffer);
ring_info->pkt_buffer = NULL;
ring_info->pkt_buffer_size = 0; ring_info->pkt_buffer_size = 0;
} }
......
...@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number) ...@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
return hv_vp_index[cpu_number]; return hv_vp_index[cpu_number];
} }
static inline int cpumask_to_vpset(struct hv_vpset *vpset, static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
const struct cpumask *cpus) const struct cpumask *cpus,
bool exclude_self)
{ {
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
int this_cpu = smp_processor_id();
/* valid_bank_mask can represent up to 64 banks */ /* valid_bank_mask can represent up to 64 banks */
if (hv_max_vp_index / 64 >= 64) if (hv_max_vp_index / 64 >= 64)
...@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, ...@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
* Some banks may end up being empty but this is acceptable. * Some banks may end up being empty but this is acceptable.
*/ */
for_each_cpu(cpu, cpus) { for_each_cpu(cpu, cpus) {
if (exclude_self && cpu == this_cpu)
continue;
vcpu = hv_cpu_number_to_vp_number(cpu); vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL) if (vcpu == VP_INVAL)
return -1; return -1;
...@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, ...@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
return nr_bank; return nr_bank;
} }
/*
 * Populate @vpset from @cpus without excluding any CPU.
 * Returns the number of banks used, or -1 on failure
 * (see __cpumask_to_vpset()).
 */
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	return __cpumask_to_vpset(vpset, cpus, false);
}
/*
 * Populate @vpset from @cpus, leaving out the CPU this code is running
 * on.  The caller must not be preemptible: __cpumask_to_vpset() reads
 * smp_processor_id() to identify "self", and a migration while the
 * result is being built or consumed would make the exclusion wrong —
 * hence the WARN below.
 */
static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
					  const struct cpumask *cpus)
{
	WARN_ON_ONCE(preemptible());

	return __cpumask_to_vpset(vpset, cpus, true);
}
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void); bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void); bool hv_is_hibernation_supported(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment