Commit a9f61ca7 authored by Vitaly Kuznetsov, committed by Greg Kroah-Hartman

Drivers: hv: avoid vfree() on crash

When we crash from NMI context (e.g. after an NMI injected by the host when
'sysctl -w kernel.unknown_nmi_panic=1' is set) we hit

    kernel BUG at mm/vmalloc.c:1530!

as vfree() is not allowed from NMI context. While the issue could be solved
with an in_nmi() check instead, I opted to skip vfree() on all sorts of
crashes to reduce the amount of work that can cause subsequent crashes. We
don't really need to free anything on crash.
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 069b188f
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -278,7 +278,7 @@ int hv_init(void)
  *
  * This routine is called normally during driver unloading or exiting.
  */
-void hv_cleanup(void)
+void hv_cleanup(bool crash)
 {
 	union hv_x64_msr_hypercall_contents hypercall_msr;
 
@@ -288,6 +288,7 @@ void hv_cleanup(void)
 	if (hv_context.hypercall_page) {
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-		vfree(hv_context.hypercall_page);
+		if (!crash)
+			vfree(hv_context.hypercall_page);
 		hv_context.hypercall_page = NULL;
 	}
@@ -308,6 +309,7 @@ void hv_cleanup(void)
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
-		vfree(hv_context.tsc_page);
+		if (!crash)
+			vfree(hv_context.tsc_page);
 		hv_context.tsc_page = NULL;
 	}
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -495,7 +495,7 @@ struct hv_ring_buffer_debug_info {
 extern int hv_init(void);
 
-extern void hv_cleanup(void);
+extern void hv_cleanup(bool crash);
 
 extern int hv_post_message(union hv_connection_id connection_id,
 			   enum hv_message_type message_type,
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -874,7 +874,7 @@ static int vmbus_bus_init(void)
 	bus_unregister(&hv_bus);
 
 err_cleanup:
-	hv_cleanup();
+	hv_cleanup(false);
 
 	return ret;
 }
@@ -1326,7 +1326,7 @@ static void hv_kexec_handler(void)
 	vmbus_initiate_unload(false);
 	for_each_online_cpu(cpu)
 		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
-	hv_cleanup();
+	hv_cleanup(false);
 };
 
 static void hv_crash_handler(struct pt_regs *regs)
@@ -1338,7 +1338,7 @@ static void hv_crash_handler(struct pt_regs *regs)
 	 * for kdump.
 	 */
 	hv_synic_cleanup(NULL);
-	hv_cleanup();
+	hv_cleanup(true);
 };
 
 static int __init hv_acpi_init(void)
@@ -1398,7 +1398,7 @@ static void __exit vmbus_exit(void)
 			&hyperv_panic_block);
 	}
 	bus_unregister(&hv_bus);
-	hv_cleanup();
+	hv_cleanup(false);
 	for_each_online_cpu(cpu) {
 		tasklet_kill(hv_context.event_dpc[cpu]);
 		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
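
The pattern generalizes: teardown takes a crash flag and skips only the step
that is unsafe in NMI/crash context (the vfree()), while the MSR writes and
pointer clearing still run. Below is a minimal, self-contained C sketch of
that pattern, not the kernel code itself: the names (demo_ctx, demo_cleanup)
are hypothetical, and plain free() stands in for vfree().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	void *hypercall_page;   /* stands in for hv_context.hypercall_page */
};

/* Teardown with a crash flag: skip freeing when called from a crash
 * path, since vfree() must not be called from NMI context.  free()
 * here is only a stand-in for the kernel's vfree(). */
static void demo_cleanup(struct demo_ctx *ctx, bool crash)
{
	if (ctx->hypercall_page) {
		/* the wrmsrl() disabling the page would go here; that
		 * part is crash-safe and is never skipped */
		if (!crash)
			free(ctx->hypercall_page);
		/* clear the pointer either way so nothing reuses it */
		ctx->hypercall_page = NULL;
	}
}

int main(void)
{
	struct demo_ctx ctx = { .hypercall_page = malloc(4096) };

	demo_cleanup(&ctx, false);      /* normal unload: frees */

	ctx.hypercall_page = malloc(4096);
	demo_cleanup(&ctx, true);       /* crash path: leaks on purpose */

	printf("hypercall_page is %s\n", ctx.hypercall_page ? "set" : "NULL");
	return 0;
}

Leaking the pages on the crash path is deliberate: the machine is about to
kexec into a kdump kernel or reboot, so there is no point in reclaiming the
memory, and avoiding the extra work reduces the chance of a secondary crash.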