Commit 98d731bb authored by K. Y. Srinivasan, committed by Greg Kroah-Hartman

Drivers: hv: vmbus: Cleanup vmbus_close_internal()

Eliminate calls to BUG_ON() in vmbus_close_internal().
We have chosen to potentially leak memory rather than crash the guest
in case of failures.

In this version of the patch I have addressed comments from
Dan Carpenter (dan.carpenter@oracle.com).
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Cc: <stable@vger.kernel.org>
Tested-by: Sitsofe Wheeler <sitsofe@yahoo.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 45d727ce
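
As background for the change, here is a minimal, self-contained userspace C sketch (not kernel code) of the error-handling pattern this patch adopts: on failure, log the error and return it to the caller, deliberately leaking the resource that can no longer be torn down safely, rather than bringing the whole guest down with BUG_ON(). The names struct fake_channel and fake_post_close_msg() are hypothetical stand-ins for the vmbus equivalents.

/*
 * Userspace analogy only: replace a crash-on-failure assertion with
 * error propagation, accepting a deliberate leak on the failure path.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_channel {
	void *ring_buffer;	/* stands in for the channel ring buffer pages */
};

/* Pretend host call; returns 0 on success, a negative error code on failure. */
static int fake_post_close_msg(struct fake_channel *ch)
{
	(void)ch;
	return -5;		/* simulate an I/O error from the host */
}

static int fake_close_channel(struct fake_channel *ch)
{
	int ret = fake_post_close_msg(ch);

	if (ret) {
		/*
		 * The old style would be assert(ret == 0) / BUG_ON(ret != 0),
		 * taking the whole process (guest) down. Instead, leak
		 * ch->ring_buffer and report the error to the caller.
		 */
		fprintf(stderr, "close failed: post msg returned %d\n", ret);
		return ret;
	}

	free(ch->ring_buffer);	/* only freed on the success path */
	ch->ring_buffer = NULL;
	return 0;
}

int main(void)
{
	struct fake_channel ch = { .ring_buffer = malloc(4096) };

	return fake_close_channel(&ch) ? 1 : 0;
}

Leaking a single ring buffer on a close that the host has already failed is far less damaging than a guest panic, which is the trade-off the commit message describes.
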
@@ -478,7 +478,7 @@ static void reset_channel_cb(void *arg)
 	channel->onchannel_callback = NULL;
 }
 
-static void vmbus_close_internal(struct vmbus_channel *channel)
+static int vmbus_close_internal(struct vmbus_channel *channel)
 {
 	struct vmbus_channel_close_channel *msg;
 	int ret;
@@ -501,11 +501,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
-	BUG_ON(ret != 0);
+	if (ret) {
+		pr_err("Close failed: close post msg return is %d\n", ret);
+		/*
+		 * If we failed to post the close msg,
+		 * it is perhaps better to leak memory.
+		 */
+		return ret;
+	}
 
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->ringbuffer_gpadlhandle)
-		vmbus_teardown_gpadl(channel,
-				     channel->ringbuffer_gpadlhandle);
+	if (channel->ringbuffer_gpadlhandle) {
+		ret = vmbus_teardown_gpadl(channel,
+					   channel->ringbuffer_gpadlhandle);
+		if (ret) {
+			pr_err("Close failed: teardown gpadl return %d\n", ret);
+			/*
+			 * If we failed to teardown gpadl,
+			 * it is perhaps better to leak memory.
+			 */
+			return ret;
+		}
+	}
 
 	/* Cleanup the ring buffers for this channel */
 	hv_ringbuffer_cleanup(&channel->outbound);
@@ -514,7 +531,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
 	free_pages((unsigned long)channel->ringbuffer_pages,
 		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
+	return ret;
 }
 
 /*
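
The callers of vmbus_close_internal() are not part of this diff, so nothing here shows whether they act on the new int return value. Purely as a hypothetical sketch (vmbus_close_example() is not a real function in the driver; it assumes the usual pr_warn() helper), a wrapper along the lines of vmbus_close() could now at least report a close that did not complete cleanly:

/* Hypothetical caller of the now int-returning close path. */
static void vmbus_close_example(struct vmbus_channel *channel)
{
	int ret;

	ret = vmbus_close_internal(channel);
	if (ret)
		pr_warn("channel close did not complete cleanly: %d\n", ret);
}
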