Drivers: hv: vmbus: Clean up hv_sock channels by force upon suspend

Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
hibernation. There is no better way to clean up the channels, since some
of them may still be referenced by userspace apps when hibernation is
triggered: in that case, with this patch, the channels' "rescind" fields
are set, and the apps fully destroy the channels after hibernation.

Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
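
For readers unfamiliar with the VMBus message flow, the stand-alone toy model
below illustrates what the faked message ultimately achieves: the channel with
the matching relid is marked rescinded and its rescind callback runs, so the
hv_sock layer can finish the teardown once userspace drops its references.
All sketch_* names and the simplified struct layouts are invented for
illustration only; the real definitions live in include/linux/hyperv.h and
drivers/hv/, and the actual patch is in the diff below.

/*
 * Toy model only, not kernel code: mimic the effect of processing a faked
 * CHANNELMSG_RESCIND_CHANNELOFFER for an hv_sock channel.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_channel {
	uint32_t child_relid;
	bool rescind;				/* set once the channel is rescinded */
	void (*chn_rescind_callback)(struct sketch_channel *chan);
};

struct sketch_rescind_offer {
	uint32_t msgtype;			/* placeholder for the rescind message type */
	uint32_t child_relid;			/* which channel to rescind */
};

static struct sketch_channel channels[2] = {
	{ .child_relid = 14 },
	{ .child_relid = 15 },
};

static void sketch_hvsock_rescind_cb(struct sketch_channel *chan)
{
	printf("hv_sock: channel %u rescinded; destroyed once userspace lets go\n",
	       chan->child_relid);
}

/* Model of rescind handling: find the channel by relid and mark it rescinded. */
static void sketch_on_rescind(const struct sketch_rescind_offer *rescind)
{
	for (size_t i = 0; i < sizeof(channels) / sizeof(channels[0]); i++) {
		struct sketch_channel *chan = &channels[i];

		if (chan->child_relid != rescind->child_relid)
			continue;
		chan->rescind = true;
		if (chan->chn_rescind_callback)
			chan->chn_rescind_callback(chan);
		return;
	}
}

int main(void)
{
	/* "Fake" a rescind message for relid 15, as the patch does on suspend. */
	struct sketch_rescind_offer fake = { .msgtype = 0, .child_relid = 15 };

	channels[1].chn_rescind_callback = sketch_hvsock_rescind_cb;
	sketch_on_rescind(&fake);
	return 0;
}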

@@ -24,6 +24,7 @@
#include <linux/sched/task_stack.h>
#include <asm/mshyperv.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
@@ -1069,6 +1070,41 @@ msg_handled:
	vmbus_signal_eom(msg, message_type);
}

/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * sizeof(*ctx) is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.u.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work_on(vmbus_connection.connect_cpu,
		      vmbus_connection.work_queue,
		      &ctx->work);
}

/*
 * Direct callback for channels using other deferred processing
@@ -2091,6 +2127,25 @@ acpi_walk_err:
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	vmbus_connection.conn_state = DISCONNECTED;