v4.6
 
  1/*
  2 * An implementation of host initiated guest snapshot.
  3 *
  4 *
  5 * Copyright (C) 2013, Microsoft, Inc.
  6 * Author : K. Y. Srinivasan <kys@microsoft.com>
  7 *
  8 * This program is free software; you can redistribute it and/or modify it
  9 * under the terms of the GNU General Public License version 2 as published
 10 * by the Free Software Foundation.
 11 *
 12 * This program is distributed in the hope that it will be useful, but
 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 15 * NON INFRINGEMENT.  See the GNU General Public License for more
 16 * details.
 17 *
 18 */
 19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 20
 21#include <linux/net.h>
 22#include <linux/nls.h>
 23#include <linux/connector.h>
 24#include <linux/workqueue.h>
 25#include <linux/hyperv.h>
 26
 27#include "hyperv_vmbus.h"
 28#include "hv_utils_transport.h"
 29
 30#define VSS_MAJOR  5
 31#define VSS_MINOR  0
 32#define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
 33
 34#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
 35
 36/*
 37 * Global state maintained for the transaction that is being processed. For a class
 38 * of integration services, including the "VSS service", the specified protocol
 39 * is a "request/response" protocol which means that there can only be a single
 40 * outstanding transaction from the host at any given point in time. We use
 41 * this to simplify memory management in this driver - we cache and process
 42 * only one message at a time.
 43 *
 44 * While the request/response protocol is guaranteed by the host, we further
 45 * ensure this by serializing packet processing in this driver - we do not
 46 * read additional packets from the VMBus until the current packet is fully
 47 * handled.
 48 */
 49
 50static struct {
 51	int state;   /* hvutil_device_state */
 52	int recv_len; /* number of bytes received. */
 53	struct vmbus_channel *recv_channel; /* chn we got the request */
 54	u64 recv_req_id; /* request ID. */
 55	struct hv_vss_msg  *msg; /* current message */
 56} vss_transaction;
 57
 58
 59static void vss_respond_to_host(int error);
 60
 61/*
 62 * This state maintains the version number registered by the daemon.
 63 */
 64static int dm_reg_value;
 65
 66static const char vss_devname[] = "vmbus/hv_vss";
 67static __u8 *recv_buffer;
 68static struct hvutil_transport *hvt;
 69
 70static void vss_send_op(struct work_struct *dummy);
 71static void vss_timeout_func(struct work_struct *dummy);
 72
 73static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
 74static DECLARE_WORK(vss_send_op_work, vss_send_op);
 75
 76static void vss_poll_wrapper(void *channel)
 77{
 78	/* Transaction is finished, reset the state here to avoid races. */
 79	vss_transaction.state = HVUTIL_READY;
 80	hv_vss_onchannelcallback(channel);
 81}
 82
 83/*
 84 * Callback when data is received from user mode.
 85 */
 86
 87static void vss_timeout_func(struct work_struct *dummy)
 88{
 89	/*
 90	 * Timed out waiting for the userspace component to reply.
 91	 */
 92	pr_warn("VSS: timeout waiting for daemon to reply\n");
 93	vss_respond_to_host(HV_E_FAIL);
 94
 95	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
 96}
 97
 98static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
 99{
100	u32 our_ver = VSS_OP_REGISTER1;
101
102	switch (vss_msg->vss_hdr.operation) {
103	case VSS_OP_REGISTER:
104		/* Daemon doesn't expect us to reply */
105		dm_reg_value = VSS_OP_REGISTER;
106		break;
107	case VSS_OP_REGISTER1:
108		/* Daemon expects us to reply with our own version */
109		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
110			return -EFAULT;
111		dm_reg_value = VSS_OP_REGISTER1;
112		break;
113	default:
114		return -EINVAL;
115	}
116	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
117	pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
118	return 0;
119}
120
121static int vss_on_msg(void *msg, int len)
122{
123	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
124
125	if (len != sizeof(*vss_msg))
126		return -EINVAL;
127
128	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
129	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
130		/*
131		 * Don't process registration messages if we're in the middle
132		 * of processing a transaction.
133		 */
134		if (vss_transaction.state > HVUTIL_READY)
135			return -EINVAL;
136		return vss_handle_handshake(vss_msg);
137	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
138		vss_transaction.state = HVUTIL_USERSPACE_RECV;
139		if (cancel_delayed_work_sync(&vss_timeout_work)) {
140			vss_respond_to_host(vss_msg->error);
141			/* Transaction is finished, reset the state. */
142			hv_poll_channel(vss_transaction.recv_channel,
143					vss_poll_wrapper);
144		}
145	} else {
146		/* This is a spurious call! */
147		pr_warn("VSS: Transaction not active\n");
148		return -EINVAL;
149	}
150	return 0;
151}
152
153
154static void vss_send_op(struct work_struct *dummy)
155{
156	int op = vss_transaction.msg->vss_hdr.operation;
157	int rc;
158	struct hv_vss_msg *vss_msg;
159
160	/* The transaction state is wrong. */
161	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
162		return;
163
164	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
165	if (!vss_msg)
166		return;
167
168	vss_msg->vss_hdr.operation = op;
169
170	vss_transaction.state = HVUTIL_USERSPACE_REQ;
171	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg));
172	if (rc) {
173		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
174		if (cancel_delayed_work_sync(&vss_timeout_work)) {
175			vss_respond_to_host(HV_E_FAIL);
176			vss_transaction.state = HVUTIL_READY;
177		}
178	}
179
180	kfree(vss_msg);
181
182	return;
183}
184
185/*
186 * Send a response back to the host.
187 */
188
189static void
190vss_respond_to_host(int error)
191{
192	struct icmsg_hdr *icmsghdrp;
193	u32	buf_len;
194	struct vmbus_channel *channel;
195	u64	req_id;
196
197	/*
198	 * Copy the global state for completing the transaction. Note that
199	 * only one transaction can be active at a time.
200	 */
201
202	buf_len = vss_transaction.recv_len;
203	channel = vss_transaction.recv_channel;
204	req_id = vss_transaction.recv_req_id;
205
206	icmsghdrp = (struct icmsg_hdr *)
207			&recv_buffer[sizeof(struct vmbuspipe_hdr)];
208
209	if (channel->onchannel_callback == NULL)
210		/*
211		 * We have raced with util driver being unloaded;
212		 * silently return.
213		 */
214		return;
215
216	icmsghdrp->status = error;
217
218	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
219
220	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
221				VM_PKT_DATA_INBAND, 0);
222
223}
224
225/*
226 * This callback is invoked when we get a VSS message from the host.
227 * The host ensures that only one VSS transaction can be active at a time.
228 */
229
230void hv_vss_onchannelcallback(void *context)
231{
232	struct vmbus_channel *channel = context;
233	u32 recvlen;
234	u64 requestid;
235	struct hv_vss_msg *vss_msg;
236
237
238	struct icmsg_hdr *icmsghdrp;
239	struct icmsg_negotiate *negop = NULL;
240
241	if (vss_transaction.state > HVUTIL_READY)
242		return;
243
244	vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
245			 &requestid);
246
247	if (recvlen > 0) {
248		icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
249			sizeof(struct vmbuspipe_hdr)];
250
251		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
252			vmbus_prep_negotiate_resp(icmsghdrp, negop,
253				 recv_buffer, UTIL_FW_VERSION,
254				 VSS_VERSION);
255		} else {
256			vss_msg = (struct hv_vss_msg *)&recv_buffer[
257				sizeof(struct vmbuspipe_hdr) +
258				sizeof(struct icmsg_hdr)];
259
260			/*
261			 * Stash away this global state for completing the
262			 * transaction; note transactions are serialized.
263			 */
264
265			vss_transaction.recv_len = recvlen;
266			vss_transaction.recv_req_id = requestid;
267			vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
268
269			switch (vss_msg->vss_hdr.operation) {
270				/*
271				 * Initiate a "freeze/thaw"
272				 * operation in the guest.
273				 * We respond to the host once
274				 * the operation is complete.
275				 *
276				 * We send the message to the
277				 * user space daemon and the
278				 * operation is performed in
279				 * the daemon.
280				 */
281			case VSS_OP_FREEZE:
282			case VSS_OP_THAW:
283				if (vss_transaction.state < HVUTIL_READY) {
284					/* Userspace is not registered yet */
285					vss_respond_to_host(HV_E_FAIL);
286					return;
287				}
288				vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
289				schedule_work(&vss_send_op_work);
290				schedule_delayed_work(&vss_timeout_work,
291						      VSS_USERSPACE_TIMEOUT);
292				return;
293
294			case VSS_OP_HOT_BACKUP:
295				vss_msg->vss_cf.flags =
296					 VSS_HBU_NO_AUTO_RECOVERY;
297				vss_respond_to_host(0);
298				return;
299
300			case VSS_OP_GET_DM_INFO:
301				vss_msg->dm_info.flags = 0;
302				vss_respond_to_host(0);
303				return;
304
305			default:
306				vss_respond_to_host(0);
307				return;
308
309			}
310
311		}
312
313		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
314			| ICMSGHDRFLAG_RESPONSE;
315
316		vmbus_sendpacket(channel, recv_buffer,
317				       recvlen, requestid,
318				       VM_PKT_DATA_INBAND, 0);
319	}
320
321}
322
323static void vss_on_reset(void)
324{
325	if (cancel_delayed_work_sync(&vss_timeout_work))
326		vss_respond_to_host(HV_E_FAIL);
327	vss_transaction.state = HVUTIL_DEVICE_INIT;
328}
329
330int
331hv_vss_init(struct hv_util_service *srv)
332{
333	if (vmbus_proto_version < VERSION_WIN8_1) {
334		pr_warn("Integration service 'Backup (volume snapshot)'"
335			" not supported on this host version.\n");
336		return -ENOTSUPP;
337	}
338	recv_buffer = srv->recv_buffer;
339	vss_transaction.recv_channel = srv->channel;
340
341	/*
342	 * When this driver loads, the user level daemon that
343	 * processes the host requests may not yet be running.
344	 * Defer processing channel callbacks until the daemon
345	 * has registered.
346	 */
347	vss_transaction.state = HVUTIL_DEVICE_INIT;
348
349	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
350				    vss_on_msg, vss_on_reset);
351	if (!hvt)
352		return -EFAULT;
353
354	return 0;
355}
356
357void hv_vss_deinit(void)
358{
359	vss_transaction.state = HVUTIL_DEVICE_DYING;
360	cancel_delayed_work_sync(&vss_timeout_work);
361	cancel_work_sync(&vss_send_op_work);
362	hvutil_transport_destroy(hvt);
363}
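
The listing above is only the kernel half of the transaction: vss_handle_handshake() and vss_on_msg() assume a userspace daemon that opens the hvutil transport device, registers its protocol version, and then answers the forwarded freeze/thaw requests. The fragment below is a minimal, illustrative sketch of that daemon side, not the in-tree hv_vss_daemon; it assumes the transport shows up as /dev/vmbus/hv_vss (derived from vss_devname above) and that struct hv_vss_msg and the VSS_OP_* values come from the uapi <linux/hyperv.h> header, and it omits the real freeze/thaw work.

/*
 * Illustrative daemon-side counterpart to vss_on_msg()/vss_send_op() above.
 * Assumptions: the hvutil transport is exposed as /dev/vmbus/hv_vss and
 * struct hv_vss_msg, VSS_OP_REGISTER1, VSS_OP_FREEZE and VSS_OP_THAW come
 * from the uapi <linux/hyperv.h>. Error handling is kept to a minimum.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/hyperv.h>

int main(void)
{
	struct hv_vss_msg msg;
	int fd = open("/dev/vmbus/hv_vss", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vmbus/hv_vss");
		return 1;
	}

	/*
	 * Register with the kernel. vss_on_msg() insists on a write of
	 * exactly sizeof(struct hv_vss_msg), and for VSS_OP_REGISTER1 the
	 * kernel answers with its own version (vss_handle_handshake()).
	 */
	memset(&msg, 0, sizeof(msg));
	msg.vss_hdr.operation = VSS_OP_REGISTER1;
	if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
		perror("register");
		return 1;
	}

	/* Main loop: handle requests forwarded by vss_send_op(). */
	for (;;) {
		ssize_t len = read(fd, &msg, sizeof(msg));

		if (len <= 0)
			break;
		if (len != sizeof(msg))
			continue;	/* e.g. the short version reply */

		switch (msg.vss_hdr.operation) {
		case VSS_OP_FREEZE:
		case VSS_OP_THAW:
			/* ... freeze or thaw the file systems here ... */
			msg.error = 0;	/* HV_E_FAIL on failure */
			if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
				return 1;
			break;
		default:
			break;
		}
	}
	close(fd);
	return 0;
}

The write back into the device is what lands in vss_on_msg() above: a full-sized message arriving while the driver is in HVUTIL_USERSPACE_REQ completes the transaction and cancels vss_timeout_work.
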
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * An implementation of host initiated guest snapshot.
  4 *
  5 * Copyright (C) 2013, Microsoft, Inc.
  6 * Author : K. Y. Srinivasan <kys@microsoft.com>
  7 */
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/net.h>
 11#include <linux/nls.h>
 12#include <linux/connector.h>
 13#include <linux/workqueue.h>
 14#include <linux/hyperv.h>
 15#include <asm/hyperv-tlfs.h>
 16
 17#include "hyperv_vmbus.h"
 18#include "hv_utils_transport.h"
 19
 20#define VSS_MAJOR  5
 21#define VSS_MINOR  0
 22#define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
 23
 24#define VSS_VER_COUNT 1
 25static const int vss_versions[] = {
 26	VSS_VERSION
 27};
 28
 29#define FW_VER_COUNT 1
 30static const int fw_versions[] = {
 31	UTIL_FW_VERSION
 32};
 33
 34/* See comment with struct hv_vss_msg regarding the max VMbus packet size */
 35#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2)
 36
 37/*
 38 * Timeout values are based on expectations from the host
 39 */
 40#define VSS_FREEZE_TIMEOUT (15 * 60)
 41
 42/*
 43 * Global state maintained for the transaction that is being processed. For a class
 44 * of integration services, including the "VSS service", the specified protocol
 45 * is a "request/response" protocol which means that there can only be a single
 46 * outstanding transaction from the host at any given point in time. We use
 47 * this to simplify memory management in this driver - we cache and process
 48 * only one message at a time.
 49 *
 50 * While the request/response protocol is guaranteed by the host, we further
 51 * ensure this by serializing packet processing in this driver - we do not
 52 * read additional packets from the VMBus until the current packet is fully
 53 * handled.
 54 */
 55
 56static struct {
 57	int state;   /* hvutil_device_state */
 58	int recv_len; /* number of bytes received. */
 59	struct vmbus_channel *recv_channel; /* chn we got the request */
 60	u64 recv_req_id; /* request ID. */
 61	struct hv_vss_msg  *msg; /* current message */
 62} vss_transaction;
 63
 64
 65static void vss_respond_to_host(int error);
 66
 67/*
 68 * This state maintains the version number registered by the daemon.
 69 */
 70static int dm_reg_value;
 71
 72static const char vss_devname[] = "vmbus/hv_vss";
 73static __u8 *recv_buffer;
 74static struct hvutil_transport *hvt;
 75
 76static void vss_timeout_func(struct work_struct *dummy);
 77static void vss_handle_request(struct work_struct *dummy);
 78
 79static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
 80static DECLARE_WORK(vss_handle_request_work, vss_handle_request);
 81
 82static void vss_poll_wrapper(void *channel)
 83{
 84	/* Transaction is finished, reset the state here to avoid races. */
 85	vss_transaction.state = HVUTIL_READY;
 86	tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
 87}
 88
 89/*
 90 * Callback when data is received from user mode.
 91 */
 92
 93static void vss_timeout_func(struct work_struct *dummy)
 94{
 95	/*
 96	 * Timed out waiting for the userspace component to reply.
 97	 */
 98	pr_warn("VSS: timeout waiting for daemon to reply\n");
 99	vss_respond_to_host(HV_E_FAIL);
100
101	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
102}
103
104static void vss_register_done(void)
105{
106	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
107	pr_debug("VSS: userspace daemon registered\n");
108}
109
110static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
111{
112	u32 our_ver = VSS_OP_REGISTER1;
113
114	switch (vss_msg->vss_hdr.operation) {
115	case VSS_OP_REGISTER:
116		/* Daemon doesn't expect us to reply */
117		dm_reg_value = VSS_OP_REGISTER;
118		break;
119	case VSS_OP_REGISTER1:
120		/* Daemon expects us to reply with our own version */
121		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
122					  vss_register_done))
123			return -EFAULT;
124		dm_reg_value = VSS_OP_REGISTER1;
125		break;
126	default:
127		return -EINVAL;
128	}
129	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
130	return 0;
131}
132
133static int vss_on_msg(void *msg, int len)
134{
135	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
136
137	if (len != sizeof(*vss_msg)) {
138		pr_debug("VSS: Message size does not match length\n");
139		return -EINVAL;
140	}
141
142	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
143	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
144		/*
145		 * Don't process registration messages if we're in the middle
146		 * of processing a transaction.
147		 */
148		if (vss_transaction.state > HVUTIL_READY) {
149			pr_debug("VSS: Got unexpected registration request\n");
150			return -EINVAL;
151		}
152
153		return vss_handle_handshake(vss_msg);
154	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
155		vss_transaction.state = HVUTIL_USERSPACE_RECV;
156
157		if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
158			vss_transaction.msg->vss_cf.flags =
159				VSS_HBU_NO_AUTO_RECOVERY;
160
161		if (cancel_delayed_work_sync(&vss_timeout_work)) {
162			vss_respond_to_host(vss_msg->error);
163			/* Transaction is finished, reset the state. */
164			hv_poll_channel(vss_transaction.recv_channel,
165					vss_poll_wrapper);
166		}
167	} else {
168		/* This is a spurious call! */
169		pr_debug("VSS: Transaction not active\n");
170		return -EINVAL;
171	}
172	return 0;
173}
174
175static void vss_send_op(void)
176{
177	int op = vss_transaction.msg->vss_hdr.operation;
178	int rc;
179	struct hv_vss_msg *vss_msg;
180
181	/* The transaction state is wrong. */
182	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
183		pr_debug("VSS: Unexpected attempt to send to daemon\n");
184		return;
185	}
186
187	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
188	if (!vss_msg)
189		return;
190
191	vss_msg->vss_hdr.operation = op;
192
193	vss_transaction.state = HVUTIL_USERSPACE_REQ;
194
195	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
196			VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
197
198	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
199	if (rc) {
200		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
201		if (cancel_delayed_work_sync(&vss_timeout_work)) {
202			vss_respond_to_host(HV_E_FAIL);
203			vss_transaction.state = HVUTIL_READY;
204		}
205	}
206
207	kfree(vss_msg);
208}
209
210static void vss_handle_request(struct work_struct *dummy)
211{
212	switch (vss_transaction.msg->vss_hdr.operation) {
213	/*
214	 * Initiate a "freeze/thaw" operation in the guest.
215	 * We respond to the host once the operation is complete.
216	 *
217	 * We send the message to the user space daemon and the operation is
218	 * performed in the daemon.
219	 */
220	case VSS_OP_THAW:
221	case VSS_OP_FREEZE:
222	case VSS_OP_HOT_BACKUP:
223		if (vss_transaction.state < HVUTIL_READY) {
224			/* Userspace is not registered yet */
225			pr_debug("VSS: Not ready for request.\n");
226			vss_respond_to_host(HV_E_FAIL);
227			return;
228		}
229
230		pr_debug("VSS: Received request for op code: %d\n",
231			vss_transaction.msg->vss_hdr.operation);
232		vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
233		vss_send_op();
234		return;
235	case VSS_OP_GET_DM_INFO:
236		vss_transaction.msg->dm_info.flags = 0;
237		break;
238	default:
239		break;
240	}
241
242	vss_respond_to_host(0);
243	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
244}
245
246/*
247 * Send a response back to the host.
248 */
249
250static void
251vss_respond_to_host(int error)
252{
253	struct icmsg_hdr *icmsghdrp;
254	u32	buf_len;
255	struct vmbus_channel *channel;
256	u64	req_id;
257
258	/*
259	 * Copy the global state for completing the transaction. Note that
260	 * only one transaction can be active at a time.
261	 */
262
263	buf_len = vss_transaction.recv_len;
264	channel = vss_transaction.recv_channel;
265	req_id = vss_transaction.recv_req_id;
266
267	icmsghdrp = (struct icmsg_hdr *)
268			&recv_buffer[sizeof(struct vmbuspipe_hdr)];
269
270	if (channel->onchannel_callback == NULL)
271		/*
272		 * We have raced with util driver being unloaded;
273		 * silently return.
274		 */
275		return;
276
277	icmsghdrp->status = error;
278
279	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
280
281	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
282				VM_PKT_DATA_INBAND, 0);
283
284}
285
286/*
287 * This callback is invoked when we get a VSS message from the host.
288 * The host ensures that only one VSS transaction can be active at a time.
289 */
290
291void hv_vss_onchannelcallback(void *context)
292{
293	struct vmbus_channel *channel = context;
294	u32 recvlen;
295	u64 requestid;
296	struct hv_vss_msg *vss_msg;
297	int vss_srv_version;
298
299	struct icmsg_hdr *icmsghdrp;
300
301	if (vss_transaction.state > HVUTIL_READY)
302		return;
303
304	if (vmbus_recvpacket(channel, recv_buffer, VSS_MAX_PKT_SIZE, &recvlen, &requestid)) {
305		pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
306		return;
307	}
308
309	if (!recvlen)
310		return;
311
312	/* Ensure recvlen is big enough to read header data */
313	if (recvlen < ICMSG_HDR) {
314		pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
315				   recvlen);
316		return;
317	}
318
319	icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];
320
321	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
322		if (vmbus_prep_negotiate_resp(icmsghdrp,
323				recv_buffer, recvlen,
324				fw_versions, FW_VER_COUNT,
325				vss_versions, VSS_VER_COUNT,
326				NULL, &vss_srv_version)) {
327
328			pr_info("VSS IC version %d.%d\n",
329				vss_srv_version >> 16,
330				vss_srv_version & 0xFFFF);
331		}
332	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
333		/* Ensure recvlen is big enough to contain hv_vss_msg */
334		if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
335			pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
336					   recvlen);
337			return;
338		}
339		vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];
340
341		/*
342		 * Stash away this global state for completing the
343		 * transaction; note transactions are serialized.
344		 */
345
346		vss_transaction.recv_len = recvlen;
347		vss_transaction.recv_req_id = requestid;
348		vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
349
350		schedule_work(&vss_handle_request_work);
351		return;
352	} else {
353		pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
354				   icmsghdrp->icmsgtype);
355		return;
356	}
357
358	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
359		ICMSGHDRFLAG_RESPONSE;
360	vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
361			 VM_PKT_DATA_INBAND, 0);
362}
363
364static void vss_on_reset(void)
365{
366	if (cancel_delayed_work_sync(&vss_timeout_work))
367		vss_respond_to_host(HV_E_FAIL);
368	vss_transaction.state = HVUTIL_DEVICE_INIT;
369}
370
371int
372hv_vss_init(struct hv_util_service *srv)
373{
374	if (vmbus_proto_version < VERSION_WIN8_1) {
375		pr_warn("Integration service 'Backup (volume snapshot)'"
376			" not supported on this host version.\n");
377		return -ENOTSUPP;
378	}
379	recv_buffer = srv->recv_buffer;
380	vss_transaction.recv_channel = srv->channel;
381	vss_transaction.recv_channel->max_pkt_size = VSS_MAX_PKT_SIZE;
382
383	/*
384	 * When this driver loads, the user level daemon that
385	 * processes the host requests may not yet be running.
386	 * Defer processing channel callbacks until the daemon
387	 * has registered.
388	 */
389	vss_transaction.state = HVUTIL_DEVICE_INIT;
390
391	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
392				    vss_on_msg, vss_on_reset);
393	if (!hvt) {
394		pr_warn("VSS: Failed to initialize transport\n");
395		return -EFAULT;
396	}
397
398	return 0;
399}
400
401static void hv_vss_cancel_work(void)
402{
403	cancel_delayed_work_sync(&vss_timeout_work);
404	cancel_work_sync(&vss_handle_request_work);
405}
406
407int hv_vss_pre_suspend(void)
408{
409	struct vmbus_channel *channel = vss_transaction.recv_channel;
410	struct hv_vss_msg *vss_msg;
411
412	/*
413	 * Fake a THAW message for the user space daemon in case the daemon
414	 * has frozen the file systems. It doesn't matter if there is already
415	 * a message pending to be delivered to the user space since we force
416	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
417	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
418	 * will reset the device by closing and re-opening it.
419	 */
420	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
421	if (!vss_msg)
422		return -ENOMEM;
423
424	tasklet_disable(&channel->callback_event);
425
426	vss_msg->vss_hdr.operation = VSS_OP_THAW;
427
428	/* Cancel any possible pending work. */
429	hv_vss_cancel_work();
430
431	/* We don't care about the return value. */
432	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
433
434	kfree(vss_msg);
435
436	vss_transaction.state = HVUTIL_READY;
437
438	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
439	return 0;
440}
441
442int hv_vss_pre_resume(void)
443{
444	struct vmbus_channel *channel = vss_transaction.recv_channel;
445
446	tasklet_enable(&channel->callback_event);
447
448	return 0;
449}
450
451void hv_vss_deinit(void)
452{
453	vss_transaction.state = HVUTIL_DEVICE_DYING;
454
455	hv_vss_cancel_work();
456
457	hvutil_transport_destroy(hvt);
458}
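
In both versions the actual freeze/thaw work happens in the daemon, which is why the newer code above arms the much longer VSS_FREEZE_TIMEOUT (15 minutes) for VSS_OP_FREEZE in vss_send_op() instead of the default HV_UTIL_TIMEOUT. A minimal sketch of that daemon-side work is shown below, using the standard FIFREEZE/FITHAW ioctls from <linux/fs.h>; the mount point argument is purely illustrative, and the in-tree hv_vss_daemon iterates over the mounted file systems rather than taking a single path.

/*
 * Illustrative freeze/thaw helper for the daemon side of VSS_OP_FREEZE and
 * VSS_OP_THAW. FIFREEZE blocks new writes and flushes dirty data so the
 * host-side snapshot is consistent; FITHAW releases the file system again.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FIFREEZE, FITHAW */

static int vss_do_op(const char *mount_point, int freeze)
{
	int fd, ret;

	fd = open(mount_point, O_RDONLY);
	if (fd < 0) {
		perror(mount_point);
		return -1;
	}

	ret = ioctl(fd, freeze ? FIFREEZE : FITHAW, 0);
	if (ret)
		perror(freeze ? "FIFREEZE" : "FITHAW");

	close(fd);
	return ret;
}

A freeze request from the host would then map to something like vss_do_op("/data", 1) and the later VSS_OP_THAW to vss_do_op("/data", 0) (the path is a placeholder); the long freeze timeout on the kernel side covers the time the daemon may spend flushing dirty data across all mounts.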