// SPDX-License-Identifier: GPL-2.0-only
/*
 * An implementation of host initiated guest snapshot.
 *
 * Copyright (C) 2013, Microsoft, Inc.
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>

#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"

#define VSS_MAJOR	5
#define VSS_MINOR	0
#define VSS_VERSION	(VSS_MAJOR << 16 | VSS_MINOR)

#define VSS_VER_COUNT 1
static const int vss_versions[] = {
	VSS_VERSION
};

#define FW_VER_COUNT 1
static const int fw_versions[] = {
	UTIL_FW_VERSION
};

/* See comment with struct hv_vss_msg regarding the max VMbus packet size */
#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2)

/*
 * Timeout values are based on expectations from the host.
 */
#define VSS_FREEZE_TIMEOUT (15 * 60)

/*
 * Global state maintained for the transaction that is being processed. For a
 * class of integration services, including the "VSS service", the specified
 * protocol is a "request/response" protocol, which means that there can only
 * be a single outstanding transaction from the host at any given point in
 * time. We use this to simplify memory management in this driver - we cache
 * and process only one message at a time.
 *
 * While the request/response protocol is guaranteed by the host, we further
 * ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
 * handled.
 */

static struct {
	int state;   /* hvutil_device_state */
	int recv_len; /* number of bytes received. */
	struct vmbus_channel *recv_channel; /* chn we got the request */
	u64 recv_req_id; /* request ID. */
	struct hv_vss_msg *msg; /* current message */
} vss_transaction;


static void vss_respond_to_host(int error);

/*
 * This state maintains the version number registered by the daemon.
 */
static int dm_reg_value;

static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;

static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);

static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_handle_request_work, vss_handle_request);

static void vss_poll_wrapper(void *channel)
{
	/* Transaction is finished, reset the state here to avoid races. */
	vss_transaction.state = HVUTIL_READY;
	tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}

/*
 * Timed out waiting for the userspace daemon to reply: fail the
 * transaction back to the host and resume channel processing.
 */
static void vss_timeout_func(struct work_struct *dummy)
{
	pr_warn("VSS: timeout waiting for daemon to reply\n");
	vss_respond_to_host(HV_E_FAIL);

	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

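/*
 * Completion callback for the handshake reply: the daemon has received our
 * version, so reset the transaction state and re-enable channel processing.
 */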
static void vss_register_done(void)
{
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
	pr_debug("VSS: userspace daemon registered\n");
}

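/*
 * Process the daemon's registration message. A VSS_OP_REGISTER1 daemon
 * expects a reply carrying our registration value; VSS_OP_REGISTER does not.
 */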
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
	u32 our_ver = VSS_OP_REGISTER1;

	switch (vss_msg->vss_hdr.operation) {
	case VSS_OP_REGISTER:
		/* Daemon doesn't expect us to reply */
		dm_reg_value = VSS_OP_REGISTER;
		break;
	case VSS_OP_REGISTER1:
		/* Daemon expects us to reply with our own version */
		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
					  vss_register_done))
			return -EFAULT;
		dm_reg_value = VSS_OP_REGISTER1;
		break;
	default:
		return -EINVAL;
	}
	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
	return 0;
}

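/*
 * Callback invoked when data is received from user mode: either a
 * registration message or the daemon's reply to an outstanding request.
 */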
static int vss_on_msg(void *msg, int len)
{
	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;

	if (len != sizeof(*vss_msg)) {
		pr_debug("VSS: Message size does not match length\n");
		return -EINVAL;
	}

	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
		/*
		 * Don't process registration messages if we're in the middle
		 * of processing a transaction.
		 */
		if (vss_transaction.state > HVUTIL_READY) {
			pr_debug("VSS: Got unexpected registration request\n");
			return -EINVAL;
		}

		return vss_handle_handshake(vss_msg);
	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
		vss_transaction.state = HVUTIL_USERSPACE_RECV;

		if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
			vss_transaction.msg->vss_cf.flags =
				VSS_HBU_NO_AUTO_RECOVERY;

		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(vss_msg->error);
			/* Transaction is finished, reset the state. */
			hv_poll_channel(vss_transaction.recv_channel,
					vss_poll_wrapper);
		}
	} else {
		/* This is a spurious call! */
		pr_debug("VSS: Transaction not active\n");
		return -EINVAL;
	}
	return 0;
}

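/*
 * Forward the cached host request to the userspace daemon and arm the reply
 * timeout; freeze requests get a longer timeout than the other operations.
 */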
static void vss_send_op(void)
{
	int op = vss_transaction.msg->vss_hdr.operation;
	int rc;
	struct hv_vss_msg *vss_msg;

	/* The transaction state is wrong. */
	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
		pr_debug("VSS: Unexpected attempt to send to daemon\n");
		return;
	}

	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return;

	vss_msg->vss_hdr.operation = op;

	vss_transaction.state = HVUTIL_USERSPACE_REQ;

	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
			VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);

	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
	if (rc) {
		pr_warn("VSS: failed to communicate with the daemon: %d\n", rc);
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(HV_E_FAIL);
			vss_transaction.state = HVUTIL_READY;
		}
	}

	kfree(vss_msg);
}

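/*
 * Work item scheduled from the channel callback: dispatch the cached host
 * request either to the userspace daemon (freeze/thaw/hot backup) or answer
 * it directly (DM info and unknown operations).
 */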
static void vss_handle_request(struct work_struct *dummy)
{
	switch (vss_transaction.msg->vss_hdr.operation) {
	/*
	 * Initiate a "freeze/thaw" operation in the guest.
	 * We respond to the host once the operation is complete.
	 *
	 * We send the message to the user space daemon and the operation is
	 * performed in the daemon.
	 */
	case VSS_OP_THAW:
	case VSS_OP_FREEZE:
	case VSS_OP_HOT_BACKUP:
		if (vss_transaction.state < HVUTIL_READY) {
			/* Userspace is not registered yet */
			pr_debug("VSS: Not ready for request.\n");
			vss_respond_to_host(HV_E_FAIL);
			return;
		}

		pr_debug("VSS: Received request for op code: %d\n",
			 vss_transaction.msg->vss_hdr.operation);
		vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
		vss_send_op();
		return;
	case VSS_OP_GET_DM_INFO:
		vss_transaction.msg->dm_info.flags = 0;
		break;
	default:
		break;
	}

	vss_respond_to_host(0);
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

/*
 * Send a response back to the host.
 */

static void
vss_respond_to_host(int error)
{
	struct icmsg_hdr *icmsghdrp;
	u32 buf_len;
	struct vmbus_channel *channel;
	u64 req_id;

	/*
	 * Copy the global state for completing the transaction. Note that
	 * only one transaction can be active at a time.
	 */

	buf_len = vss_transaction.recv_len;
	channel = vss_transaction.recv_channel;
	req_id = vss_transaction.recv_req_id;

	icmsghdrp = (struct icmsg_hdr *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (channel->onchannel_callback == NULL)
		/*
		 * We have raced with the util driver being unloaded;
		 * silently return.
		 */
		return;

	icmsghdrp->status = error;

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
			 VM_PKT_DATA_INBAND, 0);
}

/*
 * This callback is invoked when we get a VSS message from the host.
 * The host ensures that only one VSS transaction can be active at a time.
 */

void hv_vss_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct hv_vss_msg *vss_msg;
	int vss_srv_version;

	struct icmsg_hdr *icmsghdrp;

	if (vss_transaction.state > HVUTIL_READY)
		return;

	if (vmbus_recvpacket(channel, recv_buffer, VSS_MAX_PKT_SIZE, &recvlen, &requestid)) {
		pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
		return;
	}

	if (!recvlen)
		return;

	/* Ensure recvlen is big enough to read header data */
	if (recvlen < ICMSG_HDR) {
		pr_err_ratelimited("VSS request received. Packet length too small: %u\n",
				   recvlen);
		return;
	}

	icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		if (vmbus_prep_negotiate_resp(icmsghdrp,
				recv_buffer, recvlen,
				fw_versions, FW_VER_COUNT,
				vss_versions, VSS_VER_COUNT,
				NULL, &vss_srv_version)) {

			pr_info("VSS IC version %d.%d\n",
				vss_srv_version >> 16,
				vss_srv_version & 0xFFFF);
		}
	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
		/* Ensure recvlen is big enough to contain hv_vss_msg */
		if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
			pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
					   recvlen);
			return;
		}
		vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];

		/*
		 * Stash away this global state for completing the
		 * transaction; note transactions are serialized.
		 */

		vss_transaction.recv_len = recvlen;
		vss_transaction.recv_req_id = requestid;
		vss_transaction.msg = (struct hv_vss_msg *)vss_msg;

		schedule_work(&vss_handle_request_work);
		return;
	} else {
		pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
				   icmsghdrp->icmsgtype);
		return;
	}

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
			     ICMSGHDRFLAG_RESPONSE;
	vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);
}

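/*
 * Called when the userspace daemon closes the device: fail any outstanding
 * transaction and wait for a new registration.
 */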
static void vss_on_reset(void)
{
	if (cancel_delayed_work_sync(&vss_timeout_work))
		vss_respond_to_host(HV_E_FAIL);
	vss_transaction.state = HVUTIL_DEVICE_INIT;
}

int
hv_vss_init(struct hv_util_service *srv)
{
	if (vmbus_proto_version < VERSION_WIN8_1) {
		pr_warn("Integration service 'Backup (volume snapshot)' not supported on this host version.\n");
		return -ENOTSUPP;
	}
	recv_buffer = srv->recv_buffer;
	vss_transaction.recv_channel = srv->channel;
	vss_transaction.recv_channel->max_pkt_size = VSS_MAX_PKT_SIZE;

	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
	 * Defer processing channel callbacks until the daemon
	 * has registered.
	 */
	vss_transaction.state = HVUTIL_DEVICE_INIT;

	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
				    vss_on_msg, vss_on_reset);
	if (!hvt) {
		pr_warn("VSS: Failed to initialize transport\n");
		return -EFAULT;
	}

	return 0;
}

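/* Cancel both the daemon-reply timeout and any pending request work. */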
static void hv_vss_cancel_work(void)
{
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_handle_request_work);
}

int hv_vss_pre_suspend(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;
	struct hv_vss_msg *vss_msg;

	/*
	 * Fake a THAW message for the user space daemon in case the daemon
	 * has frozen the file systems. It doesn't matter if there is already
	 * a message pending to be delivered to user space, since we force
	 * vss_transaction.state to HVUTIL_READY: the user space daemon's
	 * write() will then fail with EINVAL (see vss_on_msg()), and the
	 * daemon will reset the device by closing and re-opening it.
	 */
	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return -ENOMEM;

	tasklet_disable(&channel->callback_event);

	vss_msg->vss_hdr.operation = VSS_OP_THAW;

	/* Cancel any possible pending work. */
	hv_vss_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

	kfree(vss_msg);

	vss_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
	return 0;
}

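/* Re-enable the channel callback that hv_vss_pre_suspend() disabled. */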
int hv_vss_pre_resume(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;

	tasklet_enable(&channel->callback_event);

	return 0;
}

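/* Driver teardown: stop all outstanding work and destroy the transport. */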
void hv_vss_deinit(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;

	hv_vss_cancel_work();

	hvutil_transport_destroy(hvt);
}