v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2010, Microsoft Corporation.
  4 *
  5 * Authors:
  6 *   Haiyang Zhang <haiyangz@microsoft.com>
  7 *   Hank Janssen  <hjanssen@microsoft.com>
  8 */
  9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 10
 11#include <linux/kernel.h>
 12#include <linux/init.h>
 13#include <linux/module.h>
 14#include <linux/slab.h>
 15#include <linux/sysctl.h>
 16#include <linux/reboot.h>
 17#include <linux/hyperv.h>
 18#include <linux/clockchips.h>
 19#include <linux/ptp_clock_kernel.h>
 20#include <clocksource/hyperv_timer.h>
 21#include <asm/mshyperv.h>
 22
 23#include "hyperv_vmbus.h"
 24
 25#define SD_MAJOR	3
 26#define SD_MINOR	0
 27#define SD_MINOR_1	1
 28#define SD_MINOR_2	2
 29#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
 30#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
 31#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)
 32
 33#define SD_MAJOR_1	1
 34#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)
 35
 36#define TS_MAJOR	4
 37#define TS_MINOR	0
 38#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)
 39
 40#define TS_MAJOR_1	1
 41#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)
 42
 43#define TS_MAJOR_3	3
 44#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)
 45
 46#define HB_MAJOR	3
 47#define HB_MINOR	0
 48#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)
 49
 50#define HB_MAJOR_1	1
 51#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)
 52
 53static int sd_srv_version;
 54static int ts_srv_version;
 55static int hb_srv_version;
 56
 57#define SD_VER_COUNT 4
 58static const int sd_versions[] = {
 59	SD_VERSION_3_2,
 60	SD_VERSION_3_1,
 61	SD_VERSION,
 62	SD_VERSION_1
 63};
 64
 65#define TS_VER_COUNT 3
 66static const int ts_versions[] = {
 67	TS_VERSION,
 68	TS_VERSION_3,
 69	TS_VERSION_1
 70};
 71
 72#define HB_VER_COUNT 2
 73static const int hb_versions[] = {
 74	HB_VERSION,
 75	HB_VERSION_1
 76};
 77
 78#define FW_VER_COUNT 2
 79static const int fw_versions[] = {
 80	UTIL_FW_VERSION,
 81	UTIL_WS2K8_FW_VERSION
 82};
 83
 84/*
 85 * Send the "hibernate" udev event in a thread context.
 86 */
 87struct hibernate_work_context {
 88	struct work_struct work;
 89	struct hv_device *dev;
 90};
 91
 92static struct hibernate_work_context hibernate_context;
 93static bool hibernation_supported;
 94
 95static void send_hibernate_uevent(struct work_struct *work)
 96{
 97	char *uevent_env[2] = { "EVENT=hibernate", NULL };
 98	struct hibernate_work_context *ctx;
 99
100	ctx = container_of(work, struct hibernate_work_context, work);
101
102	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
103
104	pr_info("Sent hibernation uevent\n");
105}
106
107static int hv_shutdown_init(struct hv_util_service *srv)
108{
109	struct vmbus_channel *channel = srv->channel;
110
111	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
112	hibernate_context.dev = channel->device_obj;
113
114	hibernation_supported = hv_is_hibernation_supported();
115
116	return 0;
117}
118
119static void shutdown_onchannelcallback(void *context);
120static struct hv_util_service util_shutdown = {
121	.util_cb = shutdown_onchannelcallback,
122	.util_init = hv_shutdown_init,
123};
124
125static int hv_timesync_init(struct hv_util_service *srv);
126static int hv_timesync_pre_suspend(void);
127static void hv_timesync_deinit(void);
128
129static void timesync_onchannelcallback(void *context);
130static struct hv_util_service util_timesynch = {
131	.util_cb = timesync_onchannelcallback,
132	.util_init = hv_timesync_init,
133	.util_pre_suspend = hv_timesync_pre_suspend,
134	.util_deinit = hv_timesync_deinit,
135};
136
137static void heartbeat_onchannelcallback(void *context);
138static struct hv_util_service util_heartbeat = {
139	.util_cb = heartbeat_onchannelcallback,
140};
141
142static struct hv_util_service util_kvp = {
143	.util_cb = hv_kvp_onchannelcallback,
144	.util_init = hv_kvp_init,
145	.util_pre_suspend = hv_kvp_pre_suspend,
146	.util_pre_resume = hv_kvp_pre_resume,
147	.util_deinit = hv_kvp_deinit,
148};
149
150static struct hv_util_service util_vss = {
151	.util_cb = hv_vss_onchannelcallback,
152	.util_init = hv_vss_init,
153	.util_pre_suspend = hv_vss_pre_suspend,
154	.util_pre_resume = hv_vss_pre_resume,
155	.util_deinit = hv_vss_deinit,
156};
157
158static struct hv_util_service util_fcopy = {
159	.util_cb = hv_fcopy_onchannelcallback,
160	.util_init = hv_fcopy_init,
161	.util_pre_suspend = hv_fcopy_pre_suspend,
162	.util_pre_resume = hv_fcopy_pre_resume,
163	.util_deinit = hv_fcopy_deinit,
164};
165
166static void perform_shutdown(struct work_struct *dummy)
167{
168	orderly_poweroff(true);
169}
170
171static void perform_restart(struct work_struct *dummy)
172{
173	orderly_reboot();
174}
175
176/*
177 * Perform the shutdown operation in a thread context.
178 */
179static DECLARE_WORK(shutdown_work, perform_shutdown);
180
181/*
182 * Perform the restart operation in a thread context.
183 */
184static DECLARE_WORK(restart_work, perform_restart);
185
186static void shutdown_onchannelcallback(void *context)
187{
188	struct vmbus_channel *channel = context;
189	struct work_struct *work = NULL;
190	u32 recvlen;
191	u64 requestid;
192	u8  *shut_txf_buf = util_shutdown.recv_buffer;
193
194	struct shutdown_msg_data *shutdown_msg;
195
196	struct icmsg_hdr *icmsghdrp;
197
198	vmbus_recvpacket(channel, shut_txf_buf,
199			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
200
201	if (recvlen > 0) {
202		icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
203			sizeof(struct vmbuspipe_hdr)];
204
205		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
206			if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
207					fw_versions, FW_VER_COUNT,
208					sd_versions, SD_VER_COUNT,
209					NULL, &sd_srv_version)) {
210				pr_info("Shutdown IC version %d.%d\n",
211					sd_srv_version >> 16,
212					sd_srv_version & 0xFFFF);
213			}
214		} else {
215			shutdown_msg =
216				(struct shutdown_msg_data *)&shut_txf_buf[
217					sizeof(struct vmbuspipe_hdr) +
218					sizeof(struct icmsg_hdr)];
219
220			/*
221			 * shutdown_msg->flags can be 0 (shut down), 2 (reboot),
222			 * or 4 (hibernate). It may be bitwise-ORed with 1, which
223			 * means the request should be performed by force. Linux
224			 * always tries to perform the request by force.
225			 */
226			switch (shutdown_msg->flags) {
227			case 0:
228			case 1:
229				icmsghdrp->status = HV_S_OK;
230				work = &shutdown_work;
231				pr_info("Shutdown request received -"
232					    " graceful shutdown initiated\n");
233				break;
234			case 2:
235			case 3:
236				icmsghdrp->status = HV_S_OK;
237				work = &restart_work;
238				pr_info("Restart request received -"
239					    " graceful restart initiated\n");
240				break;
241			case 4:
242			case 5:
243				pr_info("Hibernation request received\n");
244				icmsghdrp->status = hibernation_supported ?
245					HV_S_OK : HV_E_FAIL;
246				if (hibernation_supported)
247					work = &hibernate_context.work;
248				break;
249			default:
250				icmsghdrp->status = HV_E_FAIL;
251				pr_info("Shutdown request received -"
252					    " Invalid request\n");
253				break;
254			}
255		}
256
257		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
258			| ICMSGHDRFLAG_RESPONSE;
259
260		vmbus_sendpacket(channel, shut_txf_buf,
261				       recvlen, requestid,
262				       VM_PKT_DATA_INBAND, 0);
263	}
264
265	if (work)
266		schedule_work(work);
267}
268
269/*
270 * Set the host time in a process context.
271 */
272static struct work_struct adj_time_work;
273
274/*
275 * The last time sample, received from the host. PTP device responds to
276 * requests by using this data and the current partition-wide time reference
277 * count.
278 */
279static struct {
280	u64				host_time;
281	u64				ref_time;
282	spinlock_t			lock;
283} host_ts;
284
285static inline u64 reftime_to_ns(u64 reftime)
286{
287	return (reftime - WLTIMEDELTA) * 100;
288}
289
290/*
291 * Hard coded threshold for host timesync delay: 600 seconds
292 */
293static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
294
295static int hv_get_adj_host_time(struct timespec64 *ts)
296{
297	u64 newtime, reftime, timediff_adj;
298	unsigned long flags;
299	int ret = 0;
300
301	spin_lock_irqsave(&host_ts.lock, flags);
302	reftime = hv_read_reference_counter();
303
304	/*
305	 * We need to let the caller know that the last update from host
306	 * is older than the max allowable threshold. clock_gettime()
307	 * and PTP ioctl do not have a documented error that we could
308	 * return for this specific case. Use ESTALE to report this.
309	 */
310	timediff_adj = reftime - host_ts.ref_time;
311	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
312		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
313			     (timediff_adj * 100));
314		ret = -ESTALE;
315	}
316
317	newtime = host_ts.host_time + timediff_adj;
318	*ts = ns_to_timespec64(reftime_to_ns(newtime));
319	spin_unlock_irqrestore(&host_ts.lock, flags);
320
321	return ret;
322}
323
324static void hv_set_host_time(struct work_struct *work)
325{
326
327	struct timespec64 ts;
328
329	if (!hv_get_adj_host_time(&ts))
330		do_settimeofday64(&ts);
331}
332
333/*
334 * Synchronize time with host after reboot, restore, etc.
335 *
336 * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
337 * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
338 * message after the timesync channel is opened. Since the hv_utils module is
339 * loaded after hv_vmbus, the first message is usually missed. This bit is
340 * considered a hard request to discipline the clock.
341 *
342 * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
343 * typically used as a hint to the guest. The guest is under no obligation
344 * to discipline the clock.
345 */
346static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
347{
348	unsigned long flags;
349	u64 cur_reftime;
350
351	/*
352	 * Save the adjusted time sample from the host and the snapshot
353	 * of the current system time.
354	 */
355	spin_lock_irqsave(&host_ts.lock, flags);
356
357	cur_reftime = hv_read_reference_counter();
358	host_ts.host_time = hosttime;
359	host_ts.ref_time = cur_reftime;
360
361	/*
362	 * TimeSync v4 messages contain reference time (guest's Hyper-V
363	 * clocksource read when the time sample was generated), so we can
364	 * improve the precision by adding the delta between now and the
365	 * time of generation. For older protocols we set
366	 * reftime == cur_reftime on call.
367	 */
368	host_ts.host_time += (cur_reftime - reftime);
369
370	spin_unlock_irqrestore(&host_ts.lock, flags);
371
372	/* Schedule work to call do_settimeofday64() */
373	if (adj_flags & ICTIMESYNCFLAG_SYNC)
374		schedule_work(&adj_time_work);
375}
376
377/*
378 * Time Sync Channel message handler.
379 */
380static void timesync_onchannelcallback(void *context)
381{
382	struct vmbus_channel *channel = context;
383	u32 recvlen;
384	u64 requestid;
385	struct icmsg_hdr *icmsghdrp;
386	struct ictimesync_data *timedatap;
387	struct ictimesync_ref_data *refdata;
388	u8 *time_txf_buf = util_timesynch.recv_buffer;
389
390	/*
391	 * Drain the ring buffer and use the last packet to update
392	 * host_ts
393	 */
394	while (1) {
395		int ret = vmbus_recvpacket(channel, time_txf_buf,
396					   HV_HYP_PAGE_SIZE, &recvlen,
397					   &requestid);
398		if (ret) {
399			pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
400				     ret);
401			break;
402		}
403
404		if (!recvlen)
405			break;
406
407		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
408				sizeof(struct vmbuspipe_hdr)];
409
410		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
411			if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
412						fw_versions, FW_VER_COUNT,
413						ts_versions, TS_VER_COUNT,
414						NULL, &ts_srv_version)) {
415				pr_info("TimeSync IC version %d.%d\n",
416					ts_srv_version >> 16,
417					ts_srv_version & 0xFFFF);
418			}
419		} else {
420			if (ts_srv_version > TS_VERSION_3) {
421				refdata = (struct ictimesync_ref_data *)
422					&time_txf_buf[
423					sizeof(struct vmbuspipe_hdr) +
424					sizeof(struct icmsg_hdr)];
425
426				adj_guesttime(refdata->parenttime,
427						refdata->vmreferencetime,
428						refdata->flags);
429			} else {
430				timedatap = (struct ictimesync_data *)
431					&time_txf_buf[
432					sizeof(struct vmbuspipe_hdr) +
433					sizeof(struct icmsg_hdr)];
434				adj_guesttime(timedatap->parenttime,
435					      hv_read_reference_counter(),
436					      timedatap->flags);
437			}
438		}
439
440		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
441			| ICMSGHDRFLAG_RESPONSE;
442
443		vmbus_sendpacket(channel, time_txf_buf,
444				recvlen, requestid,
445				VM_PKT_DATA_INBAND, 0);
446	}
447}
448
449/*
450 * Heartbeat functionality.
451 * Every two seconds, Hyper-V sends us a heartbeat request message.
452 * We respond to this message, and Hyper-V knows we are alive.
453 */
454static void heartbeat_onchannelcallback(void *context)
455{
456	struct vmbus_channel *channel = context;
457	u32 recvlen;
458	u64 requestid;
459	struct icmsg_hdr *icmsghdrp;
460	struct heartbeat_msg_data *heartbeat_msg;
461	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
462
463	while (1) {
464
465		vmbus_recvpacket(channel, hbeat_txf_buf,
466				 HV_HYP_PAGE_SIZE, &recvlen, &requestid);
467
468		if (!recvlen)
469			break;
470
471		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
472				sizeof(struct vmbuspipe_hdr)];
473
474		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
475			if (vmbus_prep_negotiate_resp(icmsghdrp,
476					hbeat_txf_buf,
477					fw_versions, FW_VER_COUNT,
478					hb_versions, HB_VER_COUNT,
479					NULL, &hb_srv_version)) {
480
481				pr_info("Heartbeat IC version %d.%d\n",
482					hb_srv_version >> 16,
483					hb_srv_version & 0xFFFF);
484			}
485		} else {
486			heartbeat_msg =
487				(struct heartbeat_msg_data *)&hbeat_txf_buf[
488					sizeof(struct vmbuspipe_hdr) +
489					sizeof(struct icmsg_hdr)];
490
491			heartbeat_msg->seq_num += 1;
492		}
493
494		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
495			| ICMSGHDRFLAG_RESPONSE;
496
497		vmbus_sendpacket(channel, hbeat_txf_buf,
498				       recvlen, requestid,
499				       VM_PKT_DATA_INBAND, 0);
500	}
501}
502
503static int util_probe(struct hv_device *dev,
504			const struct hv_vmbus_device_id *dev_id)
505{
506	struct hv_util_service *srv =
507		(struct hv_util_service *)dev_id->driver_data;
508	int ret;
509
510	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
511	if (!srv->recv_buffer)
512		return -ENOMEM;
513	srv->channel = dev->channel;
514	if (srv->util_init) {
515		ret = srv->util_init(srv);
516		if (ret) {
517			ret = -ENODEV;
518			goto error1;
519		}
520	}
521
522	/*
523	 * The set of services managed by the util driver is not performance
524	 * critical and do not need batched reading. Furthermore, some services
525	 * such as KVP can only handle one message from the host at a time.
526	 * Turn off batched reading for all util drivers before we open the
527	 * channel.
528	 */
529	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
530
531	hv_set_drvdata(dev, srv);
532
533	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
534			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
535			 dev->channel);
536	if (ret)
537		goto error;
538
539	return 0;
540
541error:
542	if (srv->util_deinit)
543		srv->util_deinit();
544error1:
545	kfree(srv->recv_buffer);
546	return ret;
547}
548
549static int util_remove(struct hv_device *dev)
550{
551	struct hv_util_service *srv = hv_get_drvdata(dev);
552
553	if (srv->util_deinit)
554		srv->util_deinit();
555	vmbus_close(dev->channel);
556	kfree(srv->recv_buffer);
557
558	return 0;
559}
560
561/*
562 * When we're in util_suspend(), all the userspace processes have been frozen
563 * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
564 * after the whole resume procedure, including util_resume(), finishes.
565 */
566static int util_suspend(struct hv_device *dev)
567{
568	struct hv_util_service *srv = hv_get_drvdata(dev);
569	int ret = 0;
570
571	if (srv->util_pre_suspend) {
572		ret = srv->util_pre_suspend();
573		if (ret)
574			return ret;
575	}
576
577	vmbus_close(dev->channel);
578
579	return 0;
580}
581
582static int util_resume(struct hv_device *dev)
583{
584	struct hv_util_service *srv = hv_get_drvdata(dev);
585	int ret = 0;
586
587	if (srv->util_pre_resume) {
588		ret = srv->util_pre_resume();
589		if (ret)
590			return ret;
591	}
592
593	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
594			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
595			 dev->channel);
596	return ret;
597}
598
599static const struct hv_vmbus_device_id id_table[] = {
600	/* Shutdown guid */
601	{ HV_SHUTDOWN_GUID,
602	  .driver_data = (unsigned long)&util_shutdown
603	},
604	/* Time synch guid */
605	{ HV_TS_GUID,
606	  .driver_data = (unsigned long)&util_timesynch
607	},
608	/* Heartbeat guid */
609	{ HV_HEART_BEAT_GUID,
610	  .driver_data = (unsigned long)&util_heartbeat
611	},
612	/* KVP guid */
613	{ HV_KVP_GUID,
614	  .driver_data = (unsigned long)&util_kvp
615	},
616	/* VSS GUID */
617	{ HV_VSS_GUID,
618	  .driver_data = (unsigned long)&util_vss
619	},
620	/* File copy GUID */
621	{ HV_FCOPY_GUID,
622	  .driver_data = (unsigned long)&util_fcopy
623	},
624	{ },
625};
626
627MODULE_DEVICE_TABLE(vmbus, id_table);
628
629/* The one and only one */
630static  struct hv_driver util_drv = {
631	.name = "hv_utils",
632	.id_table = id_table,
633	.probe =  util_probe,
634	.remove =  util_remove,
635	.suspend = util_suspend,
636	.resume =  util_resume,
637	.driver = {
638		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
639	},
640};
641
642static int hv_ptp_enable(struct ptp_clock_info *info,
643			 struct ptp_clock_request *request, int on)
644{
645	return -EOPNOTSUPP;
646}
647
648static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
649{
650	return -EOPNOTSUPP;
651}
652
653static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
654{
655	return -EOPNOTSUPP;
656}
657static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
658{
659	return -EOPNOTSUPP;
660}
661
662static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
663{
664	return hv_get_adj_host_time(ts);
665}
666
667static struct ptp_clock_info ptp_hyperv_info = {
668	.name		= "hyperv",
669	.enable         = hv_ptp_enable,
670	.adjtime        = hv_ptp_adjtime,
671	.adjfreq        = hv_ptp_adjfreq,
672	.gettime64      = hv_ptp_gettime,
673	.settime64      = hv_ptp_settime,
674	.owner		= THIS_MODULE,
675};
676
677static struct ptp_clock *hv_ptp_clock;
678
679static int hv_timesync_init(struct hv_util_service *srv)
680{
681	/* TimeSync requires Hyper-V clocksource. */
682	if (!hv_read_reference_counter)
683		return -ENODEV;
684
685	spin_lock_init(&host_ts.lock);
686
687	INIT_WORK(&adj_time_work, hv_set_host_time);
688
689	/*
690	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
691	 * disabled but the driver is still useful without the PTP device
692	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
693	 */
694	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
695	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
696		pr_err("cannot register PTP clock: %ld\n",
697		       PTR_ERR(hv_ptp_clock));
698		hv_ptp_clock = NULL;
699	}
700
701	return 0;
702}
703
704static void hv_timesync_cancel_work(void)
705{
706	cancel_work_sync(&adj_time_work);
707}
708
709static int hv_timesync_pre_suspend(void)
710{
711	hv_timesync_cancel_work();
712	return 0;
713}
714
715static void hv_timesync_deinit(void)
716{
717	if (hv_ptp_clock)
718		ptp_clock_unregister(hv_ptp_clock);
719
720	hv_timesync_cancel_work();
721}
722
723static int __init init_hyperv_utils(void)
724{
725	pr_info("Registering HyperV Utility Driver\n");
726
727	return vmbus_driver_register(&util_drv);
728}
729
730static void exit_hyperv_utils(void)
731{
732	pr_info("De-Registered HyperV Utility Driver\n");
733
734	vmbus_driver_unregister(&util_drv);
735}
736
737module_init(init_hyperv_utils);
738module_exit(exit_hyperv_utils);
739
740MODULE_DESCRIPTION("Hyper-V Utilities");
741MODULE_LICENSE("GPL");
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2010, Microsoft Corporation.
  4 *
  5 * Authors:
  6 *   Haiyang Zhang <haiyangz@microsoft.com>
  7 *   Hank Janssen  <hjanssen@microsoft.com>
  8 */
  9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 10
 11#include <linux/kernel.h>
 12#include <linux/init.h>
 13#include <linux/module.h>
 14#include <linux/slab.h>
 15#include <linux/sysctl.h>
 16#include <linux/reboot.h>
 17#include <linux/hyperv.h>
 18#include <linux/clockchips.h>
 19#include <linux/ptp_clock_kernel.h>
 20#include <clocksource/hyperv_timer.h>
 21#include <asm/mshyperv.h>
 22
 23#include "hyperv_vmbus.h"
 24
 25#define SD_MAJOR	3
 26#define SD_MINOR	0
 27#define SD_MINOR_1	1
 28#define SD_MINOR_2	2
 29#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
 30#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
 31#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)
 32
 33#define SD_MAJOR_1	1
 34#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)
 35
 36#define TS_MAJOR	4
 37#define TS_MINOR	0
 38#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)
 39
 40#define TS_MAJOR_1	1
 41#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)
 42
 43#define TS_MAJOR_3	3
 44#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)
 45
 46#define HB_MAJOR	3
 47#define HB_MINOR	0
 48#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)
 49
 50#define HB_MAJOR_1	1
 51#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)
 52
 53static int sd_srv_version;
 54static int ts_srv_version;
 55static int hb_srv_version;
 56
 57#define SD_VER_COUNT 4
 58static const int sd_versions[] = {
 59	SD_VERSION_3_2,
 60	SD_VERSION_3_1,
 61	SD_VERSION,
 62	SD_VERSION_1
 63};
 64
 65#define TS_VER_COUNT 3
 66static const int ts_versions[] = {
 67	TS_VERSION,
 68	TS_VERSION_3,
 69	TS_VERSION_1
 70};
 71
 72#define HB_VER_COUNT 2
 73static const int hb_versions[] = {
 74	HB_VERSION,
 75	HB_VERSION_1
 76};
 77
 78#define FW_VER_COUNT 2
 79static const int fw_versions[] = {
 80	UTIL_FW_VERSION,
 81	UTIL_WS2K8_FW_VERSION
 82};
 83
 84/*
 85 * Send the "hibernate" udev event in a thread context.
 86 */
 87struct hibernate_work_context {
 88	struct work_struct work;
 89	struct hv_device *dev;
 90};
 91
 92static struct hibernate_work_context hibernate_context;
 93static bool hibernation_supported;
 94
 95static void send_hibernate_uevent(struct work_struct *work)
 96{
 97	char *uevent_env[2] = { "EVENT=hibernate", NULL };
 98	struct hibernate_work_context *ctx;
 99
100	ctx = container_of(work, struct hibernate_work_context, work);
101
102	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
103
104	pr_info("Sent hibernation uevent\n");
105}
106
107static int hv_shutdown_init(struct hv_util_service *srv)
108{
109	struct vmbus_channel *channel = srv->channel;
110
111	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
112	hibernate_context.dev = channel->device_obj;
113
114	hibernation_supported = hv_is_hibernation_supported();
115
116	return 0;
117}
118
119static void shutdown_onchannelcallback(void *context);
120static struct hv_util_service util_shutdown = {
121	.util_cb = shutdown_onchannelcallback,
122	.util_init = hv_shutdown_init,
123};
124
125static int hv_timesync_init(struct hv_util_service *srv);
126static int hv_timesync_pre_suspend(void);
127static void hv_timesync_deinit(void);
128
129static void timesync_onchannelcallback(void *context);
130static struct hv_util_service util_timesynch = {
131	.util_cb = timesync_onchannelcallback,
132	.util_init = hv_timesync_init,
133	.util_pre_suspend = hv_timesync_pre_suspend,
134	.util_deinit = hv_timesync_deinit,
135};
136
137static void heartbeat_onchannelcallback(void *context);
138static struct hv_util_service util_heartbeat = {
139	.util_cb = heartbeat_onchannelcallback,
140};
141
142static struct hv_util_service util_kvp = {
143	.util_cb = hv_kvp_onchannelcallback,
144	.util_init = hv_kvp_init,
145	.util_pre_suspend = hv_kvp_pre_suspend,
146	.util_pre_resume = hv_kvp_pre_resume,
147	.util_deinit = hv_kvp_deinit,
148};
149
150static struct hv_util_service util_vss = {
151	.util_cb = hv_vss_onchannelcallback,
152	.util_init = hv_vss_init,
153	.util_pre_suspend = hv_vss_pre_suspend,
154	.util_pre_resume = hv_vss_pre_resume,
155	.util_deinit = hv_vss_deinit,
156};
157
158static struct hv_util_service util_fcopy = {
159	.util_cb = hv_fcopy_onchannelcallback,
160	.util_init = hv_fcopy_init,
161	.util_pre_suspend = hv_fcopy_pre_suspend,
162	.util_pre_resume = hv_fcopy_pre_resume,
163	.util_deinit = hv_fcopy_deinit,
164};
165
166static void perform_shutdown(struct work_struct *dummy)
167{
168	orderly_poweroff(true);
169}
170
171static void perform_restart(struct work_struct *dummy)
172{
173	orderly_reboot();
174}
175
176/*
177 * Perform the shutdown operation in a thread context.
178 */
179static DECLARE_WORK(shutdown_work, perform_shutdown);
180
181/*
182 * Perform the restart operation in a thread context.
183 */
184static DECLARE_WORK(restart_work, perform_restart);
185
186static void shutdown_onchannelcallback(void *context)
187{
188	struct vmbus_channel *channel = context;
189	struct work_struct *work = NULL;
190	u32 recvlen;
191	u64 requestid;
192	u8  *shut_txf_buf = util_shutdown.recv_buffer;
193
194	struct shutdown_msg_data *shutdown_msg;
195
196	struct icmsg_hdr *icmsghdrp;
197
198	if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
199		pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
200		return;
201	}
202
203	if (!recvlen)
204		return;
205
206	/* Ensure recvlen is big enough to read header data */
207	if (recvlen < ICMSG_HDR) {
208		pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
209				   recvlen);
210		return;
211	}
212
213	icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];
214
215	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
216		if (vmbus_prep_negotiate_resp(icmsghdrp,
217				shut_txf_buf, recvlen,
218				fw_versions, FW_VER_COUNT,
219				sd_versions, SD_VER_COUNT,
220				NULL, &sd_srv_version)) {
221			pr_info("Shutdown IC version %d.%d\n",
222				sd_srv_version >> 16,
223				sd_srv_version & 0xFFFF);
224		}
225	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
226		/* Ensure recvlen is big enough to contain shutdown_msg_data struct */
227		if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
228			pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
229					   recvlen);
230			return;
231		}
232
233		shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];
234
235		/*
236		 * shutdown_msg->flags can be 0 (shut down), 2 (reboot),
237		 * or 4 (hibernate). It may be bitwise-ORed with 1, which
238		 * means the request should be performed by force. Linux
239		 * always tries to perform the request by force.
240		 */
241		switch (shutdown_msg->flags) {
242		case 0:
243		case 1:
244			icmsghdrp->status = HV_S_OK;
245			work = &shutdown_work;
246			pr_info("Shutdown request received - graceful shutdown initiated\n");
247			break;
248		case 2:
249		case 3:
250			icmsghdrp->status = HV_S_OK;
251			work = &restart_work;
252			pr_info("Restart request received - graceful restart initiated\n");
253			break;
254		case 4:
255		case 5:
256			pr_info("Hibernation request received\n");
257			icmsghdrp->status = hibernation_supported ?
258				HV_S_OK : HV_E_FAIL;
259			if (hibernation_supported)
260				work = &hibernate_context.work;
261			break;
262		default:
263			icmsghdrp->status = HV_E_FAIL;
264			pr_info("Shutdown request received - Invalid request\n");
265			break;
266		}
267	} else {
268		icmsghdrp->status = HV_E_FAIL;
269		pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
270				   icmsghdrp->icmsgtype);
271	}
272
273	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
274		| ICMSGHDRFLAG_RESPONSE;
275
276	vmbus_sendpacket(channel, shut_txf_buf,
277			 recvlen, requestid,
278			 VM_PKT_DATA_INBAND, 0);
279
280	if (work)
281		schedule_work(work);
282}
283
284/*
285 * Set the host time in a process context.
286 */
287static struct work_struct adj_time_work;
288
289/*
290 * The last time sample, received from the host. PTP device responds to
291 * requests by using this data and the current partition-wide time reference
292 * count.
293 */
294static struct {
295	u64				host_time;
296	u64				ref_time;
297	spinlock_t			lock;
298} host_ts;
299
300static inline u64 reftime_to_ns(u64 reftime)
301{
302	return (reftime - WLTIMEDELTA) * 100;
303}
304
305/*
306 * Hard coded threshold for host timesync delay: 600 seconds
307 */
308static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
309
310static int hv_get_adj_host_time(struct timespec64 *ts)
311{
312	u64 newtime, reftime, timediff_adj;
313	unsigned long flags;
314	int ret = 0;
315
316	spin_lock_irqsave(&host_ts.lock, flags);
317	reftime = hv_read_reference_counter();
318
319	/*
320	 * We need to let the caller know that the last update from host
321	 * is older than the max allowable threshold. clock_gettime()
322	 * and PTP ioctl do not have a documented error that we could
323	 * return for this specific case. Use ESTALE to report this.
324	 */
325	timediff_adj = reftime - host_ts.ref_time;
326	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
327		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
328			     (timediff_adj * 100));
329		ret = -ESTALE;
330	}
331
332	newtime = host_ts.host_time + timediff_adj;
333	*ts = ns_to_timespec64(reftime_to_ns(newtime));
334	spin_unlock_irqrestore(&host_ts.lock, flags);
335
336	return ret;
337}
338
339static void hv_set_host_time(struct work_struct *work)
340{
341
342	struct timespec64 ts;
343
344	if (!hv_get_adj_host_time(&ts))
345		do_settimeofday64(&ts);
346}
347
348/*
349 * Synchronize time with host after reboot, restore, etc.
350 *
351 * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
352 * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
353 * message after the timesync channel is opened. Since the hv_utils module is
354 * loaded after hv_vmbus, the first message is usually missed. This bit is
355 * considered a hard request to discipline the clock.
356 *
357 * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
358 * typically used as a hint to the guest. The guest is under no obligation
359 * to discipline the clock.
360 */
361static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
362{
363	unsigned long flags;
364	u64 cur_reftime;
365
366	/*
367	 * Save the adjusted time sample from the host and the snapshot
368	 * of the current system time.
369	 */
370	spin_lock_irqsave(&host_ts.lock, flags);
371
372	cur_reftime = hv_read_reference_counter();
373	host_ts.host_time = hosttime;
374	host_ts.ref_time = cur_reftime;
375
376	/*
377	 * TimeSync v4 messages contain reference time (guest's Hyper-V
378	 * clocksource read when the time sample was generated), so we can
379	 * improve the precision by adding the delta between now and the
380	 * time of generation. For older protocols we set
381	 * reftime == cur_reftime on call.
382	 */
383	host_ts.host_time += (cur_reftime - reftime);
384
385	spin_unlock_irqrestore(&host_ts.lock, flags);
386
387	/* Schedule work to call do_settimeofday64() */
388	if (adj_flags & ICTIMESYNCFLAG_SYNC)
389		schedule_work(&adj_time_work);
390}
391
392/*
393 * Time Sync Channel message handler.
394 */
395static void timesync_onchannelcallback(void *context)
396{
397	struct vmbus_channel *channel = context;
398	u32 recvlen;
399	u64 requestid;
400	struct icmsg_hdr *icmsghdrp;
401	struct ictimesync_data *timedatap;
402	struct ictimesync_ref_data *refdata;
403	u8 *time_txf_buf = util_timesynch.recv_buffer;
404
405	/*
406	 * Drain the ring buffer and use the last packet to update
407	 * host_ts
408	 */
409	while (1) {
410		int ret = vmbus_recvpacket(channel, time_txf_buf,
411					   HV_HYP_PAGE_SIZE, &recvlen,
412					   &requestid);
413		if (ret) {
414			pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
415					   ret);
416			break;
417		}
418
419		if (!recvlen)
420			break;
421
422		/* Ensure recvlen is big enough to read header data */
423		if (recvlen < ICMSG_HDR) {
424			pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
425					   recvlen);
426			break;
427		}
428
429		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
430				sizeof(struct vmbuspipe_hdr)];
431
432		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
433			if (vmbus_prep_negotiate_resp(icmsghdrp,
434						time_txf_buf, recvlen,
435						fw_versions, FW_VER_COUNT,
436						ts_versions, TS_VER_COUNT,
437						NULL, &ts_srv_version)) {
438				pr_info("TimeSync IC version %d.%d\n",
439					ts_srv_version >> 16,
440					ts_srv_version & 0xFFFF);
441			}
442		} else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
443			if (ts_srv_version > TS_VERSION_3) {
444				/* Ensure recvlen is big enough to read ictimesync_ref_data */
445				if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
446					pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
447							   recvlen);
448					break;
449				}
450				refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];
451
452				adj_guesttime(refdata->parenttime,
453						refdata->vmreferencetime,
454						refdata->flags);
455			} else {
456				/* Ensure recvlen is big enough to read ictimesync_data */
457				if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
458					pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
459							   recvlen);
460					break;
461				}
462				timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];
463
464				adj_guesttime(timedatap->parenttime,
465					      hv_read_reference_counter(),
466					      timedatap->flags);
467			}
468		} else {
469			icmsghdrp->status = HV_E_FAIL;
470			pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
471					   icmsghdrp->icmsgtype);
472		}
473
474		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
475			| ICMSGHDRFLAG_RESPONSE;
476
477		vmbus_sendpacket(channel, time_txf_buf,
478				 recvlen, requestid,
479				 VM_PKT_DATA_INBAND, 0);
480	}
481}
482
483/*
484 * Heartbeat functionality.
485 * Every two seconds, Hyper-V sends us a heartbeat request message.
486 * We respond to this message, and Hyper-V knows we are alive.
487 */
488static void heartbeat_onchannelcallback(void *context)
489{
490	struct vmbus_channel *channel = context;
491	u32 recvlen;
492	u64 requestid;
493	struct icmsg_hdr *icmsghdrp;
494	struct heartbeat_msg_data *heartbeat_msg;
495	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
496
497	while (1) {
498
499		if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
500				     &recvlen, &requestid)) {
501			pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
502			return;
503		}
504
505		if (!recvlen)
506			break;
507
508		/* Ensure recvlen is big enough to read header data */
509		if (recvlen < ICMSG_HDR) {
510			pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
511					   recvlen);
512			break;
513		}
514
515		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
516				sizeof(struct vmbuspipe_hdr)];
517
518		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
519			if (vmbus_prep_negotiate_resp(icmsghdrp,
520					hbeat_txf_buf, recvlen,
521					fw_versions, FW_VER_COUNT,
522					hb_versions, HB_VER_COUNT,
523					NULL, &hb_srv_version)) {
524
525				pr_info("Heartbeat IC version %d.%d\n",
526					hb_srv_version >> 16,
527					hb_srv_version & 0xFFFF);
528			}
529		} else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
530			/*
531			 * Ensure recvlen is big enough to read seq_num. Reserved area is not
532			 * included in the check as the host may not fill it up entirely
533			 */
534			if (recvlen < ICMSG_HDR + sizeof(u64)) {
535				pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
536						   recvlen);
537				break;
538			}
539			heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];
540
541			heartbeat_msg->seq_num += 1;
542		} else {
543			icmsghdrp->status = HV_E_FAIL;
544			pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
545					   icmsghdrp->icmsgtype);
546		}
547
548		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
549			| ICMSGHDRFLAG_RESPONSE;
550
551		vmbus_sendpacket(channel, hbeat_txf_buf,
552				 recvlen, requestid,
553				 VM_PKT_DATA_INBAND, 0);
554	}
555}
556
557#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
558#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
559
560static int util_probe(struct hv_device *dev,
561			const struct hv_vmbus_device_id *dev_id)
562{
563	struct hv_util_service *srv =
564		(struct hv_util_service *)dev_id->driver_data;
565	int ret;
566
567	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
568	if (!srv->recv_buffer)
569		return -ENOMEM;
570	srv->channel = dev->channel;
571	if (srv->util_init) {
572		ret = srv->util_init(srv);
573		if (ret) {
574			ret = -ENODEV;
575			goto error1;
576		}
577	}
578
579	/*
580	 * The set of services managed by the util driver is not performance
581	 * critical and do not need batched reading. Furthermore, some services
582	 * such as KVP can only handle one message from the host at a time.
583	 * Turn off batched reading for all util drivers before we open the
584	 * channel.
585	 */
586	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
587
588	hv_set_drvdata(dev, srv);
589
590	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
591			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
592			 dev->channel);
593	if (ret)
594		goto error;
595
596	return 0;
597
598error:
599	if (srv->util_deinit)
600		srv->util_deinit();
601error1:
602	kfree(srv->recv_buffer);
603	return ret;
604}
605
606static int util_remove(struct hv_device *dev)
607{
608	struct hv_util_service *srv = hv_get_drvdata(dev);
609
610	if (srv->util_deinit)
611		srv->util_deinit();
612	vmbus_close(dev->channel);
613	kfree(srv->recv_buffer);
614
615	return 0;
616}
617
618/*
619 * When we're in util_suspend(), all the userspace processes have been frozen
620 * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
621 * after the whole resume procedure, including util_resume(), finishes.
622 */
623static int util_suspend(struct hv_device *dev)
624{
625	struct hv_util_service *srv = hv_get_drvdata(dev);
626	int ret = 0;
627
628	if (srv->util_pre_suspend) {
629		ret = srv->util_pre_suspend();
630		if (ret)
631			return ret;
632	}
633
634	vmbus_close(dev->channel);
635
636	return 0;
637}
638
639static int util_resume(struct hv_device *dev)
640{
641	struct hv_util_service *srv = hv_get_drvdata(dev);
642	int ret = 0;
643
644	if (srv->util_pre_resume) {
645		ret = srv->util_pre_resume();
646		if (ret)
647			return ret;
648	}
649
650	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
651			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
652			 dev->channel);
653	return ret;
654}
655
656static const struct hv_vmbus_device_id id_table[] = {
657	/* Shutdown guid */
658	{ HV_SHUTDOWN_GUID,
659	  .driver_data = (unsigned long)&util_shutdown
660	},
661	/* Time synch guid */
662	{ HV_TS_GUID,
663	  .driver_data = (unsigned long)&util_timesynch
664	},
665	/* Heartbeat guid */
666	{ HV_HEART_BEAT_GUID,
667	  .driver_data = (unsigned long)&util_heartbeat
668	},
669	/* KVP guid */
670	{ HV_KVP_GUID,
671	  .driver_data = (unsigned long)&util_kvp
672	},
673	/* VSS GUID */
674	{ HV_VSS_GUID,
675	  .driver_data = (unsigned long)&util_vss
676	},
677	/* File copy GUID */
678	{ HV_FCOPY_GUID,
679	  .driver_data = (unsigned long)&util_fcopy
680	},
681	{ },
682};
683
684MODULE_DEVICE_TABLE(vmbus, id_table);
685
686/* The one and only one */
687static  struct hv_driver util_drv = {
688	.name = "hv_utils",
689	.id_table = id_table,
690	.probe =  util_probe,
691	.remove =  util_remove,
692	.suspend = util_suspend,
693	.resume =  util_resume,
694	.driver = {
695		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
696	},
697};
698
699static int hv_ptp_enable(struct ptp_clock_info *info,
700			 struct ptp_clock_request *request, int on)
701{
702	return -EOPNOTSUPP;
703}
704
705static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
706{
707	return -EOPNOTSUPP;
708}
709
710static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
711{
712	return -EOPNOTSUPP;
713}
714static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
715{
716	return -EOPNOTSUPP;
717}
718
719static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
720{
721	return hv_get_adj_host_time(ts);
722}
723
724static struct ptp_clock_info ptp_hyperv_info = {
725	.name		= "hyperv",
726	.enable         = hv_ptp_enable,
727	.adjtime        = hv_ptp_adjtime,
728	.adjfreq        = hv_ptp_adjfreq,
729	.gettime64      = hv_ptp_gettime,
730	.settime64      = hv_ptp_settime,
731	.owner		= THIS_MODULE,
732};
733
734static struct ptp_clock *hv_ptp_clock;
735
736static int hv_timesync_init(struct hv_util_service *srv)
737{
738	/* TimeSync requires Hyper-V clocksource. */
739	if (!hv_read_reference_counter)
740		return -ENODEV;
741
742	spin_lock_init(&host_ts.lock);
743
744	INIT_WORK(&adj_time_work, hv_set_host_time);
745
746	/*
747	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
748	 * disabled but the driver is still useful without the PTP device
749	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
750	 */
751	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
752	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
753		pr_err("cannot register PTP clock: %d\n",
754		       PTR_ERR_OR_ZERO(hv_ptp_clock));
755		hv_ptp_clock = NULL;
756	}
757
758	return 0;
759}
760
761static void hv_timesync_cancel_work(void)
762{
763	cancel_work_sync(&adj_time_work);
764}
765
766static int hv_timesync_pre_suspend(void)
767{
768	hv_timesync_cancel_work();
769	return 0;
770}
771
772static void hv_timesync_deinit(void)
773{
774	if (hv_ptp_clock)
775		ptp_clock_unregister(hv_ptp_clock);
776
777	hv_timesync_cancel_work();
778}
779
780static int __init init_hyperv_utils(void)
781{
782	pr_info("Registering HyperV Utility Driver\n");
783
784	return vmbus_driver_register(&util_drv);
785}
786
787static void exit_hyperv_utils(void)
788{
789	pr_info("De-Registered HyperV Utility Driver\n");
790
791	vmbus_driver_unregister(&util_drv);
792}
793
794module_init(init_hyperv_utils);
795module_exit(exit_hyperv_utils);
796
797MODULE_DESCRIPTION("Hyper-V Utilities");
798MODULE_LICENSE("GPL");