// SPDX-License-Identifier: GPL-2.0
/*
 * Xen stolen ticks accounting.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/xen-ops.h>

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

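/*
 * Runstate time accumulated across suspend/resume cycles, one slot per
 * RUNSTATE_* value.
 */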
static DEFINE_PER_CPU(u64[4], old_runstate_time);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l, h2;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = READ_ONCE(p32[1]);
			l = READ_ONCE(p32[0]);
			h2 = READ_ONCE(p32[1]);
		} while (h2 != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = READ_ONCE(*p);

	return ret;
}

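/*
 * Grab a consistent copy of this vcpu's runstate info: retry while the
 * hypervisor has an update in flight (XEN_RUNSTATE_UPDATE set in
 * state_entry_time) or state_entry_time changed underneath us.
 */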
static void xen_get_runstate_snapshot_cpu_delta(
			      struct vcpu_runstate_info *res, unsigned int cpu)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = per_cpu_ptr(&xen_runstate, cpu);

	do {
		state_time = get64(&state->state_entry_time);
		rmb();	/* Hypervisor might update data. */
		*res = __READ_ONCE(*state);
		rmb();	/* Hypervisor might update data. */
	} while (get64(&state->state_entry_time) != state_time ||
		 (state_time & XEN_RUNSTATE_UPDATE));
}

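/*
 * Runstate snapshot including the time already accumulated in
 * old_runstate_time across earlier suspend/resume cycles.
 */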
static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
					  unsigned int cpu)
{
	int i;

	xen_get_runstate_snapshot_cpu_delta(res, cpu);

	for (i = 0; i < 4; i++)
		res->time[i] += per_cpu(old_runstate_time, cpu)[i];
}

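/*
 * Manage runstate time around suspend/resume:
 *  -1: snapshot the current per-cpu runstate deltas before suspend
 *   0: fold the saved deltas into old_runstate_time after resume
 *  else: drop the saved deltas without accumulating (checkpointing)
 */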
void xen_manage_runstate_time(int action)
{
	static struct vcpu_runstate_info *runstate_delta;
	struct vcpu_runstate_info state;
	int cpu, i;

	switch (action) {
	case -1: /* backup runstate time before suspend */
		if (unlikely(runstate_delta))
			pr_warn_once("%s: memory leak as runstate_delta is not NULL\n",
					__func__);

		runstate_delta = kmalloc_array(num_possible_cpus(),
					sizeof(*runstate_delta),
					GFP_ATOMIC);
		if (unlikely(!runstate_delta)) {
			pr_warn("%s: failed to allocate runstate_delta\n",
					__func__);
			return;
		}

		for_each_possible_cpu(cpu) {
			xen_get_runstate_snapshot_cpu_delta(&state, cpu);
			memcpy(runstate_delta[cpu].time, state.time,
					sizeof(runstate_delta[cpu].time));
		}

		break;

	case 0: /* accumulate runstate time after resume */
		if (unlikely(!runstate_delta)) {
			pr_warn("%s: cannot accumulate runstate time as runstate_delta is NULL\n",
					__func__);
			return;
		}

		for_each_possible_cpu(cpu) {
			for (i = 0; i < 4; i++)
				per_cpu(old_runstate_time, cpu)[i] +=
					runstate_delta[cpu].time[i];
		}

		break;

	default: /* do not accumulate runstate time for checkpointing */
		break;
	}

	if (action != -1 && runstate_delta) {
		kfree(runstate_delta);
		runstate_delta = NULL;
	}
}

/*
 * Runstate accounting
 */
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	xen_get_runstate_snapshot_cpu(res, smp_processor_id());
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

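/*
 * Stolen time for @cpu: nanoseconds spent runnable-but-not-running or
 * offline according to the Xen runstate info.
 */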
u64 xen_steal_clock(int cpu)
{
	struct vcpu_runstate_info state;

	xen_get_runstate_snapshot_cpu(&state, cpu);
	return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}

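/* Register this cpu's runstate memory area with the hypervisor. */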
void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       xen_vcpu_nr(cpu), &area))
		BUG();
}

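/*
 * Install xen_steal_clock() as the paravirt steal clock and enable steal
 * time accounting.  Full runqueue steal accounting is only enabled when
 * the VMASST_TYPE_runstate_update_flag assist could be activated, i.e.
 * when remote runstate reads can be made consistent.
 */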
void __init xen_time_setup_guest(void)
{
	bool xen_runstate_remote;

	xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
					VMASST_TYPE_runstate_update_flag);

	pv_ops.time.steal_clock = xen_steal_clock;

	static_key_slow_inc(&paravirt_steal_enabled);
	if (xen_runstate_remote)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
}