/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
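
/*
 * Usage sketch (illustrative, not part of this file): the channel
 * event-processing path brackets its drain loop with these helpers so
 * that host interrupts are masked while reading, and a packet that
 * arrives just as the mask is cleared is not missed. Something like:
 *
 *	hv_begin_read(rbi);
 * again:
 *	... drain all available packets ...
 *	if (hv_end_read(rbi) != 0)
 *		goto again;
 */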

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potentially the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate whether
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

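/*
 * Worked example (illustrative): with r_size = 4096 and
 * pending_sz = 1024, suppose write_loc = 3500 and old_rd = 3600, so the
 * producer previously had only old_rd - write_loc = 100 writable bytes.
 * If the read pass advances read_loc to 600, the writable space becomes
 * r_size - (write_loc - read_loc) = 1196 bytes. The free space crossed
 * the pending_sz threshold, so hv_need_to_signal_on_read() returns true.
 */
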
static bool hv_need_to_signal_on_read(u32 old_rd,
				      struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over an initial offset (such as the
 * packet descriptor) at the current read location.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed as a u64: the write index is in
 * the upper 32 bits; the lower 32 bits (the read index slot) are left
 * as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into the destination
 * buffer. Assume there is enough room. Handles wrap-around of the source
 * (ring) side only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
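
/*
 * Worked example (illustrative): with ring_buffer_size = 4096,
 * start_write_offset = 4000 and srclen = 200, the copy wraps:
 * frag_len = 96 bytes go to offsets 4000..4095 and the remaining
 * 104 bytes go to offsets 0..103. The returned offset is
 * (4000 + 200) % 4096 = 104. hv_copyfrom_ringbuffer() splits its
 * copy the same way on the source side.
 */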

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * Capture the read index before it is advanced; the signaling
	 * check below compares the free space before and after this read.
	 */
	old_read = hv_get_next_read_location(inring_info);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index is in
 * the upper 32 bits; the lower 32 bits (the read index slot) are left
 * as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Since the ring buffer is double-mapped,
 * a write that runs past the end of the ring lands in the wraparound
 * mapping, so a single memcpy suffices; only the returned write offset
 * needs to wrap.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	const void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
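
/*
 * Worked example (illustrative): with dsize = 16384, read_index = 100
 * and write_index = 4196, the writer owns dsize - (4196 - 100) = 12288
 * bytes and the reader owns the remaining 4096 bytes. The indices are
 * sampled once, so the result is a snapshot, not a guarantee.
 */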

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	struct page **pages_wraparound;
	unsigned long *pfns_wraparound;
	u64 pfn;
	int i;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	if (hv_isolation_type_snp()) {
		pfn = page_to_pfn(pages) +
			PFN_DOWN(ms_hyperv.shared_gpa_boundary);

		pfns_wraparound = kcalloc(page_cnt * 2 - 1,
					  sizeof(unsigned long), GFP_KERNEL);
		if (!pfns_wraparound)
			return -ENOMEM;

		pfns_wraparound[0] = pfn;
		for (i = 0; i < 2 * (page_cnt - 1); i++)
			pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;

		ring_info->ring_buffer = (struct hv_ring_buffer *)
			vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
				 PAGE_KERNEL);
		kfree(pfns_wraparound);

		if (!ring_info->ring_buffer)
			return -ENOMEM;

		/* Zero ring buffer after setting memory host visibility. */
		memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
	} else {
		pages_wraparound = kcalloc(page_cnt * 2 - 1,
					   sizeof(struct page *),
					   GFP_KERNEL);
		if (!pages_wraparound)
			return -ENOMEM;

		pages_wraparound[0] = pages;
		for (i = 0; i < 2 * (page_cnt - 1); i++)
			pages_wraparound[i + 1] =
				&pages[i % (page_cnt - 1) + 1];

		ring_info->ring_buffer = (struct hv_ring_buffer *)
			vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
			     PAGE_KERNEL);

		kfree(pages_wraparound);
		if (!ring_info->ring_buffer)
			return -ENOMEM;
	}

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
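
/*
 * Illustration (not code): with page_cnt = 4, pages[0] holds the
 * struct hv_ring_buffer header and pages 1..3 hold ring data. The
 * wraparound mapping above maps 2 * 4 - 1 = 7 pages virtually as
 *
 *	[0][1][2][3][1][2][3]
 *
 * so a packet that crosses the physical end of the ring is still
 * virtually contiguous, letting readers and writers use a single
 * memcpy instead of splitting copies at the wrap point.
 */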

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer = NULL;
	ring_info->pkt_buffer_size = 0;
}

/*
 * Check if the ring buffer spinlock is available to take or not; used in
 * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
 */
bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rinfo = &channel->outbound;

	return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */
	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		if (channel->next_request_id_callback != NULL) {
			rqst_id = channel->next_request_id_callback(channel, requestid);
			if (rqst_id == VMBUS_RQST_ERROR) {
				spin_unlock_irqrestore(&outring_info->ring_lock, flags);
				return -EAGAIN;
			}
		}
	}
	desc = hv_get_ring_buffer(outring_info) + old_write;
	__trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
	/*
	 * Ensure the compiler doesn't generate code that reads the value of
	 * the transaction ID from the ring buffer, which is shared with the
	 * Hyper-V host and subject to being changed at any time.
	 */
	WRITE_ONCE(desc->trans_id, __trans_id);
	if (trans_id)
		*trans_id = __trans_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			if (channel->request_addr_callback != NULL)
				channel->request_addr_callback(channel, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}
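
/*
 * Usage sketch (illustrative): callers such as vmbus_sendpacket() in
 * channel.c build the kvec list as descriptor + payload + u64 padding,
 * roughly:
 *
 *	struct kvec bufferlist[3];
 *	u64 aligned_data = 0;
 *
 *	bufferlist[0].iov_base = &desc;
 *	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
 *	bufferlist[1].iov_base = buffer;
 *	bufferlist[1].iov_len = bufferlen;
 *	bufferlist[2].iov_base = &aligned_data;
 *	bufferlist[2].iov_len = packetlen_aligned - packetlen;
 *
 *	ret = hv_ringbuffer_write(channel, bufferlist, 3, requestid,
 *				  trans_id);
 */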

/* Read from the ring buffer and advance the read index. */
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned when there is not even a header;
		 * drivers are supposed to analyze buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc;

	/*
	 * The Hyper-V host writes the packet data, then uses
	 * store_release() to update the write_index. Use load_acquire()
	 * here to prevent loads of the packet data from being re-ordered
	 * before the read of the write_index and potentially getting
	 * stale data.
	 */
	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc, *desc_copy;
	u32 bytes_avail, pkt_len, pkt_offset;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	bytes_avail = hv_pkt_iter_avail(rbi);
	if (bytes_avail < sizeof(struct vmpacket_descriptor))
		return NULL;
	bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);

	desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	/*
	 * Ensure the compiler does not use references to incoming Hyper-V
	 * values (which could change at any moment) when reading local
	 * variables later in the code
	 */
	pkt_len = READ_ONCE(desc->len8) << 3;
	pkt_offset = READ_ONCE(desc->offset8) << 3;

	/*
	 * If pkt_len is invalid, set it to the smaller of
	 * hv_pkt_iter_avail() and rbi->pkt_buffer_size
	 */
	if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
		pkt_len = bytes_avail;

	/*
	 * If pkt_offset is invalid, arbitrarily set it to
	 * the size of vmpacket_descriptor
	 */
	if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
		pkt_offset = sizeof(struct vmpacket_descriptor);

	/* Copy the Hyper-V packet out of the ring buffer */
	desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
	memcpy(desc_copy, desc, pkt_len);

	/*
	 * Hyper-V could still change len8 and offset8 after the earlier read.
	 * Ensure that desc_copy has legal values for len8 and offset8 that
	 * are consistent with the copy we just made
	 */
	desc_copy->len8 = pkt_len >> 3;
	desc_copy->offset8 = pkt_offset >> 3;

	return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
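
/*
 * Usage sketch (illustrative): drivers normally consume packets via the
 * foreach_vmbus_pkt() helper in linux/hyperv.h rather than calling the
 * iterator functions directly. hv_pkt_iter_next() wraps
 * __hv_pkt_iter_next() and calls hv_pkt_iter_close() once the ring is
 * drained. my_handle_pkt() is a hypothetical per-driver handler:
 *
 *	static void my_chan_callback(void *context)
 *	{
 *		struct vmbus_channel *channel = context;
 *		struct vmpacket_descriptor *pkt;
 *
 *		foreach_vmbus_pkt(pkt, channel)
 *			my_handle_pkt(channel, pkt);
 *	}
 */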

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
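/*
 * Worked example (illustrative): suppose pending_send_sz = 2048 and the
 * iterator pass freed bytes_read = 1000 bytes, leaving curr_write_sz =
 * 2500 writable bytes. Before the pass the host saw 2500 - 1000 = 1500
 * writable bytes (<= 2048, so it was blocked); now 2500 > 2048, so this
 * is exactly the blocked-to-unblocked transition that warrants a signal.
 */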
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were reordered to happen before we
	 * commit the new read_index, a race could occur: the host could set
	 * pending_send_sz after we have sampled it, and the ring buffer
	 * could block before we commit the read index, so we would miss
	 * sending the interrupt.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);