/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"
/* Mask host interrupts while the guest is reading; used to batch reads. */
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        mb();
}

/* Re-enable host interrupts; returns the number of bytes now available to read. */
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */

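/*
 * Illustrative walk-through (not part of the original driver): suppose
 * the guest writes a packet when the ring is empty. Before the write,
 * read_index == write_index == old_write. After copying the data and
 * updating write_index, hv_need_to_signal() below sees
 * old_write == read_index, i.e. the ring just went from empty to
 * non-empty, so the host must be signaled. If instead the host has set
 * interrupt_mask (it is still draining), no signal is sent:
 *
 *      old_write = write_index;          // e.g. both indices == 0x100
 *      ...copy payload, advance write_index...
 *      if (!interrupt_mask && old_write == read_index)
 *              signal_host();            // empty -> non-empty transition
 */
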
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /* check interrupt_mask before read_index */
        rmb();
        /*
         * This is the only case we need to signal when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potentially the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

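/*
 * Worked example (illustrative numbers, not from the original source):
 * the host wants to write a 1024-byte packet but the guest-to-host ring
 * has only 512 bytes free, so the host stores pending_send_sz = 1024 and
 * blocks. As the guest consumes packets and advances read_index, it
 * recomputes the free space after each read; once free space reaches
 * 1024 bytes (>= pending_send_sz), hv_need_to_signal_on_read() returns
 * true and the guest interrupts the host so it can retry the write.
 */
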
static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * Here is the reason for having this barrier:
         * If the reading of pending_send_sz (in this function)
         * were to be reordered and happen before we commit the new read
         * index (in the calling function), we could
         * have a problem. If the host were to set pending_send_sz after we
         * have sampled it, and then go to sleep before we commit the
         * read index, we could miss sending the interrupt. Issue a full
         * memory barrier to address this.
         */
        mb();

        pending_sz = rbi->ring_buffer->pending_send_sz;
        write_loc = rbi->ring_buffer->write_index;
        /* If the other end is not blocked on write don't bother. */
        if (pending_sz == 0)
                return false;

        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;

        if (cur_write_sz >= pending_sz)
                return true;

        return false;
}

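/*
 * Arithmetic sketch for the free-space computation above (hypothetical
 * numbers): with r_size = 4096,
 *
 *      write_loc = 300, read_loc = 100  ->  free = 4096 - (300 - 100) = 3896
 *      write_loc = 100, read_loc = 300  ->  free = 300 - 100          = 200
 *
 * i.e. the writer may use everything except the region between read_loc
 * and write_loc that still holds unread data.
 */
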
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by 'offset' bytes, e.g. over a
 * packet header it has already parsed.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64, with the write index in
 * the upper 32 bits (the lower 32 bits are left as zero).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

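/*
 * Illustrative decoding (hypothetical values): this packed value is
 * written as the u64 trailer after each packet. If write_index is 0x1a0
 * when a packet is committed, the trailer holds 0x000001a000000000, so
 * the producer's write index at the time of the write can be recovered:
 *
 *      u64 prev_indices = ...;                 // read from the ring
 *      u32 prev_write = prev_indices >> 32;    // 0x1a0
 */
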
/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source (ring) side only!!
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}

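/*
 * Wrap-around sketch (hypothetical numbers): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, the copy is split into
 * frag_len = 96 bytes from offsets 4000..4095 followed by 104 bytes from
 * offset 0, and the returned offset is (4000 + 200) % 4096 = 104.
 */
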
/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        void                            *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

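/*
 * Sizing sketch (hypothetical numbers): the first PAGE_SIZE bytes of the
 * buffer hold the struct hv_ring_buffer control header, and only the
 * remainder carries data. For a 16-page buffer with 4 KiB pages:
 *
 *      ring_size     = 16 * 4096    = 65536
 *      ring_datasize = 65536 - 4096 = 61440
 */
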
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        /* Nothing to do: the ring memory is owned and freed by the caller. */
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        if (lock)
                spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only room for the packet, treat the ring as full:
         * if we allowed read index == write index after a write, we could
         * not tell a full ring from an empty one the next time around.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                if (lock)
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        if (lock)
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}

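/*
 * Usage sketch (illustrative only; 'my_ring', 'hdr' and 'payload' are
 * hypothetical): callers describe the packet as a kvec array and signal
 * the host themselves when asked to:
 *
 *      struct kvec kv[2] = {
 *              { .iov_base = &hdr,    .iov_len = sizeof(hdr) },
 *              { .iov_base = payload, .iov_len = payload_len },
 *      };
 *      bool signal;
 *
 *      if (hv_ringbuffer_write(&my_ring, kv, 2, &signal, true) == 0 &&
 *          signal)
 *              ; // raise the host event, e.g. via vmbus_setevent()
 */
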
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool *signal, bool raw)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;

        if (buflen == 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * No error is returned when there is not even a header;
                 * drivers are expected to check buffer_actual_len instead.
                 */
                return ret;
        }

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        *signal = hv_need_to_signal_on_read(inring_info);

        return ret;
}
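
/*
 * Descriptor arithmetic sketch (hypothetical values): offset8 and len8
 * are in units of 8 bytes. For a packet with offset8 = 2 and len8 = 66,
 * a normal (raw == false) read skips offset = 2 << 3 = 16 header bytes
 * and returns packetlen = (66 << 3) - 16 = 512 payload bytes; a raw read
 * returns all 528 bytes including the descriptor-defined header.
 */
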
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

/* Each packet is followed by an 8-byte (u64) trailer holding the ring indices. */
#define VMBUS_PKT_TRAILER       8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when not expecting
 * an interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
         * This is the only case we need to signal when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64, with the write index in
 * the upper 32 bits (the lower 32 bits are left as zero).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Because the ring is double-mapped
 * (see hv_ringbuffer_init()), a single memcpy handles wrap-around of
 * the destination; only the returned offset needs to be wrapped.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        const void                      *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}

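/*
 * Wrap-around sketch under the double mapping (hypothetical numbers):
 * with ring_buffer_size = 4096, start_write_offset = 4000 and
 * srclen = 200, the single memcpy writes through the second virtual
 * mapping of the data pages, so bytes 96..199 physically land at the
 * start of the ring; the returned offset is 4200 - 4096 = 104.
 */
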
/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc, dsize;

        /* Capture the read/write indices before they changed */
        read_loc = READ_ONCE(rbi->ring_buffer->read_index);
        write_loc = READ_ONCE(rbi->ring_buffer->write_index);
        dsize = rbi->ring_datasize;

        *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
                read_loc - write_loc;
        *read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
        pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

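/*
 * Mapping sketch (hypothetical page_cnt = 4): the pages_wraparound array
 * holds 2 * 4 - 1 = 7 entries,
 *
 *      [ P0, P1, P2, P3, P1, P2, P3 ]
 *
 * i.e. the header page P0 once, then the data pages P1..P3 twice in a
 * row. After vmap(), an access that runs off the end of the first data
 * mapping continues seamlessly into the second, which is what lets
 * hv_copyto_ringbuffer() use a single memcpy across the wrap point.
 */
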
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count)
{
        int i;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = sizeof(u64);
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices;
        unsigned long flags;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is only room for the packet, treat the ring as full:
         * if we allowed read index == write index after a write, we could
         * not tell a full ring from an empty one the next time around.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind)
                return -ENODEV;

        return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        struct vmpacket_descriptor *desc;
        u32 packetlen, offset;

        if (unlikely(buflen == 0))
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        /* Make sure there is something to read */
        desc = hv_pkt_iter_first(channel);
        if (desc == NULL) {
                /*
                 * No error is returned when there is not even a header;
                 * drivers are expected to check buffer_actual_len instead.
                 */
                return 0;
        }

        offset = raw ? 0 : (desc->offset8 << 3);
        packetlen = (desc->len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc->trans_id;

        if (unlikely(packetlen > buflen))
                return -ENOBUFS;

        /* since ring is double mapped, only one copy is necessary */
        memcpy(buffer, (const char *)desc + offset, packetlen);

        /* Advance ring index to next packet descriptor */
        __hv_pkt_iter_next(channel, desc);

        /* Notify host of update */
        hv_pkt_iter_close(channel);

        return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
        u32 priv_read_loc = rbi->priv_read_index;
        u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
        else
                return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

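/*
 * Arithmetic sketch (hypothetical numbers): with ring_datasize = 4096,
 *
 *      priv_read_index = 100, write_index = 300  ->  avail = 200
 *      priv_read_index = 300, write_index = 100  ->  avail = (4096 - 300) + 100 = 3896
 *
 * Unlike the free-space computation on the write side, this counts the
 * unread bytes between the iterator and the producer's write index.
 */
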
/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        struct vmpacket_descriptor *desc;

        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
                return NULL;

        desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
        if (desc)
                /* Hint the CPU to pull in the memory just past this packet. */
                prefetch((char *)desc + (desc->len8 << 3));

        return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
                   const struct vmpacket_descriptor *desc)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = rbi->ring_datasize;

        /* bump offset to next potential packet */
        rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
        if (rbi->priv_read_index >= dsize)
                rbi->priv_read_index -= dsize;

        /* more data? */
        return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

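/*
 * Advance sketch (hypothetical values): for a packet with len8 = 66 at
 * priv_read_index = 4000 in a ring with ring_datasize = 4096, the
 * iterator moves by (66 << 3) + VMBUS_PKT_TRAILER = 528 + 8 = 536 bytes
 * to 4536, which wraps to 4536 - 4096 = 440 before the next descriptor
 * is considered.
 */
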
/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
                                  u32 start_read_index)
{
        if (rbi->priv_read_index >= start_read_index)
                return rbi->priv_read_index - start_read_index;
        else
                return rbi->ring_datasize - start_read_index +
                        rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_rmb();
        start_read_index = rbi->ring_buffer->read_index;
        rbi->ring_buffer->read_index = rbi->priv_read_index;

        if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
                return;

        /*
         * Issue a full memory barrier before making the signaling decision.
         * Here is the reason for having this barrier:
         * If the reading of pending_send_sz (below)
         * were to be reordered and happen before we commit the new read
         * index (above), we could
         * have a problem. If the host were to set pending_send_sz after we
         * have sampled it, and then go to sleep before we commit the
         * read index, we could miss sending the interrupt. Issue a full
         * memory barrier to address this.
         */
        virt_mb();

        pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
        if (!pending_sz)
                return;

        /*
         * Ensure the read of write_index in hv_get_bytes_to_write()
         * happens after the read of pending_send_sz.
         */
        virt_rmb();
        curr_write_sz = hv_get_bytes_to_write(rbi);
        bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

        /*
         * If there was space before we began iteration,
         * then host was not blocked.
         */
        if (curr_write_sz - bytes_read > pending_sz)
                return;

        /* If pending write will not fit, don't give false hope. */
        if (curr_write_sz <= pending_sz)
                return;

        vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
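
/*
 * Guard arithmetic sketch (hypothetical numbers): suppose
 * pending_sz = 1024 and this iteration consumed bytes_read = 700,
 * leaving curr_write_sz = 1500 bytes of space now.
 *
 *      before iteration: 1500 - 700 = 800  <= 1024  -> host was blocked
 *      now:              1500             >  1024   -> pending write fits
 *
 * Both guards pass, so vmbus_setevent() wakes the host. If space before
 * the iteration had already exceeded 1024, or if 1024 bytes still did
 * not fit, the signal would be skipped.
 */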