Linux Audio

v6.2
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * virtio-snd: Virtio sound device
  4 * Copyright (C) 2021 OpenSynergy GmbH
  5 */
  6#include <sound/pcm_params.h>
  7
  8#include "virtio_card.h"
  9
 10/**
 11 * struct virtio_pcm_msg - VirtIO I/O message.
 12 * @substream: VirtIO PCM substream.
 13 * @xfer: Request header payload.
 14 * @status: Response header payload.
 15 * @length: Data length in bytes.
 16 * @sgs: Payload scatter-gather table.
 17 */
 18struct virtio_pcm_msg {
 19	struct virtio_pcm_substream *substream;
 20	struct virtio_snd_pcm_xfer xfer;
 21	struct virtio_snd_pcm_status status;
 22	size_t length;
 23	struct scatterlist sgs[];
 24};
 25
 26/**
 27 * enum pcm_msg_sg_index - Index values for the virtio_pcm_msg->sgs field in
 28 *                         an I/O message.
 29 * @PCM_MSG_SG_XFER: Element containing a virtio_snd_pcm_xfer structure.
 30 * @PCM_MSG_SG_STATUS: Element containing a virtio_snd_pcm_status structure.
 31 * @PCM_MSG_SG_DATA: The first element containing a data buffer.
 32 */
 33enum pcm_msg_sg_index {
 34	PCM_MSG_SG_XFER = 0,
 35	PCM_MSG_SG_STATUS,
 36	PCM_MSG_SG_DATA
 37};
 38
 39/**
 40 * virtsnd_pcm_sg_num() - Count the number of sg-elements required to represent
 41 *                        a vmalloc'ed buffer.
 42 * @data: Pointer to vmalloc'ed buffer.
 43 * @length: Buffer size.
 44 *
 45 * Context: Any context.
 46 * Return: Number of physically contiguous parts in the @data.
 47 */
 48static int virtsnd_pcm_sg_num(u8 *data, unsigned int length)
 49{
 50	phys_addr_t sg_address;
 51	unsigned int sg_length;
 52	int num = 0;
 53
 54	while (length) {
 55		struct page *pg = vmalloc_to_page(data);
 56		phys_addr_t pg_address = page_to_phys(pg);
 57		size_t pg_length;
 58
 59		pg_length = PAGE_SIZE - offset_in_page(data);
 60		if (pg_length > length)
 61			pg_length = length;
 62
 63		if (!num || sg_address + sg_length != pg_address) {
 64			sg_address = pg_address;
 65			sg_length = pg_length;
 66			num++;
 67		} else {
 68			sg_length += pg_length;
 69		}
 70
 71		data += pg_length;
 72		length -= pg_length;
 73	}
 74
 75	return num;
 76}
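/*
 * Editorial note (not part of the original file): a worked example of the
 * counting above, assuming PAGE_SIZE == 4096. For a vmalloc'ed buffer that
 * starts 1024 bytes into a page with length == 8192, the loop visits chunks
 * of 3072, 4096 and 1024 bytes. If none of the underlying pages are
 * physically adjacent, the function returns 3; if all of them happen to be
 * physically contiguous, the runs merge and it returns 1.
 */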
 77
 78/**
 79 * virtsnd_pcm_sg_from() - Build sg-list from vmalloc'ed buffer.
 80 * @sgs: Preallocated sg-list to populate.
 81 * @nsgs: The maximum number of elements in the @sgs.
 82 * @data: Pointer to vmalloc'ed buffer.
 83 * @length: Buffer size.
 84 *
 85 * Splits the buffer into physically contiguous parts and makes an sg-list of
 86 * such parts.
 87 *
 88 * Context: Any context.
 89 */
 90static void virtsnd_pcm_sg_from(struct scatterlist *sgs, int nsgs, u8 *data,
 91				unsigned int length)
 92{
 93	int idx = -1;
 94
 95	while (length) {
 96		struct page *pg = vmalloc_to_page(data);
 97		size_t pg_length;
 98
 99		pg_length = PAGE_SIZE - offset_in_page(data);
100		if (pg_length > length)
101			pg_length = length;
102
103		if (idx == -1 ||
104		    sg_phys(&sgs[idx]) + sgs[idx].length != page_to_phys(pg)) {
105			if (idx + 1 == nsgs)
106				break;
107			sg_set_page(&sgs[++idx], pg, pg_length,
108				    offset_in_page(data));
109		} else {
110			sgs[idx].length += pg_length;
111		}
112
113		data += pg_length;
114		length -= pg_length;
115	}
116
117	sg_mark_end(&sgs[idx]);
118}
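/*
 * Editorial note (not part of the original file): virtsnd_pcm_sg_from() is
 * the filling counterpart of virtsnd_pcm_sg_num(); callers size the sg array
 * with virtsnd_pcm_sg_num() first and then populate it here, so the
 * "idx + 1 == nsgs" early exit is not expected to trigger in practice. The
 * final sg_mark_end() terminates the chain; it relies on @length being
 * non-zero (callers pass period_bytes), otherwise idx would still be -1.
 */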
119
120/**
121 * virtsnd_pcm_msg_alloc() - Allocate I/O messages.
122 * @vss: VirtIO PCM substream.
123 * @periods: Current number of periods.
124 * @period_bytes: Current period size in bytes.
125 *
126 * The function slices the buffer into @periods parts (each with the size of
127 * @period_bytes), and creates @periods corresponding I/O messages.
128 *
 129 * Context: Any context that permits sleeping.
130 * Return: 0 on success, -ENOMEM on failure.
131 */
132int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
133			  unsigned int periods, unsigned int period_bytes)
134{
135	struct snd_pcm_runtime *runtime = vss->substream->runtime;
136	unsigned int i;
137
138	vss->msgs = kcalloc(periods, sizeof(*vss->msgs), GFP_KERNEL);
139	if (!vss->msgs)
140		return -ENOMEM;
141
142	vss->nmsgs = periods;
143
144	for (i = 0; i < periods; ++i) {
145		u8 *data = runtime->dma_area + period_bytes * i;
146		int sg_num = virtsnd_pcm_sg_num(data, period_bytes);
147		struct virtio_pcm_msg *msg;
148
149		msg = kzalloc(struct_size(msg, sgs, sg_num + 2), GFP_KERNEL);
150		if (!msg)
151			return -ENOMEM;
152
153		msg->substream = vss;
154		sg_init_one(&msg->sgs[PCM_MSG_SG_XFER], &msg->xfer,
155			    sizeof(msg->xfer));
156		sg_init_one(&msg->sgs[PCM_MSG_SG_STATUS], &msg->status,
157			    sizeof(msg->status));
158		msg->length = period_bytes;
159		virtsnd_pcm_sg_from(&msg->sgs[PCM_MSG_SG_DATA], sg_num, data,
160				    period_bytes);
161
162		vss->msgs[i] = msg;
163	}
164
165	return 0;
166}
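/*
 * Editorial note (not part of the original file): the resulting per-message
 * sg layout, which is why struct_size() above reserves sg_num + 2 entries:
 *
 *   sgs[PCM_MSG_SG_XFER]     -> msg->xfer   (request header)
 *   sgs[PCM_MSG_SG_STATUS]   -> msg->status (response header)
 *   sgs[PCM_MSG_SG_DATA...]  -> up to sg_num entries covering one period of
 *                               the vmalloc'ed DMA area
 */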
167
168/**
169 * virtsnd_pcm_msg_free() - Free all allocated I/O messages.
170 * @vss: VirtIO PCM substream.
171 *
172 * Context: Any context.
173 */
174void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss)
175{
176	unsigned int i;
177
178	for (i = 0; vss->msgs && i < vss->nmsgs; ++i)
179		kfree(vss->msgs[i]);
180	kfree(vss->msgs);
181
182	vss->msgs = NULL;
183	vss->nmsgs = 0;
184}
185
186/**
187 * virtsnd_pcm_msg_send() - Send asynchronous I/O messages.
188 * @vss: VirtIO PCM substream.
189 *
190 * All messages are organized in an ordered circular list. Each time the
191 * function is called, all currently non-enqueued messages are added to the
192 * virtqueue. For this, the function keeps track of two values:
193 *
194 *   msg_last_enqueued = index of the last enqueued message,
195 *   msg_count = # of pending messages in the virtqueue.
196 *
197 * Context: Any context. Expects the tx/rx queue and the VirtIO substream
198 *          spinlocks to be held by caller.
199 * Return: 0 on success, -errno on failure.
200 */
201int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss)
202{
203	struct snd_pcm_runtime *runtime = vss->substream->runtime;
204	struct virtio_snd *snd = vss->snd;
205	struct virtio_device *vdev = snd->vdev;
206	struct virtqueue *vqueue = virtsnd_pcm_queue(vss)->vqueue;
207	int i;
208	int n;
209	bool notify = false;
210
211	i = (vss->msg_last_enqueued + 1) % runtime->periods;
212	n = runtime->periods - vss->msg_count;
213
214	for (; n; --n, i = (i + 1) % runtime->periods) {
215		struct virtio_pcm_msg *msg = vss->msgs[i];
216		struct scatterlist *psgs[] = {
217			&msg->sgs[PCM_MSG_SG_XFER],
218			&msg->sgs[PCM_MSG_SG_DATA],
219			&msg->sgs[PCM_MSG_SG_STATUS]
220		};
221		int rc;
222
223		msg->xfer.stream_id = cpu_to_le32(vss->sid);
224		memset(&msg->status, 0, sizeof(msg->status));
225
226		if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
227			rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
228					       GFP_ATOMIC);
229		else
230			rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
231					       GFP_ATOMIC);
232
233		if (rc) {
234			dev_err(&vdev->dev,
235				"SID %u: failed to send I/O message\n",
236				vss->sid);
237			return rc;
238		}
239
240		vss->msg_last_enqueued = i;
241		vss->msg_count++;
242	}
243
244	if (!(vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)))
245		notify = virtqueue_kick_prepare(vqueue);
246
247	if (notify)
248		virtqueue_notify(vqueue);
249
250	return 0;
251}
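/*
 * Editorial note (not part of the original file): a worked example of the
 * circular bookkeeping above. With runtime->periods == 4, msg_last_enqueued
 * == 1 and msg_count == 1, the loop starts at i == 2 with n == 3 and
 * enqueues messages 2, 3 and 0; afterwards msg_last_enqueued == 0 and
 * msg_count == 4. The out/in split passed to virtqueue_add_sgs() depends on
 * the direction: for playback the xfer header and the data are readable by
 * the device (2 out) and only the status is written back (1 in), while for
 * capture only the xfer header is readable (1 out) and both the data and
 * the status are written by the device (2 in).
 */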
252
253/**
254 * virtsnd_pcm_msg_pending_num() - Returns the number of pending I/O messages.
255 * @vss: VirtIO substream.
256 *
257 * Context: Any context.
258 * Return: Number of messages.
259 */
260unsigned int virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream *vss)
261{
262	unsigned int num;
263	unsigned long flags;
264
265	spin_lock_irqsave(&vss->lock, flags);
266	num = vss->msg_count;
267	spin_unlock_irqrestore(&vss->lock, flags);
268
269	return num;
270}
271
272/**
273 * virtsnd_pcm_msg_complete() - Complete an I/O message.
274 * @msg: I/O message.
275 * @written_bytes: Number of bytes written to the message.
276 *
 277 * Completion of a message means that a period has elapsed. If transmission is
278 * allowed, then each completed message is immediately placed back at the end
279 * of the queue.
280 *
281 * For the playback substream, @written_bytes is equal to sizeof(msg->status).
282 *
283 * For the capture substream, @written_bytes is equal to sizeof(msg->status)
284 * plus the number of captured bytes.
285 *
286 * Context: Interrupt context. Takes and releases the VirtIO substream spinlock.
287 */
288static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
289				     size_t written_bytes)
290{
291	struct virtio_pcm_substream *vss = msg->substream;
292
293	/*
294	 * hw_ptr always indicates the buffer position of the first I/O message
295	 * in the virtqueue. Therefore, on each completion of an I/O message,
296	 * the hw_ptr value is unconditionally advanced.
297	 */
298	spin_lock(&vss->lock);
299	/*
300	 * If the capture substream returned an incorrect status, then just
301	 * increase the hw_ptr by the message size.
302	 */
303	if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK ||
304	    written_bytes <= sizeof(msg->status))
305		vss->hw_ptr += msg->length;
306	else
307		vss->hw_ptr += written_bytes - sizeof(msg->status);
308
309	if (vss->hw_ptr >= vss->buffer_bytes)
310		vss->hw_ptr -= vss->buffer_bytes;
311
312	vss->xfer_xrun = false;
313	vss->msg_count--;
314
315	if (vss->xfer_enabled) {
316		struct snd_pcm_runtime *runtime = vss->substream->runtime;
317
318		runtime->delay =
319			bytes_to_frames(runtime,
320					le32_to_cpu(msg->status.latency_bytes));
321
322		schedule_work(&vss->elapsed_period);
323
324		virtsnd_pcm_msg_send(vss);
325	} else if (!vss->msg_count) {
326		wake_up_all(&vss->msg_empty);
327	}
328	spin_unlock(&vss->lock);
329}
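/*
 * Editorial note (not part of the original file): since a single message
 * never spans more than one period, the advance applied to hw_ptr above is
 * always smaller than buffer_bytes, so the single subtraction is enough to
 * wrap the pointer. While the substream is running, the completed message is
 * immediately handed back to the device via virtsnd_pcm_msg_send(), and the
 * period-elapsed notification is deferred to the vss->elapsed_period work
 * item, presumably to avoid calling snd_pcm_period_elapsed() with the
 * substream spinlock held.
 */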
330
331/**
332 * virtsnd_pcm_notify_cb() - Process all completed I/O messages.
333 * @queue: Underlying tx/rx virtqueue.
334 *
335 * Context: Interrupt context. Takes and releases the tx/rx queue spinlock.
336 */
337static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
338{
339	struct virtio_pcm_msg *msg;
340	u32 written_bytes;
341	unsigned long flags;
342
343	spin_lock_irqsave(&queue->lock, flags);
344	do {
345		virtqueue_disable_cb(queue->vqueue);
346		while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
347			virtsnd_pcm_msg_complete(msg, written_bytes);
348		if (unlikely(virtqueue_is_broken(queue->vqueue)))
349			break;
350	} while (!virtqueue_enable_cb(queue->vqueue));
351	spin_unlock_irqrestore(&queue->lock, flags);
352}
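/*
 * Editorial note (not part of the original file): the loop above is the
 * usual virtio completion pattern: callbacks are disabled while the used
 * ring is drained with virtqueue_get_buf(), and virtqueue_enable_cb()
 * returns false if more buffers arrived in the meantime, in which case the
 * queue is drained again so that no completion is missed.
 */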
353
354/**
355 * virtsnd_pcm_tx_notify_cb() - Process all completed TX messages.
356 * @vqueue: Underlying tx virtqueue.
357 *
358 * Context: Interrupt context.
359 */
360void virtsnd_pcm_tx_notify_cb(struct virtqueue *vqueue)
361{
362	struct virtio_snd *snd = vqueue->vdev->priv;
363
364	virtsnd_pcm_notify_cb(virtsnd_tx_queue(snd));
365}
366
367/**
368 * virtsnd_pcm_rx_notify_cb() - Process all completed RX messages.
369 * @vqueue: Underlying rx virtqueue.
370 *
371 * Context: Interrupt context.
372 */
373void virtsnd_pcm_rx_notify_cb(struct virtqueue *vqueue)
374{
375	struct virtio_snd *snd = vqueue->vdev->priv;
376
377	virtsnd_pcm_notify_cb(virtsnd_rx_queue(snd));
378}
379
380/**
381 * virtsnd_pcm_ctl_msg_alloc() - Allocate and initialize the PCM device control
382 *                               message for the specified substream.
383 * @vss: VirtIO PCM substream.
384 * @command: Control request code (VIRTIO_SND_R_PCM_XXX).
385 * @gfp: Kernel flags for memory allocation.
386 *
387 * Context: Any context. May sleep if @gfp flags permit.
388 * Return: Allocated message on success, NULL on failure.
389 */
390struct virtio_snd_msg *
391virtsnd_pcm_ctl_msg_alloc(struct virtio_pcm_substream *vss,
392			  unsigned int command, gfp_t gfp)
393{
394	size_t request_size = sizeof(struct virtio_snd_pcm_hdr);
395	size_t response_size = sizeof(struct virtio_snd_hdr);
396	struct virtio_snd_msg *msg;
397
398	switch (command) {
399	case VIRTIO_SND_R_PCM_SET_PARAMS:
400		request_size = sizeof(struct virtio_snd_pcm_set_params);
401		break;
402	}
403
404	msg = virtsnd_ctl_msg_alloc(request_size, response_size, gfp);
405	if (msg) {
406		struct virtio_snd_pcm_hdr *hdr = virtsnd_ctl_msg_request(msg);
407
408		hdr->hdr.code = cpu_to_le32(command);
409		hdr->stream_id = cpu_to_le32(vss->sid);
410	}
411
412	return msg;
413}
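/*
 * Editorial sketch (not part of the original file): how a caller might pair
 * virtsnd_pcm_ctl_msg_alloc() with the synchronous control-queue helper.
 * virtsnd_ctl_msg_send_sync() is assumed to be the helper provided by
 * virtio_ctl_msg.c; the example function name is made up.
 */
#if 0
static int virtsnd_pcm_example_start(struct virtio_pcm_substream *vss)
{
	struct virtio_snd_msg *msg;

	msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_START,
					GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Send the request and wait for the device's response code. */
	return virtsnd_ctl_msg_send_sync(vss->snd, msg);
}
#endif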
v6.8
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * virtio-snd: Virtio sound device
  4 * Copyright (C) 2021 OpenSynergy GmbH
  5 */
  6#include <sound/pcm_params.h>
  7
  8#include "virtio_card.h"
  9
 10/**
 11 * struct virtio_pcm_msg - VirtIO I/O message.
 12 * @substream: VirtIO PCM substream.
 13 * @xfer: Request header payload.
 14 * @status: Response header payload.
 15 * @length: Data length in bytes.
 16 * @sgs: Payload scatter-gather table.
 17 */
 18struct virtio_pcm_msg {
 19	struct virtio_pcm_substream *substream;
 20	struct virtio_snd_pcm_xfer xfer;
 21	struct virtio_snd_pcm_status status;
 22	size_t length;
 23	struct scatterlist sgs[];
 24};
 25
 26/**
 27 * enum pcm_msg_sg_index - Index values for the virtio_pcm_msg->sgs field in
 28 *                         an I/O message.
 29 * @PCM_MSG_SG_XFER: Element containing a virtio_snd_pcm_xfer structure.
 30 * @PCM_MSG_SG_STATUS: Element containing a virtio_snd_pcm_status structure.
 31 * @PCM_MSG_SG_DATA: The first element containing a data buffer.
 32 */
 33enum pcm_msg_sg_index {
 34	PCM_MSG_SG_XFER = 0,
 35	PCM_MSG_SG_STATUS,
 36	PCM_MSG_SG_DATA
 37};
 38
 39/**
 40 * virtsnd_pcm_sg_num() - Count the number of sg-elements required to represent
 41 *                        a vmalloc'ed buffer.
 42 * @data: Pointer to vmalloc'ed buffer.
 43 * @length: Buffer size.
 44 *
 45 * Context: Any context.
 46 * Return: Number of physically contiguous parts in the @data.
 47 */
 48static int virtsnd_pcm_sg_num(u8 *data, unsigned int length)
 49{
 50	phys_addr_t sg_address;
 51	unsigned int sg_length;
 52	int num = 0;
 53
 54	while (length) {
 55		struct page *pg = vmalloc_to_page(data);
 56		phys_addr_t pg_address = page_to_phys(pg);
 57		size_t pg_length;
 58
 59		pg_length = PAGE_SIZE - offset_in_page(data);
 60		if (pg_length > length)
 61			pg_length = length;
 62
 63		if (!num || sg_address + sg_length != pg_address) {
 64			sg_address = pg_address;
 65			sg_length = pg_length;
 66			num++;
 67		} else {
 68			sg_length += pg_length;
 69		}
 70
 71		data += pg_length;
 72		length -= pg_length;
 73	}
 74
 75	return num;
 76}
 77
 78/**
 79 * virtsnd_pcm_sg_from() - Build sg-list from vmalloc'ed buffer.
 80 * @sgs: Preallocated sg-list to populate.
 81 * @nsgs: The maximum number of elements in the @sgs.
 82 * @data: Pointer to vmalloc'ed buffer.
 83 * @length: Buffer size.
 84 *
 85 * Splits the buffer into physically contiguous parts and makes an sg-list of
 86 * such parts.
 87 *
 88 * Context: Any context.
 89 */
 90static void virtsnd_pcm_sg_from(struct scatterlist *sgs, int nsgs, u8 *data,
 91				unsigned int length)
 92{
 93	int idx = -1;
 94
 95	while (length) {
 96		struct page *pg = vmalloc_to_page(data);
 97		size_t pg_length;
 98
 99		pg_length = PAGE_SIZE - offset_in_page(data);
100		if (pg_length > length)
101			pg_length = length;
102
103		if (idx == -1 ||
104		    sg_phys(&sgs[idx]) + sgs[idx].length != page_to_phys(pg)) {
105			if (idx + 1 == nsgs)
106				break;
107			sg_set_page(&sgs[++idx], pg, pg_length,
108				    offset_in_page(data));
109		} else {
110			sgs[idx].length += pg_length;
111		}
112
113		data += pg_length;
114		length -= pg_length;
115	}
116
117	sg_mark_end(&sgs[idx]);
118}
119
120/**
121 * virtsnd_pcm_msg_alloc() - Allocate I/O messages.
122 * @vss: VirtIO PCM substream.
123 * @periods: Current number of periods.
124 * @period_bytes: Current period size in bytes.
125 *
126 * The function slices the buffer into @periods parts (each with the size of
127 * @period_bytes), and creates @periods corresponding I/O messages.
128 *
 129 * Context: Any context that permits sleeping.
130 * Return: 0 on success, -ENOMEM on failure.
131 */
132int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
133			  unsigned int periods, unsigned int period_bytes)
134{
135	struct snd_pcm_runtime *runtime = vss->substream->runtime;
136	unsigned int i;
137
138	vss->msgs = kcalloc(periods, sizeof(*vss->msgs), GFP_KERNEL);
139	if (!vss->msgs)
140		return -ENOMEM;
141
142	vss->nmsgs = periods;
143
144	for (i = 0; i < periods; ++i) {
145		u8 *data = runtime->dma_area + period_bytes * i;
146		int sg_num = virtsnd_pcm_sg_num(data, period_bytes);
147		struct virtio_pcm_msg *msg;
148
149		msg = kzalloc(struct_size(msg, sgs, sg_num + 2), GFP_KERNEL);
150		if (!msg)
151			return -ENOMEM;
152
153		msg->substream = vss;
154		sg_init_one(&msg->sgs[PCM_MSG_SG_XFER], &msg->xfer,
155			    sizeof(msg->xfer));
156		sg_init_one(&msg->sgs[PCM_MSG_SG_STATUS], &msg->status,
157			    sizeof(msg->status));
158		virtsnd_pcm_sg_from(&msg->sgs[PCM_MSG_SG_DATA], sg_num, data,
159				    period_bytes);
160
161		vss->msgs[i] = msg;
162	}
163
164	return 0;
165}
166
167/**
168 * virtsnd_pcm_msg_free() - Free all allocated I/O messages.
169 * @vss: VirtIO PCM substream.
170 *
171 * Context: Any context.
172 */
173void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss)
174{
175	unsigned int i;
176
177	for (i = 0; vss->msgs && i < vss->nmsgs; ++i)
178		kfree(vss->msgs[i]);
179	kfree(vss->msgs);
180
181	vss->msgs = NULL;
182	vss->nmsgs = 0;
183}
184
185/**
186 * virtsnd_pcm_msg_send() - Send asynchronous I/O messages.
187 * @vss: VirtIO PCM substream.
188 * @offset: starting position that has been updated
 190 * @bytes: number of bytes that have been updated
190 *
191 * All messages are organized in an ordered circular list. Each time the
192 * function is called, all currently non-enqueued messages are added to the
 193 * virtqueue. For this, the function uses @offset and @bytes to calculate which
 194 * messages need to be added.
195 *
196 * Context: Any context. Expects the tx/rx queue and the VirtIO substream
197 *          spinlocks to be held by caller.
198 * Return: 0 on success, -errno on failure.
199 */
200int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss, unsigned long offset,
201			 unsigned long bytes)
202{
203	struct virtio_snd *snd = vss->snd;
204	struct virtio_device *vdev = snd->vdev;
205	struct virtqueue *vqueue = virtsnd_pcm_queue(vss)->vqueue;
206	unsigned long period_bytes = snd_pcm_lib_period_bytes(vss->substream);
207	unsigned long start, end, i;
208	unsigned int msg_count = vss->msg_count;
209	bool notify = false;
210	int rc;
211
212	start = offset / period_bytes;
213	end = (offset + bytes - 1) / period_bytes;
214
215	for (i = start; i <= end; i++) {
216		struct virtio_pcm_msg *msg = vss->msgs[i];
217		struct scatterlist *psgs[] = {
218			&msg->sgs[PCM_MSG_SG_XFER],
219			&msg->sgs[PCM_MSG_SG_DATA],
220			&msg->sgs[PCM_MSG_SG_STATUS]
221		};
222		unsigned long n;
223
224		n = period_bytes - (offset % period_bytes);
225		if (n > bytes)
226			n = bytes;
227
228		msg->length += n;
229		if (msg->length == period_bytes) {
230			msg->xfer.stream_id = cpu_to_le32(vss->sid);
231			memset(&msg->status, 0, sizeof(msg->status));
232
233			if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
234				rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
235						       GFP_ATOMIC);
236			else
237				rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
238						       GFP_ATOMIC);
239
240			if (rc) {
241				dev_err(&vdev->dev,
242					"SID %u: failed to send I/O message\n",
243					vss->sid);
244				return rc;
245			}
246
247			vss->msg_count++;
248		}
249
250		offset = 0;
251		bytes -= n;
252	}
253
254	if (msg_count == vss->msg_count)
255		return 0;
256
257	if (!(vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)))
258		notify = virtqueue_kick_prepare(vqueue);
259
260	if (notify)
261		virtqueue_notify(vqueue);
262
263	return 0;
264}
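/*
 * Editorial note (not part of the original file): a worked example of the
 * offset/bytes arithmetic above, assuming period_bytes == 4096. A call with
 * offset == 6144 and bytes == 4096 (the first 2048 bytes of period 1 having
 * been accounted for by an earlier call) gives start == 1 and end == 2. For
 * i == 1 the chunk is n == 2048, which brings msgs[1]->length to 4096, so
 * that message is enqueued; for i == 2 the remaining 2048 bytes only
 * partially fill msgs[2], which stays pending until a later call completes
 * the period. The virtqueue is kicked only if at least one message was
 * actually enqueued and VIRTIO_SND_PCM_F_MSG_POLLING was not negotiated.
 */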
265
266/**
267 * virtsnd_pcm_msg_pending_num() - Returns the number of pending I/O messages.
268 * @vss: VirtIO substream.
269 *
270 * Context: Any context.
271 * Return: Number of messages.
272 */
273unsigned int virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream *vss)
274{
275	unsigned int num;
276	unsigned long flags;
277
278	spin_lock_irqsave(&vss->lock, flags);
279	num = vss->msg_count;
280	spin_unlock_irqrestore(&vss->lock, flags);
281
282	return num;
283}
284
285/**
286 * virtsnd_pcm_msg_complete() - Complete an I/O message.
287 * @msg: I/O message.
288 * @written_bytes: Number of bytes written to the message.
289 *
 290 * Completion of a message means that a period has elapsed. If transmission is
291 * allowed, then each completed message is immediately placed back at the end
292 * of the queue.
293 *
294 * For the playback substream, @written_bytes is equal to sizeof(msg->status).
295 *
296 * For the capture substream, @written_bytes is equal to sizeof(msg->status)
297 * plus the number of captured bytes.
298 *
299 * Context: Interrupt context. Takes and releases the VirtIO substream spinlock.
300 */
301static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
302				     size_t written_bytes)
303{
304	struct virtio_pcm_substream *vss = msg->substream;
305
306	/*
307	 * hw_ptr always indicates the buffer position of the first I/O message
308	 * in the virtqueue. Therefore, on each completion of an I/O message,
309	 * the hw_ptr value is unconditionally advanced.
310	 */
311	spin_lock(&vss->lock);
312	/*
313	 * If the capture substream returned an incorrect status, then just
314	 * increase the hw_ptr by the message size.
315	 */
316	if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK ||
317	    written_bytes <= sizeof(msg->status))
318		vss->hw_ptr += msg->length;
319	else
320		vss->hw_ptr += written_bytes - sizeof(msg->status);
321
322	if (vss->hw_ptr >= vss->buffer_bytes)
323		vss->hw_ptr -= vss->buffer_bytes;
324
325	msg->length = 0;
326
327	vss->xfer_xrun = false;
328	vss->msg_count--;
329
330	if (vss->xfer_enabled) {
331		struct snd_pcm_runtime *runtime = vss->substream->runtime;
332
333		runtime->delay =
334			bytes_to_frames(runtime,
335					le32_to_cpu(msg->status.latency_bytes));
336
337		schedule_work(&vss->elapsed_period);
338	} else if (!vss->msg_count) {
339		wake_up_all(&vss->msg_empty);
340	}
341	spin_unlock(&vss->lock);
342}
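/*
 * Editorial note (not part of the original file): unlike the older variant
 * of this function, the completed message is not re-enqueued from here.
 * msg->length is reset to 0 so that virtsnd_pcm_msg_send() can accumulate
 * the next period for this slot; re-queuing is expected to be driven by the
 * substream's pointer-update path, which calls virtsnd_pcm_msg_send() with
 * the newly updated region of the buffer.
 */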
343
344/**
345 * virtsnd_pcm_notify_cb() - Process all completed I/O messages.
346 * @queue: Underlying tx/rx virtqueue.
347 *
348 * Context: Interrupt context. Takes and releases the tx/rx queue spinlock.
349 */
350static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
351{
352	struct virtio_pcm_msg *msg;
353	u32 written_bytes;
354	unsigned long flags;
355
356	spin_lock_irqsave(&queue->lock, flags);
357	do {
358		virtqueue_disable_cb(queue->vqueue);
359		while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
360			virtsnd_pcm_msg_complete(msg, written_bytes);
361	} while (!virtqueue_enable_cb(queue->vqueue));
362	spin_unlock_irqrestore(&queue->lock, flags);
363}
364
365/**
366 * virtsnd_pcm_tx_notify_cb() - Process all completed TX messages.
367 * @vqueue: Underlying tx virtqueue.
368 *
369 * Context: Interrupt context.
370 */
371void virtsnd_pcm_tx_notify_cb(struct virtqueue *vqueue)
372{
373	struct virtio_snd *snd = vqueue->vdev->priv;
374
375	virtsnd_pcm_notify_cb(virtsnd_tx_queue(snd));
376}
377
378/**
379 * virtsnd_pcm_rx_notify_cb() - Process all completed RX messages.
380 * @vqueue: Underlying rx virtqueue.
381 *
382 * Context: Interrupt context.
383 */
384void virtsnd_pcm_rx_notify_cb(struct virtqueue *vqueue)
385{
386	struct virtio_snd *snd = vqueue->vdev->priv;
387
388	virtsnd_pcm_notify_cb(virtsnd_rx_queue(snd));
389}
390
391/**
392 * virtsnd_pcm_ctl_msg_alloc() - Allocate and initialize the PCM device control
393 *                               message for the specified substream.
394 * @vss: VirtIO PCM substream.
395 * @command: Control request code (VIRTIO_SND_R_PCM_XXX).
396 * @gfp: Kernel flags for memory allocation.
397 *
398 * Context: Any context. May sleep if @gfp flags permit.
399 * Return: Allocated message on success, NULL on failure.
400 */
401struct virtio_snd_msg *
402virtsnd_pcm_ctl_msg_alloc(struct virtio_pcm_substream *vss,
403			  unsigned int command, gfp_t gfp)
404{
405	size_t request_size = sizeof(struct virtio_snd_pcm_hdr);
406	size_t response_size = sizeof(struct virtio_snd_hdr);
407	struct virtio_snd_msg *msg;
408
409	switch (command) {
410	case VIRTIO_SND_R_PCM_SET_PARAMS:
411		request_size = sizeof(struct virtio_snd_pcm_set_params);
412		break;
413	}
414
415	msg = virtsnd_ctl_msg_alloc(request_size, response_size, gfp);
416	if (msg) {
417		struct virtio_snd_pcm_hdr *hdr = virtsnd_ctl_msg_request(msg);
418
419		hdr->hdr.code = cpu_to_le32(command);
420		hdr->stream_id = cpu_to_le32(vss->sid);
421	}
422
423	return msg;
424}