// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/mei.h>

#include "mei_dev.h"

/**
 * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
 *     for the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 *
 * Return:
 * * 0       - on success or zero allocation request
 * * -EINVAL - if size is not power of 2
 * * -ENOMEM - if the allocation has failed
 */
static int mei_dmam_dscr_alloc(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->size)
		return 0;

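	/*
	 * The descriptor size must be a power of two: the ring read/write
	 * indexes further down in this file are wrapped with a (depth - 1)
	 * bitmask, which only works for power-of-2 buffer depths.
	 */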
	if (WARN_ON(!is_power_of_2(dscr->size)))
		return -EINVAL;

	if (dscr->vaddr)
		return 0;

	dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
					  GFP_KERNEL);
	if (!dscr->vaddr)
		return -ENOMEM;

	return 0;
}

/**
 * mei_dmam_dscr_free() - free a managed coherent buffer
 *     from the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 */
static void mei_dmam_dscr_free(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->vaddr)
		return;

	dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
	dscr->vaddr = NULL;
}

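/*
 * The dma ring is built from DMA_DSCR_NUM dma descriptors: the host buffer
 * (DMA_DSCR_HOST) written by mei_dma_ring_write(), the device buffer
 * (DMA_DSCR_DEVICE) read by mei_dma_ring_read(), and the control block
 * (DMA_DSCR_CTRL) holding the ring read/write indexes.
 */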
/**
 * mei_dmam_ring_free() - free dma ring buffers
 * @dev: mei device
 */
void mei_dmam_ring_free(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
}

/**
 * mei_dmam_ring_alloc() - allocate dma ring buffers
 * @dev: mei device
 *
 * Return: -ENOMEM on allocation failure; 0 otherwise
 */
int mei_dmam_ring_alloc(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
			goto err;

	return 0;

err:
	mei_dmam_ring_free(dev);
	return -ENOMEM;
}

/**
 * mei_dma_ring_is_allocated() - check if dma ring is allocated
 * @dev: mei device
 *
 * Return: true if dma ring is allocated
 */
bool mei_dma_ring_is_allocated(struct mei_device *dev)
{
	return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
}

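/**
 * mei_dma_ring_ctrl() - get the dma ring control block
 * @dev: mei device
 *
 * Return: pointer to the ring control block, NULL if the ring is not allocated
 */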
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}

/**
 * mei_dma_ring_reset() - reset the dma control block
 * @dev: mei device
 */
void mei_dma_ring_reset(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);

	if (!ctrl)
		return;

	memset(ctrl, 0, sizeof(*ctrl));
}

/**
 * mei_dma_copy_from() - copy from dma ring into buffer
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
				u32 offset, u32 n)
{
	unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;

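	/* offset and length are given in 4-byte slots; convert them to bytes */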
	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(buf, dbuf + b_offset, b_n);

	return b_n;
}

/**
 * mei_dma_copy_to() - copy from a buffer to the dma ring
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
			      u32 offset, u32 n)
{
	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;

	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(hbuf + b_offset, buf, b_n);

	return b_n;
}

/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into; may be NULL in case of dropping the data.
 * @len: length to read.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by just advancing the read index */
	if (!buf)
		goto out;

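	/*
	 * The read may wrap around the end of the device buffer: copy the
	 * tail of the buffer first, then continue from slot 0.
	 */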
	if (rd_idx + slots > dbuf_depth) {
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}

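/**
 * mei_dma_ring_hbuf_depth() - get the host dma ring buffer depth
 * @dev: mei device
 *
 * Return: depth of the host buffer in 4-byte slots
 */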
static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
{
	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
}

/**
 * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
 * @dev: mei_device
 *
 * Return: number of empty slots
 */
u32 mei_dma_ring_empty_slots(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 wr_idx, rd_idx, hbuf_depth, empty;

	if (!mei_dma_ring_is_allocated(dev))
		return 0;

	if (WARN_ON(!ctrl))
		return 0;

	/* easier to work in slots */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);

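	/*
	 * The occupied slot count is the distance from the read index to the
	 * write index; whatever remains of the buffer depth is empty.
	 */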
	if (rd_idx > wr_idx)
		empty = rd_idx - wr_idx;
	else
		empty = hbuf_depth - (wr_idx - rd_idx);

	return empty;
}

/**
 * mei_dma_ring_write() - write data to dma ring host buffer
 *
 * @dev: mei_device
 * @buf: data to be written
 * @len: data length
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);

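	/*
	 * The write may wrap around the end of the host buffer: copy the
	 * tail of the buffer first, then continue from slot 0.
	 */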
	if (wr_idx + slots > hbuf_depth) {
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);

	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
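
/*
 * Usage sketch (hypothetical caller, for illustration only): a writer is
 * expected to make sure the ring is allocated and has enough empty slots
 * before pushing data, e.g.:
 *
 *	if (mei_dma_ring_is_allocated(dev) &&
 *	    mei_dma_ring_empty_slots(dev) >= mei_data2slots(len))
 *		mei_dma_ring_write(dev, buf, len);
 */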