drivers/net/wireless/ath/wil6210/pmc.c (Linux v6.8)
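This file manages host memory for the wil6210 device's PMC capture mechanism: wil_pmc_alloc() builds a descriptor ring plus per-descriptor DMA buffers and hands the ring's base address to firmware (WMI_PMC_CMDID with the WMI_PMC_ALLOCATE op), wil_pmc_free() sends the matching WMI_PMC_RELEASE and tears the memory down, and the remaining hooks back the debugfs interface for reading the captured data and the ring descriptors themselves.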
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
	dma_addr_t pa;
	void	  *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}

/* Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				  sizeof(struct desc_alloc_info),
				  GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

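		/* pre-fill the buffer so that dwords the hardware has not
		 * yet overwritten stay recognizable: serial number in the
		 * low word, PCM_DATA_INVALID_DW_VAL marker in the high word
		 */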
		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					vif->mid,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
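Note the unwind order on failure: release_pmc_skbs frees whatever per-descriptor buffers were already allocated (the loop stops at the first NULL va) and falls through to free the p-ring, release_pmc_skb_list then drops the descriptor info array, and every error path records an errno in pmc->last_cmd_status for later retrieval through wil_pmc_last_cmd_status().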

/* Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
				wmi_send(wil, WMI_PMC_CMDID, vif->mid,
					 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}

/* Status of the last operation requested via debugfs: alloc/free/read.
 * 0 - success or negative errno
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}

/* Read from required position up to the end of current descriptor,
 * depends on descriptor size configured during alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

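	/* do_div() divides its first argument in place: idx becomes the
	 * index of the descriptor holding *f_pos, and the byte offset
	 * within that descriptor is recomputed from it below
	 */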
	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* if no errors, return the copied byte count */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}
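Since simple_read_from_buffer() is handed only the current descriptor's buffer, a single read() never crosses a descriptor boundary: at most descriptor_size - offset bytes come back, and a caller walks the whole capture by reading repeatedly until read() returns 0 (a start position at or beyond pmc_size also records -ERANGE in last_cmd_status).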

loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = off;
		break;

	case 1: /* SEEK_CUR */
		newpos = filp->f_pos + off;
		break;

	case 2: /* SEEK_END */
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}

int wil_pmcring_read(struct seq_file *s, void *data)
{
	struct wil6210_priv *wil = s->private;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_ring_size =
		sizeof(struct vring_rx_desc) * pmc->num_descriptors;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);

	seq_write(s, pmc->pring_va, pmc_ring_size);

	mutex_unlock(&pmc->lock);

	return 0;
}
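
For context, here is a minimal sketch of how these hooks can be exposed through debugfs. The actual wiring lives in the driver's debugfs.c; the file name "pmcdata", the mode, and the registration helper below are illustrative assumptions, not the driver's exact symbols.

#include <linux/debugfs.h>

/* Hypothetical debugfs wiring (names are assumptions for illustration) */
static const struct file_operations fops_pmcdata = {
	.open	= simple_open,	/* copies inode->i_private to filp->private_data */
	.read	= wil_pmc_read,
	.llseek	= wil_pmc_llseek,
};

static void wil_pmc_debugfs_register(struct wil6210_priv *wil,
				     struct dentry *dbg_dir)
{
	/* debugfs_create_file() stores wil in the new inode's i_private,
	 * so simple_open() hands it to wil_pmc_read()/wil_pmc_llseek()
	 * via filp->private_data, which is what they dereference
	 */
	debugfs_create_file("pmcdata", 0444, dbg_dir, wil, &fops_pmcdata);
}

With wiring like this in place, the capture reads like an ordinary file: seek with lseek() (clamped to the pmc buffer size, as wil_pmc_llseek() implements) and read() in a loop until it returns 0.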