/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * Register access.
 */
int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	/*
	 * Poll the register until the busy field clears:
	 * return 1 on success, 0 when the device never responds.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00pci_register_read(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	ERROR(rt2x00dev, "Indirect register access failed: "
	      "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);

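/*
 * Usage sketch (illustrative, not part of this file): a chipset driver
 * typically polls the indirect-access CSR with rt2x00pci_regbusy_read()
 * until the busy bit clears before writing BBP/RF data, while holding
 * the csr_mutex that rt2x00 uses to serialize indirect register access.
 * The register and field names below (EXAMPLE_BBPCSR, EXAMPLE_BBPCSR_*)
 * are hypothetical placeholders for the chipset specific definitions.
 */
static void example_bbp_write(struct rt2x00_dev *rt2x00dev,
			      const unsigned int word, const u8 value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait for the BBP interface to become available; on timeout
	 * rt2x00pci_regbusy_read() returns 0 and the write is dropped.
	 */
	if (rt2x00pci_regbusy_read(rt2x00dev, EXAMPLE_BBPCSR,
				   EXAMPLE_BBPCSR_BUSY, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, EXAMPLE_BBPCSR_VALUE, value);
		rt2x00_set_field32(&reg, EXAMPLE_BBPCSR_REGNUM, word);
		rt2x00_set_field32(&reg, EXAMPLE_BBPCSR_BUSY, 1);

		rt2x00pci_register_write(rt2x00dev, EXAMPLE_BBPCSR, reg);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}
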
bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci *entry_priv;
	struct skb_frame_desc *skbdesc;
	int max_rx = 16;

	while (--max_rx) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;

		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * DMA is already done, notify rt2x00lib that
		 * it finished successfully.
		 */
		rt2x00lib_dmastart(entry);
		rt2x00lib_dmadone(entry);

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry);
	}

	/*
	 * Return true when the frame budget was exhausted, so the caller
	 * knows more frames might still be pending and can reschedule.
	 */
	return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

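/*
 * Usage sketch (illustrative): the chipset driver usually defers RX
 * processing from its interrupt handler to a tasklet and uses the
 * return value of rt2x00pci_rxdone() to reschedule itself while
 * frames keep arriving. The rxdone_tasklet member name is an
 * assumption modelled on the rt2800pci-style drivers.
 */
static void example_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	/* Budget exhausted: more frames may be pending, run again. */
	if (rt2x00pci_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
}
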
void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
{
	unsigned int i;

	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
		msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);

/*
 * Device initialization handlers.
 */
static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  &dma, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, queue->limit * queue->desc_size);

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(rt2x00dev->irq,
			     rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      rt2x00dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

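/*
 * Usage sketch (illustrative): chipset drivers do not call the
 * initialize/uninitialize/flush helpers directly; they plug them into
 * their struct rt2x00lib_ops so rt2x00lib invokes them at device
 * initialization and teardown. The chipset specific callbacks are
 * omitted here.
 */
static const struct rt2x00lib_ops example_rt2x00_ops = {
	/* ... irq_handler, get_entry_state and other chipset callbacks ... */
	.initialize	= rt2x00pci_initialize,
	.uninitialize	= rt2x00pci_uninitialize,
	.flush_queue	= rt2x00pci_flush_queue,
	/* ... */
};
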
/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);

	rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
{
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		return retval;
	}

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		goto exit_disable_device;
	}

	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_release_regions;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_release_regions;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &pci_dev->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	rt2x00dev->irq = pci_dev->irq;
	rt2x00dev->name = pci_name(pci_dev);

	if (pci_is_pcie(pci_dev))
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
	else
		rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_release_regions:
	pci_release_regions(pci_dev);

exit_disable_device:
	pci_disable_device(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	pci_restore_state(pci_dev);
	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

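/*
 * Usage sketch (illustrative): a chipset driver wraps rt2x00pci_probe()
 * with its own struct rt2x00_ops and reuses the remove/suspend/resume
 * helpers directly in its struct pci_driver. The "example" identifiers,
 * the PCI id and example_ops are hypothetical; real drivers such as
 * rt2400pci or rt61pci follow this pattern.
 */
static const struct pci_device_id example_device_table[] = {
	{ PCI_DEVICE(0x1814, 0x0201) },	/* illustrative Ralink PCI id */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_device_table);

static int example_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *id)
{
	/* example_ops would be the chipset's struct rt2x00_ops. */
	return rt2x00pci_probe(pci_dev, &example_ops);
}

static struct pci_driver example_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= example_device_table,
	.probe		= example_probe,
	.remove		= rt2x00pci_remove,
#ifdef CONFIG_PM
	.suspend	= rt2x00pci_suspend,
	.resume		= rt2x00pci_resume,
#endif /* CONFIG_PM */
};

/* Registered with pci_register_driver() from the module init function. */
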
/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");