// SPDX-License-Identifier: GPL-2.0
// PCI1xxxx SPI driver
// Copyright (C) 2022 Microchip Technology Inc.
// Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
//          Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>

#define DRV_NAME "spi-pci1xxxx"

#define	SYS_FREQ_DEFAULT		(62500000)

#define	PCI1XXXX_SPI_MAX_CLOCK_HZ	(30000000)
#define	PCI1XXXX_SPI_CLK_20MHZ		(20000000)
#define	PCI1XXXX_SPI_CLK_15MHZ		(15000000)
#define	PCI1XXXX_SPI_CLK_12MHZ		(12000000)
#define	PCI1XXXX_SPI_CLK_10MHZ		(10000000)
#define	PCI1XXXX_SPI_MIN_CLOCK_HZ	(2000000)

#define	PCI1XXXX_SPI_BUFFER_SIZE	(320)

#define	SPI_MST_CTL_DEVSEL_MASK		(GENMASK(27, 25))
#define	SPI_MST_CTL_CMD_LEN_MASK	(GENMASK(16, 8))
#define	SPI_MST_CTL_SPEED_MASK		(GENMASK(7, 5))
#define	SPI_MSI_VECTOR_SEL_MASK		(GENMASK(4, 4))

#define	SPI_MST_CTL_FORCE_CE		(BIT(4))
#define	SPI_MST_CTL_MODE_SEL		(BIT(2))
#define	SPI_MST_CTL_GO			(BIT(0))

#define	SPI_MST1_ADDR_BASE		(0x800)

/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */

#define	SPI_MST_CMD_BUF_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x00)
#define	SPI_MST_RSP_BUF_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x200)
#define	SPI_MST_CTL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x400)
#define	SPI_MST_EVENT_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x420)
#define	SPI_MST_EVENT_MASK_REG_OFFSET(x)	(((x) * SPI_MST1_ADDR_BASE) + 0x424)
#define	SPI_MST_PAD_CTL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x460)
#define	SPIALERT_MST_DB_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x464)
#define	SPIALERT_MST_VAL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x468)
#define	SPI_PCI_CTRL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x480)

#define PCI1XXXX_IRQ_FLAGS			(IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
#define SPI_MAX_DATA_LEN			320

#define PCI1XXXX_SPI_TIMEOUT			(msecs_to_jiffies(100))

#define SPI_INTR		BIT(8)
#define SPI_FORCE_CE		BIT(4)

#define SPI_CHIP_SEL_COUNT 7
#define VENDOR_ID_MCHP 0x1055

#define SPI_SUSPEND_CONFIG 0x101
#define SPI_RESUME_CONFIG 0x203

struct pci1xxxx_spi_internal {
	u8 hw_inst;
	bool spi_xfer_in_progress;
	int irq;
	struct completion spi_xfer_done;
	struct spi_controller *spi_host;
	struct pci1xxxx_spi *parent;
	struct {
		unsigned int dev_sel : 3;
		unsigned int msi_vector_sel : 1;
	} prev_val;
};

struct pci1xxxx_spi {
	struct pci_dev *dev;
	u8 total_hw_instances;
	void __iomem *reg_base;
	struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances);
};

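/*
 * driver_data encodes how many SPI host instances to expose (bits [3:0])
 * and the first hardware instance to start from (bits [7:4]); see the
 * decoding in pci1xxxx_spi_probe().
 */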
static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11},
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01},
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);

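/*
 * Assert or deassert the chip select for @spi: program the DEVSEL field of
 * SPI_MST_CTL with the requested chip-select line and drive the FORCE_CE bit
 * accordingly.
 */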
static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
	struct pci1xxxx_spi *par = p->parent;
	u32 regval;

	/* Set the DEV_SEL bits of the SPI_MST_CTL_REG */
	regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
	if (!enable) {
		regval |= SPI_FORCE_CE;
		regval &= ~SPI_MST_CTL_DEVSEL_MASK;
		regval |= (spi_get_chipselect(spi, 0) << 25);
	} else {
		regval &= ~SPI_FORCE_CE;
	}
	writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
}

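/*
 * Convert the requested transfer speed into the 3-bit divider code that is
 * programmed into the SPEED field of SPI_MST_CTL. Requests outside the
 * 2-30 MHz range fall back to the same code as the maximum speed.
 */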
static u8 pci1xxxx_get_clock_div(u32 hz)
{
	u8 val = 0;

	if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
		val = 2;
	else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
		val = 3;
	else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
		val = 4;
	else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
		val = 5;
	else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
		val = 6;
	else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
		val = 7;
	else
		val = 2;

	return val;
}

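/*
 * Perform a single transfer by splitting it into chunks of at most
 * SPI_MAX_DATA_LEN bytes. Each chunk is copied into the command buffer, the
 * controller is started via the GO bit, and completion is signalled by the
 * interrupt handler. Received data, if requested, is copied back from the
 * response buffer.
 */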
static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
				     struct spi_device *spi, struct spi_transfer *xfer)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
	int mode, len, loop_iter, transfer_len;
	struct pci1xxxx_spi *par = p->parent;
	unsigned long bytes_transferred;
	unsigned long bytes_recvd;
	unsigned long loop_count;
	u8 *rx_buf, result;
	const u8 *tx_buf;
	u32 regval;
	u8 clkdiv;

	p->spi_xfer_in_progress = true;
	mode = spi->mode;
	clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
	tx_buf = xfer->tx_buf;
	rx_buf = xfer->rx_buf;
	transfer_len = xfer->len;

	/* Acknowledge any stale events before starting the transfer */
	regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));

	if (tx_buf) {
		bytes_transferred = 0;
		bytes_recvd = 0;
		/* Round up so that a final partial chunk is covered */
		loop_count = transfer_len / SPI_MAX_DATA_LEN;
		if (transfer_len % SPI_MAX_DATA_LEN != 0)
			loop_count += 1;

		for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
			len = SPI_MAX_DATA_LEN;
			if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
			    (loop_iter == loop_count - 1))
				len = transfer_len % SPI_MAX_DATA_LEN;

			reinit_completion(&p->spi_xfer_done);
			memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
				    &tx_buf[bytes_transferred], len);
			bytes_transferred += len;
			regval = readl(par->reg_base +
				       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
			regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
				    SPI_MST_CTL_SPEED_MASK);

			if (mode == SPI_MODE_3)
				regval |= SPI_MST_CTL_MODE_SEL;
			else
				regval &= ~SPI_MST_CTL_MODE_SEL;

			regval |= (clkdiv << 5);
			regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
			regval |= (len << 8);
			writel(regval, par->reg_base +
			       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
			regval = readl(par->reg_base +
				       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
			regval |= SPI_MST_CTL_GO;
			writel(regval, par->reg_base +
			       SPI_MST_CTL_REG_OFFSET(p->hw_inst));

			/* Wait for DMA_TERM interrupt */
			result = wait_for_completion_timeout(&p->spi_xfer_done,
							     PCI1XXXX_SPI_TIMEOUT);
			if (!result) {
				p->spi_xfer_in_progress = false;
				return -ETIMEDOUT;
			}

			if (rx_buf) {
				memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
					      SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
				bytes_recvd += len;
			}
		}
	}
	p->spi_xfer_in_progress = false;

	return 0;
}

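/*
 * Per-instance interrupt handler: acknowledge the SPI event and complete
 * the transfer waited on in pci1xxxx_spi_transfer_one().
 */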
static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
{
	struct pci1xxxx_spi_internal *p = dev;
	irqreturn_t spi_int_fired = IRQ_NONE;
	u32 regval;

	/* Check for the SPI GO_BIT interrupt */
	regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	if (regval & SPI_INTR) {
		/* Wake up the waiter in pci1xxxx_spi_transfer_one() */
		complete(&p->spi_xfer_done);
		spi_int_fired = IRQ_HANDLED;
	}

	/* Clear the event bits that were read */
	writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));

	return spi_int_fired;
}

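/*
 * Probe: enable the device, map BAR 0, allocate one interrupt vector per
 * hardware instance described by driver_data, and register an SPI host
 * controller for each instance.
 */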
static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	u8 hw_inst_cnt, iter, start, only_sec_inst;
	struct pci1xxxx_spi_internal *spi_sub_ptr;
	struct device *dev = &pdev->dev;
	struct pci1xxxx_spi *spi_bus;
	struct spi_controller *spi_host;
	u32 regval;
	int ret;

	hw_inst_cnt = ent->driver_data & 0x0f;
	start = (ent->driver_data & 0xf0) >> 4;
	if (start == 1)
		only_sec_inst = 1;
	else
		only_sec_inst = 0;

	spi_bus = devm_kzalloc(&pdev->dev,
			       struct_size(spi_bus, spi_int, hw_inst_cnt),
			       GFP_KERNEL);
	if (!spi_bus)
		return -ENOMEM;

	spi_bus->dev = pdev;
	spi_bus->total_hw_instances = hw_inst_cnt;
	pci_set_master(pdev);

	for (iter = 0; iter < hw_inst_cnt; iter++) {
		spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
						      sizeof(struct pci1xxxx_spi_internal),
						      GFP_KERNEL);
		if (!spi_bus->spi_int[iter])
			return -ENOMEM;

		spi_sub_ptr = spi_bus->spi_int[iter];
		spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
		if (!spi_sub_ptr->spi_host)
			return -ENOMEM;

		spi_sub_ptr->parent = spi_bus;
		spi_sub_ptr->spi_xfer_in_progress = false;

		if (!iter) {
			ret = pcim_enable_device(pdev);
			if (ret)
				return ret;

			ret = pci_request_regions(pdev, DRV_NAME);
			if (ret)
				return ret;

			spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
			if (!spi_bus->reg_base) {
				ret = -EINVAL;
				goto error;
			}

			ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
						    PCI_IRQ_ALL_TYPES);
			if (ret < 0) {
				dev_err(&pdev->dev, "Error allocating MSI vectors\n");
				goto error;
			}

			init_completion(&spi_sub_ptr->spi_xfer_done);
			/* Initialize Interrupts - SPI_INT */
			regval = readl(spi_bus->reg_base +
				       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			regval &= ~SPI_INTR;
			writel(regval, spi_bus->reg_base +
			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			spi_sub_ptr->irq = pci_irq_vector(pdev, 0);

			ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
					       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
					       pci_name(pdev), spi_sub_ptr);
			if (ret < 0) {
				dev_err(&pdev->dev, "Unable to request irq: %d\n",
					spi_sub_ptr->irq);
				ret = -ENODEV;
				goto error;
			}

			/* This register is only applicable for 1st instance */
			regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
			if (!only_sec_inst)
				regval |= (BIT(4));
			else
				regval &= ~(BIT(4));

			writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
		}

		spi_sub_ptr->hw_inst = start++;

		if (iter == 1) {
			init_completion(&spi_sub_ptr->spi_xfer_done);
			/* Initialize Interrupts - SPI_INT */
			regval = readl(spi_bus->reg_base +
			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			regval &= ~SPI_INTR;
			writel(regval, spi_bus->reg_base +
			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
			ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
					       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
					       pci_name(pdev), spi_sub_ptr);
			if (ret < 0) {
				dev_err(&pdev->dev, "Unable to request irq: %d\n",
					spi_sub_ptr->irq);
				ret = -ENODEV;
				goto error;
			}
		}

		spi_host = spi_sub_ptr->spi_host;
		spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
		spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
				      SPI_TX_DUAL | SPI_LOOP;
		spi_host->transfer_one = pci1xxxx_spi_transfer_one;
		spi_host->set_cs = pci1xxxx_spi_set_cs;
		spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
		spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
		spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
		spi_host->flags = SPI_CONTROLLER_MUST_TX;
		spi_controller_set_devdata(spi_host, spi_sub_ptr);
		ret = devm_spi_register_controller(dev, spi_host);
		if (ret)
			goto error;
	}
	pci_set_drvdata(pdev, spi_bus);

	return 0;

error:
	pci_release_regions(pdev);
	return ret;
}

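/*
 * Save (store == true) or restore the per-instance DEVSEL and MSI vector
 * select fields around suspend/resume.
 */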
static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
				 struct pci1xxxx_spi_internal *spi_sub_ptr,
				 u8 inst, bool store)
{
	u32 regval;

	if (store) {
		regval = readl(spi_ptr->reg_base +
			       SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
		regval &= SPI_MST_CTL_DEVSEL_MASK;
		spi_sub_ptr->prev_val.dev_sel = (regval >> 25) & 7;
		regval = readl(spi_ptr->reg_base +
			       SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
		regval &= SPI_MSI_VECTOR_SEL_MASK;
		spi_sub_ptr->prev_val.msi_vector_sel = (regval >> 4) & 1;
	} else {
		regval = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
		regval &= ~SPI_MST_CTL_DEVSEL_MASK;
		regval |= (spi_sub_ptr->prev_val.dev_sel << 25);
		writel(regval,
		       spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
		writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
			spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
	}
}

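/*
 * Resume every host instance: resume the SPI core, reprogram the event mask
 * register and restore the configuration saved at suspend time.
 */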
static int pci1xxxx_spi_resume(struct device *dev)
{
	struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
	struct pci1xxxx_spi_internal *spi_sub_ptr;
	u32 regval = SPI_RESUME_CONFIG;
	u8 iter;

	for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
		spi_sub_ptr = spi_ptr->spi_int[iter];
		spi_controller_resume(spi_sub_ptr->spi_host);
		writel(regval, spi_ptr->reg_base +
		       SPI_MST_EVENT_MASK_REG_OFFSET(iter));

		/* Restore config at resume */
		store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
	}

	return 0;
}

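/*
 * Suspend: wait for any transfer still in flight, save the per-instance
 * configuration, suspend the SPI core and program the suspend-time event
 * mask.
 */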
static int pci1xxxx_spi_suspend(struct device *dev)
{
	struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
	struct pci1xxxx_spi_internal *spi_sub_ptr;
	u32 reg1 = SPI_SUSPEND_CONFIG;
	u8 iter;

	for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
		spi_sub_ptr = spi_ptr->spi_int[iter];

		while (spi_sub_ptr->spi_xfer_in_progress)
			msleep(20);

		/* Store existing config before suspend */
		store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
		spi_controller_suspend(spi_sub_ptr->spi_host);
		writel(reg1, spi_ptr->reg_base +
		       SPI_MST_EVENT_MASK_REG_OFFSET(iter));
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
				pci1xxxx_spi_resume);

static struct pci_driver pci1xxxx_spi_driver = {
	.name		= DRV_NAME,
	.id_table	= pci1xxxx_spi_pci_id_table,
	.probe		= pci1xxxx_spi_probe,
	.driver		= {
		.pm = pm_sleep_ptr(&spi_pm_ops),
	},
};

module_pci_driver(pci1xxxx_spi_driver);

MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
MODULE_AUTHOR("Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>");
MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
MODULE_LICENSE("GPL v2");