Linux Audio

Check our new training course

Loading...
v6.8
  1/*
  2 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
  3 *
  4 * This file is subject to the terms and conditions of the GNU General Public
  5 * License.  See the file "COPYING" in the main directory of this archive
  6 * for more details.
  7 *
  8 * Copyright (C) 2016 Cavium Inc.
  9 */
 10#include <linux/device.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/interrupt.h>
 13#include <linux/mmc/mmc.h>
 14#include <linux/module.h>
 15#include <linux/of.h>
 16#include <linux/of_platform.h>
 17#include <linux/platform_device.h>
 18#include <linux/pci.h>
 19#include "cavium.h"
 20
/*
 * Serialize access to the shared eMMC bus: take the host's
 * mmc_serializer semaphore (initialized to 1 in probe, so it acts
 * as a mutex across the slots sharing this controller).
 */
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}
 25
/* Counterpart to thunder_mmc_acquire_bus(): release the bus semaphore. */
static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}
 30
/*
 * Enable controller interrupts for the bits in @val.
 * The same mask is first written to MIO_EMM_INT and then to the
 * interrupt-enable "set" register.
 * NOTE(review): the write to MIO_EMM_INT presumably acknowledges
 * (W1C-clears) any pending bits before enabling them -- confirm
 * against the ThunderX hardware reference manual.
 */
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}
 36
 37static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
 38					   struct pci_dev *pdev)
 39{
 40	int nvec, ret, i;
 41
 42	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
 43	if (nvec < 0)
 44		return nvec;
 45
 46	/* register interrupts */
 47	for (i = 0; i < nvec; i++) {
 48		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
 49				       cvm_mmc_interrupt,
 50				       0, cvm_mmc_irq_names[i], host);
 51		if (ret)
 52			return ret;
 53	}
 54	return 0;
 55}
 56
/*
 * PCI probe: bring up the ThunderX eMMC controller and create one
 * child platform device per "mmc-slot" DT child node.
 *
 * Ordering here matters: BAR mapping -> clock enable -> host callback
 * setup -> DMA mask -> interrupt quiesce/registration -> slot creation.
 *
 * Returns 0 on success, negative errno otherwise; on failure after
 * region request, the error path tears down any slots already created.
 */
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	/* Map BAR 0; all controller and DMA registers live there. */
	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base) {
		ret = -EINVAL;
		goto error;
	}

	/* On ThunderX these are identical */
	host->dma_base = host->base;

	/* Register offsets within the BAR for the eMMC and DMA units. */
	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto error;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto error;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	/* Count of 1: the semaphore serves as a bus mutex (see acquire_bus). */
	sema_init(&host->mmc_serializer, 1);

	/* Platform callbacks consumed by the common cavium core. */
	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

	/* ThunderX DMA addressing is limited to 48 bits. */
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret) {
				/* Drop the iterator's node ref before bailing out. */
				of_node_put(child_node);
				goto error;
			}
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	/* Undo any slots (and their dummy platform devices) created above. */
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i]) {
			get_device(&host->slot_pdev[i]->dev);
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
			put_device(&host->slot_pdev[i]->dev);
		}
	}
	/* Safe even if the clock was never obtained/enabled (checks IS_ERR/NULL). */
	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
	return ret;
}
170
/*
 * PCI remove: unregister all slots, then stop the DMA engine by
 * clearing its enable bit, disable the clock and release the PCI
 * regions. IRQs and mappings are device-managed and freed by the core.
 */
static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

	/* Read-modify-write: only the EN bit is cleared. */
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}
188
/* PCI IDs served by this driver: the Cavium ThunderX eMMC unit (0xa010). */
static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};
193
/* PCI driver glue: probe/remove entry points for the device table above. */
static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};
200
/* Standard module registration/boilerplate for a PCI driver. */
module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);
v5.14.15
  1/*
  2 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
  3 *
  4 * This file is subject to the terms and conditions of the GNU General Public
  5 * License.  See the file "COPYING" in the main directory of this archive
  6 * for more details.
  7 *
  8 * Copyright (C) 2016 Cavium Inc.
  9 */
 10#include <linux/device.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/interrupt.h>
 13#include <linux/mmc/mmc.h>
 14#include <linux/module.h>
 15#include <linux/of.h>
 16#include <linux/of_platform.h>
 
 17#include <linux/pci.h>
 18#include "cavium.h"
 19
/*
 * Serialize access to the shared eMMC bus via the host's
 * mmc_serializer semaphore (initialized to 1 in probe, i.e. a mutex).
 */
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}
 24
/* Counterpart to thunder_mmc_acquire_bus(): release the bus semaphore. */
static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}
 29
/*
 * Enable controller interrupts for the bits in @val: write the mask to
 * MIO_EMM_INT and then to the interrupt-enable "set" register.
 * NOTE(review): the MIO_EMM_INT write presumably W1C-clears pending
 * bits first -- confirm against the ThunderX hardware manual.
 */
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}
 35
/*
 * Allocate up to 9 MSI-X vectors and attach the shared
 * cvm_mmc_interrupt() handler to each, named per cvm_mmc_irq_names[].
 * IRQs are device-managed, so partial failure needs no manual cleanup.
 * Returns 0 or a negative errno.
 */
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}
 55
 56static int thunder_mmc_probe(struct pci_dev *pdev,
 57			     const struct pci_device_id *id)
 58{
 59	struct device_node *node = pdev->dev.of_node;
 60	struct device *dev = &pdev->dev;
 61	struct device_node *child_node;
 62	struct cvm_mmc_host *host;
 63	int ret, i = 0;
 64
 65	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
 66	if (!host)
 67		return -ENOMEM;
 68
 69	pci_set_drvdata(pdev, host);
 70	ret = pcim_enable_device(pdev);
 71	if (ret)
 72		return ret;
 73
 74	ret = pci_request_regions(pdev, KBUILD_MODNAME);
 75	if (ret)
 76		return ret;
 77
 78	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
 79	if (!host->base) {
 80		ret = -EINVAL;
 81		goto error;
 82	}
 83
 84	/* On ThunderX these are identical */
 85	host->dma_base = host->base;
 86
 87	host->reg_off = 0x2000;
 88	host->reg_off_dma = 0x160;
 89
 90	host->clk = devm_clk_get(dev, NULL);
 91	if (IS_ERR(host->clk)) {
 92		ret = PTR_ERR(host->clk);
 93		goto error;
 94	}
 95
 96	ret = clk_prepare_enable(host->clk);
 97	if (ret)
 98		goto error;
 99	host->sys_freq = clk_get_rate(host->clk);
100
101	spin_lock_init(&host->irq_handler_lock);
102	sema_init(&host->mmc_serializer, 1);
103
104	host->dev = dev;
105	host->acquire_bus = thunder_mmc_acquire_bus;
106	host->release_bus = thunder_mmc_release_bus;
107	host->int_enable = thunder_mmc_int_enable;
108
109	host->use_sg = true;
110	host->big_dma_addr = true;
111	host->need_irq_handler_lock = true;
112	host->last_slot = -1;
113
114	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
115	if (ret)
116		goto error;
117
118	/*
119	 * Clear out any pending interrupts that may be left over from
120	 * bootloader. Writing 1 to the bits clears them.
121	 */
122	writeq(127, host->base + MIO_EMM_INT_EN(host));
123	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
124	/* Clear DMA FIFO */
125	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
126
127	ret = thunder_mmc_register_interrupts(host, pdev);
128	if (ret)
129		goto error;
130
131	for_each_child_of_node(node, child_node) {
132		/*
133		 * mmc_of_parse and devm* require one device per slot.
134		 * Create a dummy device per slot and set the node pointer to
135		 * the slot. The easiest way to get this is using
136		 * of_platform_device_create.
137		 */
138		if (of_device_is_compatible(child_node, "mmc-slot")) {
139			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
140								       &pdev->dev);
141			if (!host->slot_pdev[i])
142				continue;
143
144			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
145			if (ret)
 
146				goto error;
 
147		}
148		i++;
149	}
150	dev_info(dev, "probed\n");
151	return 0;
152
153error:
154	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
155		if (host->slot[i])
156			cvm_mmc_of_slot_remove(host->slot[i]);
157		if (host->slot_pdev[i]) {
158			get_device(&host->slot_pdev[i]->dev);
159			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
160			put_device(&host->slot_pdev[i]->dev);
161		}
162	}
163	clk_disable_unprepare(host->clk);
164	pci_release_regions(pdev);
165	return ret;
166}
167
/*
 * PCI remove: unregister all slots, stop the DMA engine by clearing
 * its enable bit, then disable the clock and release PCI regions.
 * IRQs and mappings are device-managed and freed by the core.
 */
static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

	/* Read-modify-write: only the EN bit is cleared. */
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}
185
/* PCI IDs served by this driver: the Cavium ThunderX eMMC unit (0xa010). */
static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};
190
/* PCI driver glue: probe/remove entry points for the device table above. */
static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};
197
/* Standard module registration/boilerplate for a PCI driver. */
module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);