v6.2
/*
 * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <asm/octeon/octeon.h>
#include "cavium.h"

#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)

/*
 * The l2c* functions below are used for the EMMC-17978 workaround.
 *
 * Due to a bug in the design of the MMC bus hardware, the 2nd to last
 * cache block of a DMA read must be locked into the L2 Cache.
 * Otherwise, data corruption may occur.
 */
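/*
 * Concretely: octeon_mmc_dmar_fixup() below locks the 512 bytes that
 * start 1024 bytes before the end of a multi-block write, i.e. the
 * second-to-last 512-byte block of the transfer, and
 * octeon_mmc_dmar_fixup_done() unlocks them once the DMA completes.
 */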
static inline void *phys_to_ptr(u64 address)
{
	return (void *)(address | (1ull << 63)); /* XKPHYS */
}
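/*
 * ORing in bit 63 above forms a MIPS64 XKPHYS address: a direct,
 * unmapped view of physical memory, so the target lines can be
 * touched without a TLB mapping.
 */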

/*
 * Lock a single line into L2. The line is zeroed before locking
 * to make sure no DRAM accesses are made.
 */
static void l2c_lock_line(u64 addr)
{
	char *addr_ptr = phys_to_ptr(addr);

	asm volatile (
		"cache 31, %[line]"	/* Lock the line */
		::[line] "m" (*addr_ptr));
}

/* Unlock a single line in the L2 cache. */
static void l2c_unlock_line(u64 addr)
{
	char *addr_ptr = phys_to_ptr(addr);

	asm volatile (
		"cache 23, %[line]"	/* Unlock the line */
		::[line] "m" (*addr_ptr));
}

/* Locks a memory region in the L2 cache. */
static void l2c_lock_mem_region(u64 start, u64 len)
{
	u64 end;

	/* Round start/end to cache line boundaries */
	end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
	start = ALIGN(start, CVMX_CACHE_LINE_SIZE);

	while (start <= end) {
		l2c_lock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
	}
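	/* Make sure the locks complete before the DMA begins. */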
	asm volatile("sync");
}

/* Unlock a memory region in the L2 cache. */
static void l2c_unlock_mem_region(u64 start, u64 len)
{
	u64 end;

	/* Round start/end to cache line boundaries */
	end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
	start = ALIGN(start, CVMX_CACHE_LINE_SIZE);

	while (start <= end) {
		l2c_unlock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
	}
}

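/*
 * On chips without a CIU3 the MMC controller shares the boot bus with
 * other devices, so access is serialized via the global
 * octeon_bootbus_sem; CIU3 chips only need the per-host semaphore.
 */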
static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	if (!host->has_ciu3) {
		down(&octeon_bootbus_sem);
		/* For CN70XX, switch the MMC controller onto the bus. */
		if (OCTEON_IS_MODEL(OCTEON_CN70XX))
			writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
	} else {
		down(&host->mmc_serializer);
	}
}

static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
{
	if (!host->has_ciu3)
		up(&octeon_bootbus_sem);
	else
		up(&host->mmc_serializer);
}

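/*
 * Acknowledge (write-one-to-clear) the requested bits in MIO_EMM_INT.
 * Pre-CIU3 chips must also set the same bits in MIO_EMM_INT_EN to
 * unmask them; on CIU3 chips each line is enabled through the CIU3.
 */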
static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	if (!host->has_ciu3)
		writeq(val, host->base + MIO_EMM_INT_EN(host));
}

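/*
 * Reference-counted control of the power GPIO shared by all slots:
 * the first user (dir == 1) drives it high, and the last user to
 * drop its reference (dir == 0) drives it low again.
 */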
static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
{
	if (dir == 0)
		if (!atomic_dec_return(&host->shared_power_users))
			gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
	if (dir == 1)
		if (atomic_inc_return(&host->shared_power_users) == 1)
			gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
}

static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
				  struct mmc_command *cmd,
				  struct mmc_data *data,
				  u64 addr)
{
	if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
		return;
	if (data->blksz * data->blocks <= 1024)
		return;

	host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
	l2c_lock_mem_region(host->n_minus_one, 512);
}

static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
{
	if (!host->n_minus_one)
		return;
	l2c_unlock_mem_region(host->n_minus_one, 512);
	host->n_minus_one = 0;
}

static int octeon_mmc_probe(struct platform_device *pdev)
{
	struct device_node *cn, *node = pdev->dev.of_node;
	struct cvm_mmc_host *host;
	void __iomem *base;
	int mmc_irq[9];
	int i, ret = 0;
	u64 val;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = &pdev->dev;
	host->acquire_bus = octeon_mmc_acquire_bus;
	host->release_bus = octeon_mmc_release_bus;
	host->int_enable = octeon_mmc_int_enable;
	host->set_shared_power = octeon_mmc_set_shared_power;
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
	    OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
		host->dmar_fixup = octeon_mmc_dmar_fixup;
		host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
	}

	host->sys_freq = octeon_get_io_clock_rate();

	if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
		host->big_dma_addr = true;
		host->need_irq_handler_lock = true;
		host->has_ciu3 = true;
		host->use_sg = true;
		/*
		 * First seven are the EMM_INT bits 0..6, then two for
		 * the EMM_DMA_INT bits
		 */
		for (i = 0; i < 9; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];

			/* work around legacy u-boot device trees */
			irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
		}
	} else {
		host->big_dma_addr = false;
		host->need_irq_handler_lock = false;
		host->has_ciu3 = false;
		/* First one is EMM, the second is DMA */
		for (i = 0; i < 2; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];
		}
	}

	host->last_slot = -1;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->base = base;
	host->reg_off = 0;

	base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->dma_base = base;
	/*
	 * To keep the register addresses shared we intentionally use
	 * a negative offset here; the first register used on Octeon
	 * therefore starts at 0x20 (MIO_EMM_DMA_CFG).
	 */
	host->reg_off_dma = -0x20;
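	/*
	 * For example, MIO_EMM_DMA_CFG (0x20 in the shared map) then
	 * resolves to dma_base + 0x0, the first register of the second
	 * resource.
	 */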

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * the bootloader.
	 */
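	/*
	 * MIO_EMM_INT is write-one-to-clear, so writing back the value
	 * just read acknowledges every bit that was pending.
	 */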
	val = readq(host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT(host));

	if (host->has_ciu3) {
		/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
		for (i = 1; i <= 4; i++) {
			ret = devm_request_irq(&pdev->dev, mmc_irq[i],
					       cvm_mmc_interrupt,
					       0, cvm_mmc_irq_names[i], host);
			if (ret < 0) {
				dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
					mmc_irq[i]);
				return ret;
			}
		}
	} else {
		ret = devm_request_irq(&pdev->dev, mmc_irq[0],
				       cvm_mmc_interrupt, 0, KBUILD_MODNAME,
				       host);
		if (ret < 0) {
			dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
				mmc_irq[0]);
			return ret;
		}
	}

	host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
							 "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(host->global_pwr_gpiod)) {
		dev_err(&pdev->dev, "Invalid power GPIO\n");
		return PTR_ERR(host->global_pwr_gpiod);
	}

	platform_set_drvdata(pdev, host);

	i = 0;
	for_each_child_of_node(node, cn) {
		host->slot_pdev[i] =
			of_platform_device_create(cn, NULL, &pdev->dev);
		if (!host->slot_pdev[i]) {
			i++;
			continue;
		}
		ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
		if (ret) {
			dev_err(&pdev->dev, "Error populating slots\n");
			octeon_mmc_set_shared_power(host, 0);
			of_node_put(cn);
			goto error;
		}
		i++;
	}
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	return ret;
}

static int octeon_mmc_remove(struct platform_device *pdev)
{
	struct cvm_mmc_host *host = platform_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	octeon_mmc_set_shared_power(host, 0);
	return 0;
}

static const struct of_device_id octeon_mmc_match[] = {
	{
		.compatible = "cavium,octeon-6130-mmc",
	},
	{
		.compatible = "cavium,octeon-7890-mmc",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mmc_match);
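
/*
 * A minimal sketch of a matching device-tree fragment; the slot child
 * node layout is assumed here (see the cavium-mmc binding document):
 *
 *	mmc {
 *		compatible = "cavium,octeon-6130-mmc";
 *		...
 *		mmc-slot@0 {
 *			compatible = "cavium,octeon-6130-mmc-slot";
 *			reg = <0>;
 *		};
 *	};
 */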

static struct platform_driver octeon_mmc_driver = {
	.probe		= octeon_mmc_probe,
	.remove		= octeon_mmc_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = octeon_mmc_match,
	},
};

module_platform_driver(octeon_mmc_driver);

MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
MODULE_LICENSE("GPL");