Loading...
1/*
2 * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2012-2017 Cavium Inc.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/gpio/consumer.h>
12#include <linux/interrupt.h>
13#include <linux/mmc/mmc.h>
14#include <linux/mmc/slot-gpio.h>
15#include <linux/module.h>
16#include <linux/of_platform.h>
17#include <asm/octeon/octeon.h>
18#include "cavium.h"
19
20#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
21
22/*
23 * The l2c* functions below are used for the EMMC-17978 workaround.
24 *
25 * Due to a bug in the design of the MMC bus hardware, the 2nd to last
26 * cache block of a DMA read must be locked into the L2 Cache.
27 * Otherwise, data corruption may occur.
28 */
29static inline void *phys_to_ptr(u64 address)
30{
31 return (void *)(address | (1ull << 63)); /* XKPHYS */
32}
33
34/*
35 * Lock a single line into L2. The line is zeroed before locking
36 * to make sure no dram accesses are made.
37 */
38static void l2c_lock_line(u64 addr)
39{
40 char *addr_ptr = phys_to_ptr(addr);
41
42 asm volatile (
43 "cache 31, %[line]" /* Unlock the line */
44 ::[line] "m" (*addr_ptr));
45}
46
47/* Unlock a single line in the L2 cache. */
48static void l2c_unlock_line(u64 addr)
49{
50 char *addr_ptr = phys_to_ptr(addr);
51
52 asm volatile (
53 "cache 23, %[line]" /* Unlock the line */
54 ::[line] "m" (*addr_ptr));
55}
56
57/* Locks a memory region in the L2 cache. */
58static void l2c_lock_mem_region(u64 start, u64 len)
59{
60 u64 end;
61
62 /* Round start/end to cache line boundaries */
63 end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
64 start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
65
66 while (start <= end) {
67 l2c_lock_line(start);
68 start += CVMX_CACHE_LINE_SIZE;
69 }
70 asm volatile("sync");
71}
72
73/* Unlock a memory region in the L2 cache. */
74static void l2c_unlock_mem_region(u64 start, u64 len)
75{
76 u64 end;
77
78 /* Round start/end to cache line boundaries */
79 end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
80 start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
81
82 while (start <= end) {
83 l2c_unlock_line(start);
84 start += CVMX_CACHE_LINE_SIZE;
85 }
86}
87
88static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
89{
90 if (!host->has_ciu3) {
91 down(&octeon_bootbus_sem);
92 /* For CN70XX, switch the MMC controller onto the bus. */
93 if (OCTEON_IS_MODEL(OCTEON_CN70XX))
94 writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
95 } else {
96 down(&host->mmc_serializer);
97 }
98}
99
100static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
101{
102 if (!host->has_ciu3)
103 up(&octeon_bootbus_sem);
104 else
105 up(&host->mmc_serializer);
106}
107
108static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
109{
110 writeq(val, host->base + MIO_EMM_INT(host));
111 if (!host->has_ciu3)
112 writeq(val, host->base + MIO_EMM_INT_EN(host));
113}
114
115static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
116{
117 if (dir == 0)
118 if (!atomic_dec_return(&host->shared_power_users))
119 gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
120 if (dir == 1)
121 if (atomic_inc_return(&host->shared_power_users) == 1)
122 gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
123}
124
125static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
126 struct mmc_command *cmd,
127 struct mmc_data *data,
128 u64 addr)
129{
130 if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
131 return;
132 if (data->blksz * data->blocks <= 1024)
133 return;
134
135 host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
136 l2c_lock_mem_region(host->n_minus_one, 512);
137}
138
139static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
140{
141 if (!host->n_minus_one)
142 return;
143 l2c_unlock_mem_region(host->n_minus_one, 512);
144 host->n_minus_one = 0;
145}
146
/*
 * Probe: map the two EMM register banks, request the interrupt(s),
 * and create one child platform device per slot node in the device
 * tree, probing each slot against this host.
 */
static int octeon_mmc_probe(struct platform_device *pdev)
{
	struct device_node *cn, *node = pdev->dev.of_node;
	struct cvm_mmc_host *host;
	void __iomem *base;
	int mmc_irq[9];
	int i, ret = 0;
	u64 val;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = &pdev->dev;
	host->acquire_bus = octeon_mmc_acquire_bus;
	host->release_bus = octeon_mmc_release_bus;
	host->int_enable = octeon_mmc_int_enable;
	host->set_shared_power = octeon_mmc_set_shared_power;
	/* Only these models need the EMMC-17978 L2-lock DMA workaround. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
	    OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
		host->dmar_fixup = octeon_mmc_dmar_fixup;
		host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
	}

	host->sys_freq = octeon_get_io_clock_rate();

	if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
		/* 7890 (CIU3) variant: per-event interrupts, 64-bit DMA. */
		host->big_dma_addr = true;
		host->need_irq_handler_lock = true;
		host->has_ciu3 = true;
		host->use_sg = true;
		/*
		 * First seven are the EMM_INT bits 0..6, then two for
		 * the EMM_DMA_INT bits
		 */
		for (i = 0; i < 9; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];

			/* work around legacy u-boot device trees */
			irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
		}
	} else {
		host->big_dma_addr = false;
		host->need_irq_handler_lock = false;
		host->has_ciu3 = false;
		/* First one is EMM second DMA */
		for (i = 0; i < 2; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];
		}
	}

	host->last_slot = -1;

	/* Resource 0: main EMM register bank. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->base = base;
	host->reg_off = 0;

	/* Resource 1: DMA register bank. */
	base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->dma_base = base;
	/*
	 * To keep the register addresses shared we intentionaly use
	 * a negative offset here, first register used on Octeon therefore
	 * starts at 0x20 (MIO_EMM_DMA_CFG).
	 */
	host->reg_off_dma = -0x20;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader.
	 */
	val = readq(host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT(host));

	if (host->has_ciu3) {
		/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
		for (i = 1; i <= 4; i++) {
			ret = devm_request_irq(&pdev->dev, mmc_irq[i],
					       cvm_mmc_interrupt,
					       0, cvm_mmc_irq_names[i], host);
			if (ret < 0) {
				dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
					mmc_irq[i]);
				return ret;
			}
		}
	} else {
		ret = devm_request_irq(&pdev->dev, mmc_irq[0],
				       cvm_mmc_interrupt, 0, KBUILD_MODNAME,
				       host);
		if (ret < 0) {
			dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
				mmc_irq[0]);
			return ret;
		}
	}

	/* Optional "power" GPIO shared by all slots (see set_shared_power). */
	host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
							 "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(host->global_pwr_gpiod)) {
		dev_err(&pdev->dev, "Invalid power GPIO\n");
		return PTR_ERR(host->global_pwr_gpiod);
	}

	platform_set_drvdata(pdev, host);

	i = 0;
	for_each_child_of_node(node, cn) {
		/* A failed child-device creation skips the slot but is not fatal. */
		host->slot_pdev[i] =
			of_platform_device_create(cn, NULL, &pdev->dev);
		if (!host->slot_pdev[i]) {
			i++;
			continue;
		}
		ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
		if (ret) {
			dev_err(&pdev->dev, "Error populating slots\n");
			octeon_mmc_set_shared_power(host, 0);
			of_node_put(cn);
			goto error;
		}
		i++;
	}
	return 0;

error:
	/* Unwind every slot/child device created before the failure. */
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	return ret;
}
296
297static int octeon_mmc_remove(struct platform_device *pdev)
298{
299 struct cvm_mmc_host *host = platform_get_drvdata(pdev);
300 u64 dma_cfg;
301 int i;
302
303 for (i = 0; i < CAVIUM_MAX_MMC; i++)
304 if (host->slot[i])
305 cvm_mmc_of_slot_remove(host->slot[i]);
306
307 dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
308 dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
309 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
310
311 octeon_mmc_set_shared_power(host, 0);
312 return 0;
313}
314
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id octeon_mmc_match[] = {
	{
		.compatible = "cavium,octeon-6130-mmc",
	},
	{
		.compatible = "cavium,octeon-7890-mmc",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mmc_match);
325
static struct platform_driver octeon_mmc_driver = {
	.probe = octeon_mmc_probe,
	.remove = octeon_mmc_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		/* Probing may run asynchronously to speed up boot. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = octeon_mmc_match,
	},
};

module_platform_driver(octeon_mmc_driver);

MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
MODULE_LICENSE("GPL");
1/*
2 * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2012-2017 Cavium Inc.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/gpio/consumer.h>
12#include <linux/interrupt.h>
13#include <linux/mmc/mmc.h>
14#include <linux/mmc/slot-gpio.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/of_platform.h>
18#include <linux/platform_device.h>
19#include <asm/octeon/octeon.h>
20#include "cavium.h"
21
22#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
23
24/*
25 * The l2c* functions below are used for the EMMC-17978 workaround.
26 *
27 * Due to a bug in the design of the MMC bus hardware, the 2nd to last
28 * cache block of a DMA read must be locked into the L2 Cache.
29 * Otherwise, data corruption may occur.
30 */
31static inline void *phys_to_ptr(u64 address)
32{
33 return (void *)(address | (1ull << 63)); /* XKPHYS */
34}
35
36/*
37 * Lock a single line into L2. The line is zeroed before locking
38 * to make sure no dram accesses are made.
39 */
40static void l2c_lock_line(u64 addr)
41{
42 char *addr_ptr = phys_to_ptr(addr);
43
44 asm volatile (
45 "cache 31, %[line]" /* Unlock the line */
46 ::[line] "m" (*addr_ptr));
47}
48
49/* Unlock a single line in the L2 cache. */
50static void l2c_unlock_line(u64 addr)
51{
52 char *addr_ptr = phys_to_ptr(addr);
53
54 asm volatile (
55 "cache 23, %[line]" /* Unlock the line */
56 ::[line] "m" (*addr_ptr));
57}
58
59/* Locks a memory region in the L2 cache. */
60static void l2c_lock_mem_region(u64 start, u64 len)
61{
62 u64 end;
63
64 /* Round start/end to cache line boundaries */
65 end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
66 start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
67
68 while (start <= end) {
69 l2c_lock_line(start);
70 start += CVMX_CACHE_LINE_SIZE;
71 }
72 asm volatile("sync");
73}
74
75/* Unlock a memory region in the L2 cache. */
76static void l2c_unlock_mem_region(u64 start, u64 len)
77{
78 u64 end;
79
80 /* Round start/end to cache line boundaries */
81 end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
82 start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
83
84 while (start <= end) {
85 l2c_unlock_line(start);
86 start += CVMX_CACHE_LINE_SIZE;
87 }
88}
89
90static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
91{
92 if (!host->has_ciu3) {
93 down(&octeon_bootbus_sem);
94 /* For CN70XX, switch the MMC controller onto the bus. */
95 if (OCTEON_IS_MODEL(OCTEON_CN70XX))
96 writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
97 } else {
98 down(&host->mmc_serializer);
99 }
100}
101
102static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
103{
104 if (!host->has_ciu3)
105 up(&octeon_bootbus_sem);
106 else
107 up(&host->mmc_serializer);
108}
109
110static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
111{
112 writeq(val, host->base + MIO_EMM_INT(host));
113 if (!host->has_ciu3)
114 writeq(val, host->base + MIO_EMM_INT_EN(host));
115}
116
117static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
118{
119 if (dir == 0)
120 if (!atomic_dec_return(&host->shared_power_users))
121 gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
122 if (dir == 1)
123 if (atomic_inc_return(&host->shared_power_users) == 1)
124 gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
125}
126
127static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
128 struct mmc_command *cmd,
129 struct mmc_data *data,
130 u64 addr)
131{
132 if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
133 return;
134 if (data->blksz * data->blocks <= 1024)
135 return;
136
137 host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
138 l2c_lock_mem_region(host->n_minus_one, 512);
139}
140
141static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
142{
143 if (!host->n_minus_one)
144 return;
145 l2c_unlock_mem_region(host->n_minus_one, 512);
146 host->n_minus_one = 0;
147}
148
/*
 * Probe: map the two EMM register banks, request the interrupt(s),
 * and create one child platform device per slot node in the device
 * tree, probing each slot against this host.
 */
static int octeon_mmc_probe(struct platform_device *pdev)
{
	struct device_node *cn, *node = pdev->dev.of_node;
	struct cvm_mmc_host *host;
	void __iomem *base;
	int mmc_irq[9];
	int i, ret = 0;
	u64 val;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = &pdev->dev;
	host->acquire_bus = octeon_mmc_acquire_bus;
	host->release_bus = octeon_mmc_release_bus;
	host->int_enable = octeon_mmc_int_enable;
	host->set_shared_power = octeon_mmc_set_shared_power;
	/* Only these models need the EMMC-17978 L2-lock DMA workaround. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
	    OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
		host->dmar_fixup = octeon_mmc_dmar_fixup;
		host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
	}

	host->sys_freq = octeon_get_io_clock_rate();

	if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
		/* 7890 (CIU3) variant: per-event interrupts, 64-bit DMA. */
		host->big_dma_addr = true;
		host->need_irq_handler_lock = true;
		host->has_ciu3 = true;
		host->use_sg = true;
		/*
		 * First seven are the EMM_INT bits 0..6, then two for
		 * the EMM_DMA_INT bits
		 */
		for (i = 0; i < 9; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];

			/* work around legacy u-boot device trees */
			irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
		}
	} else {
		host->big_dma_addr = false;
		host->need_irq_handler_lock = false;
		host->has_ciu3 = false;
		/* First one is EMM second DMA */
		for (i = 0; i < 2; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];
		}
	}

	host->last_slot = -1;

	/* Resource 0: main EMM register bank. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->base = base;
	host->reg_off = 0;

	/* Resource 1: DMA register bank. */
	base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->dma_base = base;
	/*
	 * To keep the register addresses shared we intentionaly use
	 * a negative offset here, first register used on Octeon therefore
	 * starts at 0x20 (MIO_EMM_DMA_CFG).
	 */
	host->reg_off_dma = -0x20;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader.
	 */
	val = readq(host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT(host));

	if (host->has_ciu3) {
		/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
		for (i = 1; i <= 4; i++) {
			ret = devm_request_irq(&pdev->dev, mmc_irq[i],
					       cvm_mmc_interrupt,
					       0, cvm_mmc_irq_names[i], host);
			if (ret < 0) {
				dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
					mmc_irq[i]);
				return ret;
			}
		}
	} else {
		ret = devm_request_irq(&pdev->dev, mmc_irq[0],
				       cvm_mmc_interrupt, 0, KBUILD_MODNAME,
				       host);
		if (ret < 0) {
			dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
				mmc_irq[0]);
			return ret;
		}
	}

	/* Optional "power" GPIO shared by all slots (see set_shared_power). */
	host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
							 "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(host->global_pwr_gpiod)) {
		dev_err(&pdev->dev, "Invalid power GPIO\n");
		return PTR_ERR(host->global_pwr_gpiod);
	}

	platform_set_drvdata(pdev, host);

	i = 0;
	for_each_child_of_node(node, cn) {
		/* A failed child-device creation skips the slot but is not fatal. */
		host->slot_pdev[i] =
			of_platform_device_create(cn, NULL, &pdev->dev);
		if (!host->slot_pdev[i]) {
			i++;
			continue;
		}
		ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
		if (ret) {
			dev_err(&pdev->dev, "Error populating slots\n");
			octeon_mmc_set_shared_power(host, 0);
			of_node_put(cn);
			goto error;
		}
		i++;
	}
	return 0;

error:
	/* Unwind every slot/child device created before the failure. */
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	return ret;
}
298
299static void octeon_mmc_remove(struct platform_device *pdev)
300{
301 struct cvm_mmc_host *host = platform_get_drvdata(pdev);
302 u64 dma_cfg;
303 int i;
304
305 for (i = 0; i < CAVIUM_MAX_MMC; i++)
306 if (host->slot[i])
307 cvm_mmc_of_slot_remove(host->slot[i]);
308
309 dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
310 dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
311 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
312
313 octeon_mmc_set_shared_power(host, 0);
314}
315
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id octeon_mmc_match[] = {
	{
		.compatible = "cavium,octeon-6130-mmc",
	},
	{
		.compatible = "cavium,octeon-7890-mmc",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mmc_match);
326
static struct platform_driver octeon_mmc_driver = {
	.probe = octeon_mmc_probe,
	.remove_new = octeon_mmc_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		/* Probing may run asynchronously to speed up boot. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = octeon_mmc_match,
	},
};

module_platform_driver(octeon_mmc_driver);

MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
MODULE_LICENSE("GPL");