// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - MMIO support
//
// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include "internal.h"

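/*
 * Per-map state: the __iomem base address, the value width in bytes, a
 * flag for devices that need byte swapping, an optional gate clock and
 * the single-register accessors selected when the map is created.
 */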
struct regmap_mmio_context {
	void __iomem *regs;
	unsigned int val_bytes;
	bool big_endian;

	bool attached_clk;
	struct clk *clk;

	void (*reg_write)(struct regmap_mmio_context *ctx,
			  unsigned int reg, unsigned int val);
	unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
				 unsigned int reg);
};

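/* Only 8, 16 and 32 bit register address widths are accepted. */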
static int regmap_mmio_regbits_check(size_t reg_bits)
{
	switch (reg_bits) {
	case 8:
	case 16:
	case 32:
		return 0;
	default:
		return -EINVAL;
	}
}

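/*
 * The minimum register stride is the value width in bytes, so that
 * consecutive registers stay naturally aligned in the MMIO window.
 */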
static int regmap_mmio_get_min_stride(size_t val_bits)
{
	int min_stride;

	switch (val_bits) {
	case 8:
		/* The core treats 0 as 1 */
		min_stride = 0;
		break;
	case 16:
		min_stride = 2;
		break;
	case 32:
		min_stride = 4;
		break;
	default:
		return -EINVAL;
	}

	return min_stride;
}

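/*
 * Single-register accessors for each supported width, endianness and I/O
 * flavour (plain MMIO, relaxed MMIO, I/O port). regmap_mmio_gen_context()
 * picks one read/write pair and stores it in the context.
 */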
static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
			       unsigned int reg,
			       unsigned int val)
{
	writeb(val, ctx->regs + reg);
}

static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
				       unsigned int reg,
				       unsigned int val)
{
	writeb_relaxed(val, ctx->regs + reg);
}

static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
				 unsigned int reg, unsigned int val)
{
	iowrite8(val, ctx->regs + reg);
}

static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writew(val, ctx->regs + reg);
}

static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
					  unsigned int reg,
					  unsigned int val)
{
	writew_relaxed(val, ctx->regs + reg);
}

static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite16(val, ctx->regs + reg);
}

static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writew(swab16(val), ctx->regs + reg);
}

static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite16be(val, ctx->regs + reg);
}

static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writel(val, ctx->regs + reg);
}

static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
					  unsigned int reg,
					  unsigned int val)
{
	writel_relaxed(val, ctx->regs + reg);
}

static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite32(val, ctx->regs + reg);
}

static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writel(swab32(val), ctx->regs + reg);
}

static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite32be(val, ctx->regs + reg);
}

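/*
 * Bus reg_write() callback: enable the optional gate clock around the
 * access and dispatch to the accessor chosen at init time.
 */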
static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
	struct regmap_mmio_context *ctx = context;
	int ret;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	ctx->reg_write(ctx, reg, val);

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return 0;
}

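/*
 * Non-incrementing write: stream val_count values to one and the same
 * register offset, as used for FIFO-style registers.
 */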
static int regmap_mmio_noinc_write(void *context, unsigned int reg,
				   const void *val, size_t val_count)
{
	struct regmap_mmio_context *ctx = context;
	int ret = 0;
	int i;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	/*
	 * There are no native, assembly-optimized write single register
	 * operations for big endian, so fall back to emulation if this
	 * is needed. (Single bytes are fine, they are not affected by
	 * endianness.)
	 */
	if (ctx->big_endian && (ctx->val_bytes > 1)) {
		switch (ctx->val_bytes) {
		case 2:
		{
			const u16 *valp = (const u16 *)val;
			for (i = 0; i < val_count; i++)
				writew(swab16(valp[i]), ctx->regs + reg);
			goto out_clk;
		}
		case 4:
		{
			const u32 *valp = (const u32 *)val;
			for (i = 0; i < val_count; i++)
				writel(swab32(valp[i]), ctx->regs + reg);
			goto out_clk;
		}
		default:
			ret = -EINVAL;
			goto out_clk;
		}
	}

	switch (ctx->val_bytes) {
	case 1:
		writesb(ctx->regs + reg, (const u8 *)val, val_count);
		break;
	case 2:
		writesw(ctx->regs + reg, (const u16 *)val, val_count);
		break;
	case 4:
		writesl(ctx->regs + reg, (const u32 *)val, val_count);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out_clk:
	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return ret;
}

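/* Read-side counterparts of the write accessors above. */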
static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
				      unsigned int reg)
{
	return readb(ctx->regs + reg);
}

static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
					      unsigned int reg)
{
	return readb_relaxed(ctx->regs + reg);
}

static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
					unsigned int reg)
{
	return ioread8(ctx->regs + reg);
}

static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return readw(ctx->regs + reg);
}

static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx,
						 unsigned int reg)
{
	return readw_relaxed(ctx->regs + reg);
}

static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread16(ctx->regs + reg);
}

static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return swab16(readw(ctx->regs + reg));
}

static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread16be(ctx->regs + reg);
}

static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return readl(ctx->regs + reg);
}

static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx,
						 unsigned int reg)
{
	return readl_relaxed(ctx->regs + reg);
}

static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread32(ctx->regs + reg);
}

static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return swab32(readl(ctx->regs + reg));
}

static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread32be(ctx->regs + reg);
}

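/* Bus reg_read() callback: the read-side mirror of regmap_mmio_write(). */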
static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
	struct regmap_mmio_context *ctx = context;
	int ret;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	*val = ctx->reg_read(ctx, reg);

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return 0;
}

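/*
 * Non-incrementing read: fill the buffer with val_count values read from
 * the same register offset.
 */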
static int regmap_mmio_noinc_read(void *context, unsigned int reg,
				  void *val, size_t val_count)
{
	struct regmap_mmio_context *ctx = context;
	int ret = 0;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	switch (ctx->val_bytes) {
	case 1:
		readsb(ctx->regs + reg, (u8 *)val, val_count);
		break;
	case 2:
		readsw(ctx->regs + reg, (u16 *)val, val_count);
		break;
	case 4:
		readsl(ctx->regs + reg, (u32 *)val, val_count);
		break;
	default:
		ret = -EINVAL;
		goto out_clk;
	}

	/*
	 * There are no native, assembly-optimized read single register
	 * operations for big endian, so fall back to emulation if this
	 * is needed: byte-swap the values in place after reading them
	 * natively. (Single bytes are fine, they are not affected by
	 * endianness.)
	 */
	if (ctx->big_endian && (ctx->val_bytes > 1)) {
		switch (ctx->val_bytes) {
		case 2:
			swab16_array(val, val_count);
			break;
		case 4:
			swab32_array(val, val_count);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

out_clk:
	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return ret;
}

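/*
 * Release the optional clock (dropping the reference only if this driver
 * obtained it via clk_get()) and free the context itself.
 */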
static void regmap_mmio_free_context(void *context)
{
	struct regmap_mmio_context *ctx = context;

	if (!IS_ERR(ctx->clk)) {
		clk_unprepare(ctx->clk);
		if (!ctx->attached_clk)
			clk_put(ctx->clk);
	}
	kfree(context);
}

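/*
 * fast_io makes the regmap core protect the map with a spinlock instead of
 * a mutex, which suits MMIO accesses that never sleep.
 */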
static const struct regmap_bus regmap_mmio = {
	.fast_io = true,
	.reg_write = regmap_mmio_write,
	.reg_read = regmap_mmio_read,
	.reg_noinc_write = regmap_mmio_noinc_write,
	.reg_noinc_read = regmap_mmio_noinc_read,
	.free_context = regmap_mmio_free_context,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};

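/*
 * Validate the configuration, allocate a context, select the accessor pair
 * matching the value width, endianness and I/O flavour, and optionally look
 * up and prepare the gate clock named by clk_id.
 */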
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
					const char *clk_id,
					void __iomem *regs,
					const struct regmap_config *config)
{
	struct regmap_mmio_context *ctx;
	int min_stride;
	int ret;

	ret = regmap_mmio_regbits_check(config->reg_bits);
	if (ret)
		return ERR_PTR(ret);

	if (config->pad_bits)
		return ERR_PTR(-EINVAL);

	min_stride = regmap_mmio_get_min_stride(config->val_bits);
	if (min_stride < 0)
		return ERR_PTR(min_stride);

	if (config->reg_stride && config->reg_stride < min_stride)
		return ERR_PTR(-EINVAL);

	if (config->use_relaxed_mmio && config->io_port)
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->regs = regs;
	ctx->val_bytes = config->val_bits / 8;
	ctx->clk = ERR_PTR(-ENODEV);

	switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
	case REGMAP_ENDIAN_DEFAULT:
	case REGMAP_ENDIAN_LITTLE:
#ifdef __LITTLE_ENDIAN
	case REGMAP_ENDIAN_NATIVE:
#endif
		switch (config->val_bits) {
		case 8:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread8;
				ctx->reg_write = regmap_mmio_iowrite8;
			} else if (config->use_relaxed_mmio) {
				ctx->reg_read = regmap_mmio_read8_relaxed;
				ctx->reg_write = regmap_mmio_write8_relaxed;
			} else {
				ctx->reg_read = regmap_mmio_read8;
				ctx->reg_write = regmap_mmio_write8;
			}
			break;
		case 16:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread16le;
				ctx->reg_write = regmap_mmio_iowrite16le;
			} else if (config->use_relaxed_mmio) {
				ctx->reg_read = regmap_mmio_read16le_relaxed;
				ctx->reg_write = regmap_mmio_write16le_relaxed;
			} else {
				ctx->reg_read = regmap_mmio_read16le;
				ctx->reg_write = regmap_mmio_write16le;
			}
			break;
		case 32:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread32le;
				ctx->reg_write = regmap_mmio_iowrite32le;
			} else if (config->use_relaxed_mmio) {
				ctx->reg_read = regmap_mmio_read32le_relaxed;
				ctx->reg_write = regmap_mmio_write32le_relaxed;
			} else {
				ctx->reg_read = regmap_mmio_read32le;
				ctx->reg_write = regmap_mmio_write32le;
			}
			break;
		default:
			ret = -EINVAL;
			goto err_free;
		}
		break;
	case REGMAP_ENDIAN_BIG:
#ifdef __BIG_ENDIAN
	case REGMAP_ENDIAN_NATIVE:
#endif
		ctx->big_endian = true;
		switch (config->val_bits) {
		case 8:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread8;
				ctx->reg_write = regmap_mmio_iowrite8;
			} else {
				ctx->reg_read = regmap_mmio_read8;
				ctx->reg_write = regmap_mmio_write8;
			}
			break;
		case 16:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread16be;
				ctx->reg_write = regmap_mmio_iowrite16be;
			} else {
				ctx->reg_read = regmap_mmio_read16be;
				ctx->reg_write = regmap_mmio_write16be;
			}
			break;
		case 32:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread32be;
				ctx->reg_write = regmap_mmio_iowrite32be;
			} else {
				ctx->reg_read = regmap_mmio_read32be;
				ctx->reg_write = regmap_mmio_write32be;
			}
			break;
		default:
			ret = -EINVAL;
			goto err_free;
		}
		break;
	default:
		ret = -EINVAL;
		goto err_free;
	}

	if (clk_id == NULL)
		return ctx;

	ctx->clk = clk_get(dev, clk_id);
	if (IS_ERR(ctx->clk)) {
		ret = PTR_ERR(ctx->clk);
		goto err_free;
	}

	ret = clk_prepare(ctx->clk);
	if (ret < 0) {
		clk_put(ctx->clk);
		goto err_free;
	}

	return ctx;

err_free:
	kfree(ctx);

	return ERR_PTR(ret);
}

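/*
 * Drivers normally reach this via the regmap_init_mmio_clk() or
 * regmap_init_mmio() wrapper macros, which supply the lockdep key and name.
 */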
struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
				      void __iomem *regs,
				      const struct regmap_config *config,
				      struct lock_class_key *lock_key,
				      const char *lock_name)
{
	struct regmap_mmio_context *ctx;

	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return __regmap_init(dev, &regmap_mmio, ctx, config,
			     lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);

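/* Managed variant: the regmap is freed automatically on driver detach. */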
struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
					   const char *clk_id,
					   void __iomem *regs,
					   const struct regmap_config *config,
					   struct lock_class_key *lock_key,
					   const char *lock_name)
{
	struct regmap_mmio_context *ctx;

	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
				  lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);

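/*
 * Attach a caller-provided clock to the map instead of looking one up by
 * name; the caller keeps ownership of the clk reference.
 */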
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
{
	struct regmap_mmio_context *ctx = map->bus_context;

	ctx->clk = clk;
	ctx->attached_clk = true;

	return clk_prepare(ctx->clk);
}
EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);

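/* Undo regmap_mmio_attach_clk(): unprepare and forget the attached clock. */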
void regmap_mmio_detach_clk(struct regmap *map)
{
	struct regmap_mmio_context *ctx = map->bus_context;

	clk_unprepare(ctx->clk);

	ctx->attached_clk = false;
	ctx->clk = NULL;
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);

MODULE_LICENSE("GPL v2");