// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	const struct nvmem_keepout *keepout;
	unsigned int nkeepout;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	nvmem_cell_post_process_t cell_post_process;
	struct gpio_desc *wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char *id;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
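
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how a provider might describe keepout regions. The "foo" names are
 * hypothetical; struct nvmem_keepout and the nvmem_config fields are the
 * real API used by the code above.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x18, .value = 0xff },
 *		{ .start = 0x40, .end = 0x44, .value = 0x00 },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 *
 * Reads covering [0x10, 0x18) then return 0xff filler instead of touching
 * the device, and writes to that range are silently skipped.
 */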

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to stay backwards compatible with the
 * older drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell_entry **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_entry_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_entry_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
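
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a consumer that wants to react when nvmem devices or cells appear. The
 * foo_* names are hypothetical; the event codes (NVMEM_ADD, NVMEM_REMOVE,
 * NVMEM_CELL_ADD, NVMEM_CELL_REMOVE) come from <linux/nvmem-consumer.h>.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *	...
 *	nvmem_register_notifier(&foo_nb);
 */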

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell_entry *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_entry_add(cell);
	}

	return 0;
}
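
/*
 * Illustrative device-tree sketch (added for clarity, not part of the
 * original file) of the layout nvmem_add_cells_from_of() parses: each
 * child node with a "reg" property becomes a cell, and an optional
 * "bits" property gives a bit offset and width within those bytes. The
 * node names and the atmel,24c32 compatible are hypothetical.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		mac-address@90 {
 *			reg = <0x90 0x6>;	// offset 0x90, 6 bytes
 *		};
 *
 *		calib@a0 {
 *			reg = <0xa0 0x1>;
 *			bits = <2 5>;		// 5 bits starting at bit 2
 *		};
 *	};
 */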

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->cell_post_process = config->cell_post_process;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
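
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a minimal provider registration. foo_reg_read and foo_priv are
 * hypothetical; the nvmem_config fields and devm_nvmem_register() are
 * the real API. At least one of reg_read/reg_write must be set, or
 * nvmem_register() returns -EINVAL.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo_priv = priv;
 *
 *		memcpy(val, foo_priv->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static struct nvmem_config config = {
 *		.name = "foo-otp",
 *		.read_only = true,
 *		.word_size = 1,
 *		.stride = 1,
 *		.size = 256,
 *		.reg_read = foo_reg_read,
 *	};
 *	...
 *	config.dev = &pdev->dev;
 *	config.priv = foo_priv;
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 */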

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister a previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
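
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * looking up a whole nvmem device by the name given in an "nvmem-names"
 * property ("foo" is hypothetical), then reading raw bytes from it with
 * nvmem_device_read(), defined later in this file.
 *
 *	struct nvmem_device *nv = nvmem_device_get(dev, "foo");
 *	if (!IS_ERR(nv)) {
 *		u8 serial[8];
 *
 *		nvmem_device_read(nv, 0x20, sizeof(serial), serial);
 *		nvmem_device_put(nv);
 *	}
 */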

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get an nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
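
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the usual consumer flow around nvmem_cell_get(). The cell name
 * "mac-address" is hypothetical and would come from nvmem-cell-names (DT)
 * or from a lookup entry's con_id on non-DT systems.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */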

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
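
/*
 * Worked example (added for clarity, not part of the original file):
 * take a cell with bit_offset = 2 and nbits = 10, so bytes = 2. With raw
 * bytes buf[0] = 0xAB (1010_1011) and buf[1] = 0x7C (0111_1100), the
 * shift above does:
 *
 *	buf[0] >>= 2;			// 0010_1010 (0x2a)
 *	buf[0] |= buf[1] << 6;		// low two bits of buf[1] are 00, so still 0x2a
 *	buf[1] >>= 2;			// 0001_1111 (0x1f)
 *	buf[1] &= GENMASK(1, 0);	// keep nbits % 8 = 2 bits -> 0x03
 *
 * leaving the 10-bit little-endian value 0x32a, i.e. (0x7cab >> 2) & 0x3ff.
 */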

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (nvmem->cell_post_process) {
		rc = nvmem->cell_post_process(nvmem->priv, id,
					      cell->offset, buf, cell->bytes);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
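
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the typed helpers wrap get + read + put for fixed-size cells. The cell
 * name "chip-rev" is hypothetical; the read fails with -EINVAL unless the
 * cell is exactly sizeof(u32) bytes.
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "chip-rev", &rev);
 *	if (ret)
 *		return ret;
 */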

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write a cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
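
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * ad-hoc cell access on a device handle via nvmem_device_cell_read(),
 * without registering the cell first. All field values and the cell name
 * are hypothetical.
 *
 *	struct nvmem_cell_info info = {
 *		.name = "board-id",
 *		.offset = 0x40,
 *		.bytes = 4,
 *	};
 *	u8 id[4];
 *
 *	nvmem_device_cell_read(nvmem, &info, id);
 */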

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
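
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a board file attaching cell definitions to a provider by name. All
 * names are hypothetical; the table must outlive the nvmem device, since
 * nvmem_add_cells_from_table() reads it at provider registration time.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x90, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table foo_table = {
 *		.nvmem_name = "foo-otp",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_table);
 */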

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
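
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * connecting a consumer device to a named cell on non-DT platforms, so
 * that nvmem_cell_get(dev, "mac-address") resolves through the lookup
 * list above. All names are hypothetical.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-otp",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-eth.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */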

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * nvmem framework core.
4 *
5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7 */
8
9#include <linux/device.h>
10#include <linux/export.h>
11#include <linux/fs.h>
12#include <linux/idr.h>
13#include <linux/init.h>
14#include <linux/kref.h>
15#include <linux/module.h>
16#include <linux/nvmem-consumer.h>
17#include <linux/nvmem-provider.h>
18#include <linux/of.h>
19#include <linux/slab.h>
20#include "nvmem.h"
21
22struct nvmem_cell {
23 const char *name;
24 int offset;
25 int bytes;
26 int bit_offset;
27 int nbits;
28 struct device_node *np;
29 struct nvmem_device *nvmem;
30 struct list_head node;
31};
32
33static DEFINE_MUTEX(nvmem_mutex);
34static DEFINE_IDA(nvmem_ida);
35
36static DEFINE_MUTEX(nvmem_cell_mutex);
37static LIST_HEAD(nvmem_cell_tables);
38
39static DEFINE_MUTEX(nvmem_lookup_mutex);
40static LIST_HEAD(nvmem_lookup_list);
41
42static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
43
44
45static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
46 void *val, size_t bytes)
47{
48 if (nvmem->reg_read)
49 return nvmem->reg_read(nvmem->priv, offset, val, bytes);
50
51 return -EINVAL;
52}
53
54static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
55 void *val, size_t bytes)
56{
57 if (nvmem->reg_write)
58 return nvmem->reg_write(nvmem->priv, offset, val, bytes);
59
60 return -EINVAL;
61}
62
63static void nvmem_release(struct device *dev)
64{
65 struct nvmem_device *nvmem = to_nvmem_device(dev);
66
67 ida_simple_remove(&nvmem_ida, nvmem->id);
68 kfree(nvmem);
69}
70
71static const struct device_type nvmem_provider_type = {
72 .release = nvmem_release,
73};
74
75static struct bus_type nvmem_bus_type = {
76 .name = "nvmem",
77};
78
79static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
80{
81 struct device *d;
82
83 if (!nvmem_np)
84 return NULL;
85
86 d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);
87
88 if (!d)
89 return NULL;
90
91 return to_nvmem_device(d);
92}
93
94static struct nvmem_device *nvmem_find(const char *name)
95{
96 struct device *d;
97
98 d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
99
100 if (!d)
101 return NULL;
102
103 return to_nvmem_device(d);
104}
105
106static void nvmem_cell_drop(struct nvmem_cell *cell)
107{
108 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
109 mutex_lock(&nvmem_mutex);
110 list_del(&cell->node);
111 mutex_unlock(&nvmem_mutex);
112 of_node_put(cell->np);
113 kfree(cell->name);
114 kfree(cell);
115}
116
117static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
118{
119 struct nvmem_cell *cell, *p;
120
121 list_for_each_entry_safe(cell, p, &nvmem->cells, node)
122 nvmem_cell_drop(cell);
123}
124
125static void nvmem_cell_add(struct nvmem_cell *cell)
126{
127 mutex_lock(&nvmem_mutex);
128 list_add_tail(&cell->node, &cell->nvmem->cells);
129 mutex_unlock(&nvmem_mutex);
130 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
131}
132
133static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
134 const struct nvmem_cell_info *info,
135 struct nvmem_cell *cell)
136{
137 cell->nvmem = nvmem;
138 cell->offset = info->offset;
139 cell->bytes = info->bytes;
140 cell->name = info->name;
141
142 cell->bit_offset = info->bit_offset;
143 cell->nbits = info->nbits;
144
145 if (cell->nbits)
146 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
147 BITS_PER_BYTE);
148
149 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
150 dev_err(&nvmem->dev,
151 "cell %s unaligned to nvmem stride %d\n",
152 cell->name, nvmem->stride);
153 return -EINVAL;
154 }
155
156 return 0;
157}
158
159/**
160 * nvmem_add_cells() - Add cell information to an nvmem device
161 *
162 * @nvmem: nvmem device to add cells to.
163 * @info: nvmem cell info to add to the device
164 * @ncells: number of cells in info
165 *
166 * Return: 0 or negative error code on failure.
167 */
168static int nvmem_add_cells(struct nvmem_device *nvmem,
169 const struct nvmem_cell_info *info,
170 int ncells)
171{
172 struct nvmem_cell **cells;
173 int i, rval;
174
175 cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
176 if (!cells)
177 return -ENOMEM;
178
179 for (i = 0; i < ncells; i++) {
180 cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
181 if (!cells[i]) {
182 rval = -ENOMEM;
183 goto err;
184 }
185
186 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
187 if (rval) {
188 kfree(cells[i]);
189 goto err;
190 }
191
192 nvmem_cell_add(cells[i]);
193 }
194
195 /* remove tmp array */
196 kfree(cells);
197
198 return 0;
199err:
200 while (i--)
201 nvmem_cell_drop(cells[i]);
202
203 kfree(cells);
204
205 return rval;
206}
207
208/**
209 * nvmem_register_notifier() - Register a notifier block for nvmem events.
210 *
211 * @nb: notifier block to be called on nvmem events.
212 *
213 * Return: 0 on success, negative error number on failure.
214 */
215int nvmem_register_notifier(struct notifier_block *nb)
216{
217 return blocking_notifier_chain_register(&nvmem_notifier, nb);
218}
219EXPORT_SYMBOL_GPL(nvmem_register_notifier);
220
221/**
222 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
223 *
224 * @nb: notifier block to be unregistered.
225 *
226 * Return: 0 on success, negative error number on failure.
227 */
228int nvmem_unregister_notifier(struct notifier_block *nb)
229{
230 return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
231}
232EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
		if (!cell->name) {
			of_node_put(cell->np);
			kfree(cell);
			of_node_put(child);
			return -ENOMEM;
		}

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			of_node_put(cell->np);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
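
/*
 * For reference (summarizing only what the parser above reads, not a
 * complete binding document): each child node of the provider node
 * describes one cell.  "reg" gives <offset size> in bytes, and the
 * optional "bits" property gives <bit_offset nbits> for cells that are
 * not byte aligned, e.g. reg = <0x40 0x2>; bits = <2 10>;
 */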

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
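
/*
 * Example provider registration (illustrative sketch; foo_read(), the
 * foo_chip structure, the device name and the 256-byte size are all
 * invented for this comment):
 *
 *	static int foo_read(void *priv, unsigned int offset,
 *			    void *val, size_t bytes)
 *	{
 *		struct foo_chip *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static struct nvmem_config foo_config = {
 *		.name = "foo-eeprom",
 *		.id = -1,
 *		.read_only = true,
 *		.word_size = 1,
 *		.stride = 1,
 *		.size = 256,
 *		.reg_read = foo_read,
 *	};
 *
 *	foo_config.dev = dev;
 *	foo_config.priv = foo;
 *	nvmem = nvmem_register(&foo_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */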

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
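
/*
 * Typical use from a driver probe() (sketch; "foo_config" as in the
 * registration example above).  No explicit unregister is needed: the
 * devres callback unregisters the device when the driver unbinds.
 *
 *	nvmem = devm_nvmem_register(dev, &foo_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */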

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be zero on success or negative on failure.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for nvmem device %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	/* __nvmem_device_get() holds its own references; drop ours. */
	of_node_put(nvmem_np);

	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
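
/*
 * Example (sketch; the "config" name is hypothetical and must match an
 * nvmem-names entry or a registered provider name):
 *
 *	struct nvmem_device *nvmem;
 *
 *	nvmem = devm_nvmem_device_get(dev, "config");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */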

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
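
/*
 * Example consumer (illustrative sketch; the "calibration" cell name is
 * invented and must match an nvmem-cell-names entry or a lookup con_id):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... use len bytes at data, then:
 *	kfree(data);
 */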

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
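
/*
 * Worked example for the in-place shift above (illustrative): with
 * bit_offset = 2 and nbits = 10 the cell spans two raw bytes, where
 * byte0 bits 7..2 hold value bits v5..v0 and byte1 bits 3..0 hold
 * v9..v6.  The loop yields byte0 = v7..v0 and byte1 = v9..v8, and the
 * final GENMASK(nbits % 8 - 1, 0) clears the stale high bits of byte1.
 */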

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success.  The
 * buffer should be freed by the consumer with kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get the previous byte's msb bits and move them to the lsbs */
			pbits = pbyte >> (BITS_PER_BYTE - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the write does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: number of bytes written, or a negative error code on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
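
/*
 * Example (sketch; assumes a writable 4-byte cell obtained as in the
 * read example above):
 *
 *	u32 val = 0xcafef00d;
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, &val, sizeof(val));
 *	if (ret != sizeof(val))
 *		return ret < 0 ? ret : -EIO;
 */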

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
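
/*
 * Example (sketch; "chip-rev" is an invented name for a 4-byte cell):
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "chip-rev", &rev);
 *	if (ret)
 *		return ret;
 */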

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	/* the temporary cell owns a duplicated name; drop it */
	kfree_const(cell.name);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = nvmem_cell_write(&cell, buf, cell.bytes);
	/* the temporary cell owns a duplicated name; drop it */
	kfree_const(cell.name);

	return rc;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
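
/*
 * Example (sketch): dump the first 16 bytes of a device obtained with
 * nvmem_device_get() or devm_nvmem_device_get():
 *
 *	u8 hdr[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(hdr), hdr);
 *	if (ret != sizeof(hdr))
 *		return ret < 0 ? ret : -EIO;
 */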

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
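
/*
 * Example (sketch; all names and offsets invented): a board file
 * describing one cell on the provider registered as "foo-eeprom":
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "mac-address",
 *			.offset = 0x40,
 *			.bytes = 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-eeprom",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */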

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
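
/*
 * Example (sketch; all names invented): route the "mac-address" cell on
 * the provider "foo-eeprom" to the consumer device "bar-eth.0" under
 * the connection id "mac", as matched by nvmem_cell_get_from_lookup():
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-eeprom",
 *			.cell_name = "mac-address",
 *			.dev_id = "bar-eth.0",
 *			.con_id = "mac",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */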

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");