Loading...
1// SPDX-License-Identifier: GPL-2.0
2//
3// regmap KUnit tests
4//
5// Copyright 2023 Arm Ltd
6
7#include <kunit/test.h>
8#include "internal.h"
9
10#define BLOCK_TEST_SIZE 12
11
12static void get_changed_bytes(void *orig, void *new, size_t size)
13{
14 char *o = orig;
15 char *n = new;
16 int i;
17
18 get_random_bytes(new, size);
19
20 /*
21 * This could be nicer and more efficient but we shouldn't
22 * super care.
23 */
24 for (i = 0; i < size; i++)
25 while (n[i] == o[i])
26 get_random_bytes(&n[i], 1);
27}
28
/*
 * Base config shared by the cached-register tests: native-word values,
 * contiguous registers 0..BLOCK_TEST_SIZE.  Tests copy and tweak it.
 */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
34
/* Parameter tuple naming a cache implementation for parameterised tests */
struct regcache_types {
	enum regcache_type type;	/* cache backend to instantiate */
	const char *name;		/* human readable label for test output */
};
39
/* KUNIT_ARRAY_PARAM description helper: name the case after the cache type */
static void case_to_desc(const struct regcache_types *t, char *desc)
{
	strcpy(desc, t->name);
}
44
/* Every cache type, including running with no cache at all */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
53
/* Cache types that actually store values (excludes REGCACHE_NONE) */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
61
/* Cache types that can represent sparse register maps (no flat cache) */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
68
69static struct regmap *gen_regmap(struct regmap_config *config,
70 struct regmap_ram_data **data)
71{
72 unsigned int *buf;
73 struct regmap *ret;
74 size_t size = (config->max_register + 1) * sizeof(unsigned int);
75 int i;
76 struct reg_default *defaults;
77
78 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
79 config->cache_type == REGCACHE_MAPLE;
80
81 buf = kmalloc(size, GFP_KERNEL);
82 if (!buf)
83 return ERR_PTR(-ENOMEM);
84
85 get_random_bytes(buf, size);
86
87 *data = kzalloc(sizeof(**data), GFP_KERNEL);
88 if (!(*data))
89 return ERR_PTR(-ENOMEM);
90 (*data)->vals = buf;
91
92 if (config->num_reg_defaults) {
93 defaults = kcalloc(config->num_reg_defaults,
94 sizeof(struct reg_default),
95 GFP_KERNEL);
96 if (!defaults)
97 return ERR_PTR(-ENOMEM);
98 config->reg_defaults = defaults;
99
100 for (i = 0; i < config->num_reg_defaults; i++) {
101 defaults[i].reg = i * config->reg_stride;
102 defaults[i].def = buf[i * config->reg_stride];
103 }
104 }
105
106 ret = regmap_init_ram(config, *data);
107 if (IS_ERR(ret)) {
108 kfree(buf);
109 kfree(*data);
110 }
111
112 return ret;
113}
114
/* Access predicate used by the tests: every register except 5 is allowed */
static bool reg_5_false(struct device *context, unsigned int reg)
{
	if (reg == 5)
		return false;

	return true;
}
119
/* A value written to a single register can be read straight back */
static void basic_read_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);

	regmap_exit(map);
}
148
/* Data written via the bulk API can be read back with single reads */
static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
185
/* Data written with single writes can be read back via the bulk API */
static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
218
/* Writes to a non-writeable register fail and never reach the device */
static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;	/* register 5 is read only */

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
253
/*
 * Reads from a non-readable register fail, except with the flat cache
 * which caches everything and so can satisfy them without the device.
 */
static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.readable_reg = reg_5_false;	/* register 5 is write only */

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}
293
294static void reg_defaults(struct kunit *test)
295{
296 struct regcache_types *t = (struct regcache_types *)test->param_value;
297 struct regmap *map;
298 struct regmap_config config;
299 struct regmap_ram_data *data;
300 unsigned int rval[BLOCK_TEST_SIZE];
301 int i;
302
303 config = test_regmap_config;
304 config.cache_type = t->type;
305 config.num_reg_defaults = BLOCK_TEST_SIZE;
306
307 map = gen_regmap(&config, &data);
308 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
309 if (IS_ERR(map))
310 return;
311
312 /* Read back the expected default data */
313 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
314 BLOCK_TEST_SIZE));
315 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
316
317 /* The data should have been read from cache if there was one */
318 for (i = 0; i < BLOCK_TEST_SIZE; i++)
319 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
320}
321
322static void reg_defaults_read_dev(struct kunit *test)
323{
324 struct regcache_types *t = (struct regcache_types *)test->param_value;
325 struct regmap *map;
326 struct regmap_config config;
327 struct regmap_ram_data *data;
328 unsigned int rval[BLOCK_TEST_SIZE];
329 int i;
330
331 config = test_regmap_config;
332 config.cache_type = t->type;
333 config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
334
335 map = gen_regmap(&config, &data);
336 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
337 if (IS_ERR(map))
338 return;
339
340 /* We should have read the cache defaults back from the map */
341 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
342 KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
343 data->read[i] = false;
344 }
345
346 /* Read back the expected default data */
347 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
348 BLOCK_TEST_SIZE));
349 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
350
351 /* The data should have been read from cache if there was one */
352 for (i = 0; i < BLOCK_TEST_SIZE; i++)
353 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
354}
355
/* regmap_register_patch() writes only the patched registers to the device */
static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
407
/* With reg_stride 2 only the even registers can be accessed */
static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	/* Only every second register exists, so half the defaults */
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}
450
/*
 * An indirectly accessed (paged) range: virtual registers 20..40 are
 * reached through a 10 register window starting at 4, with the page
 * selected via the selector register at 1.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
461
/* True for registers inside the paging window itself */
static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	/*
	 * NOTE(review): the <= includes window_start + window_len, i.e.
	 * one register beyond a window of window_len registers — looks
	 * like an off-by-one; confirm whether the extra register is
	 * intentional before changing it.
	 */
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}
470
471static bool test_range_all_volatile(struct device *dev, unsigned int reg)
472{
473 if (test_range_window_volatile(dev, reg))
474 return true;
475
476 if (reg >= test_range.range_min && reg <= test_range.range_max)
477 return true;
478
479 return false;
480}
481
/*
 * Accesses to the virtual range go through the window: each access
 * should update the page selector and touch only window registers,
 * never the virtual register numbers themselves.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* A register one page further also lands on window_start */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
552
553/* Try to stress dynamic creation of cache data structures */
554static void stress_insert(struct kunit *test)
555{
556 struct regcache_types *t = (struct regcache_types *)test->param_value;
557 struct regmap *map;
558 struct regmap_config config;
559 struct regmap_ram_data *data;
560 unsigned int rval, *vals;
561 size_t buf_sz;
562 int i;
563
564 config = test_regmap_config;
565 config.cache_type = t->type;
566 config.max_register = 300;
567
568 map = gen_regmap(&config, &data);
569 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
570 if (IS_ERR(map))
571 return;
572
573 vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
574 GFP_KERNEL);
575 KUNIT_ASSERT_FALSE(test, vals == NULL);
576 buf_sz = sizeof(unsigned long) * config.max_register;
577
578 get_random_bytes(vals, buf_sz);
579
580 /* Write data into the map/cache in ever decreasing strides */
581 for (i = 0; i < config.max_register; i += 100)
582 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
583 for (i = 0; i < config.max_register; i += 50)
584 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
585 for (i = 0; i < config.max_register; i += 25)
586 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
587 for (i = 0; i < config.max_register; i += 10)
588 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
589 for (i = 0; i < config.max_register; i += 5)
590 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
591 for (i = 0; i < config.max_register; i += 3)
592 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
593 for (i = 0; i < config.max_register; i += 2)
594 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
595 for (i = 0; i < config.max_register; i++)
596 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
597
598 /* Do reads from the cache (if there is one) match? */
599 for (i = 0; i < config.max_register; i ++) {
600 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
601 KUNIT_EXPECT_EQ(test, rval, vals[i]);
602 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
603 }
604
605 regmap_exit(map);
606}
607
/*
 * While bypass is enabled writes go straight to the device and the
 * cache is left holding the pre-bypass value.
 */
static void cache_bypass(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[0], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}
645
/* regcache_sync() rewrites cached values to a trashed device */
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}
683
/* A sync only writes out registers that differ from their defaults */
static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}
719
/* A sync never writes to registers marked non-writeable */
static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.writeable_reg = reg_5_false;	/* register 5 is read only */

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
760
/*
 * A registered patch is reapplied to the device by regcache_sync()
 * but does not become part of the cached values.
 */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
821
/* After regcache_drop_region() only the dropped registers hit the device */
static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}
863
/* regcache_reg_cached() reflects whether a register has been cached yet */
static void cache_present(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));

	regmap_exit(map);
}
902
903/* Check that caching the window register works with sync */
904static void cache_range_window_reg(struct kunit *test)
905{
906 struct regcache_types *t = (struct regcache_types *)test->param_value;
907 struct regmap *map;
908 struct regmap_config config;
909 struct regmap_ram_data *data;
910 unsigned int val;
911 int i;
912
913 config = test_regmap_config;
914 config.cache_type = t->type;
915 config.volatile_reg = test_range_window_volatile;
916 config.ranges = &test_range;
917 config.num_ranges = 1;
918 config.max_register = test_range.range_max;
919
920 map = gen_regmap(&config, &data);
921 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
922 if (IS_ERR(map))
923 return;
924
925 /* Write new values to the entire range */
926 for (i = test_range.range_min; i <= test_range.range_max; i++)
927 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
928
929 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
930 KUNIT_ASSERT_EQ(test, val, 2);
931
932 /* Write to the first register in the range to reset the page */
933 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
934 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
935 KUNIT_ASSERT_EQ(test, val, 0);
936
937 /* Trigger a cache sync */
938 regcache_mark_dirty(map);
939 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
940
941 /* Write to the first register again, the page should be reset */
942 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
943 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
944 KUNIT_ASSERT_EQ(test, val, 0);
945
946 /* Trigger another cache sync */
947 regcache_mark_dirty(map);
948 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
949
950 /* Write to the last register again, the page should be reset */
951 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
952 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
953 KUNIT_ASSERT_EQ(test, val, 2);
954}
955
/* Parameter tuple for the raw (byte oriented) tests */
struct raw_test_types {
	const char *name;		/* human readable label for test output */

	enum regcache_type cache_type;	/* cache backend to instantiate */
	enum regmap_endian val_endian;	/* value endianness on the "wire" */
};
962
/* KUNIT_ARRAY_PARAM description helper for the raw test cases */
static void raw_to_desc(const struct raw_test_types *t, char *desc)
{
	strcpy(desc, t->name);
}
967
/* Every cache type crossed with both value endiannesses */
static const struct raw_test_types raw_types_list[] = {
	{ "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
	{ "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
	{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
	{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
	{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
980
/* As raw_types_list but only cache types that actually store values */
static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
	{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
	{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
991
/* Base config for the raw tests: 16-bit registers and 16-bit values */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
999
1000static struct regmap *gen_raw_regmap(struct regmap_config *config,
1001 struct raw_test_types *test_type,
1002 struct regmap_ram_data **data)
1003{
1004 u16 *buf;
1005 struct regmap *ret;
1006 size_t size = (config->max_register + 1) * config->reg_bits / 8;
1007 int i;
1008 struct reg_default *defaults;
1009
1010 config->cache_type = test_type->cache_type;
1011 config->val_format_endian = test_type->val_endian;
1012 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
1013 config->cache_type == REGCACHE_MAPLE;
1014
1015 buf = kmalloc(size, GFP_KERNEL);
1016 if (!buf)
1017 return ERR_PTR(-ENOMEM);
1018
1019 get_random_bytes(buf, size);
1020
1021 *data = kzalloc(sizeof(**data), GFP_KERNEL);
1022 if (!(*data))
1023 return ERR_PTR(-ENOMEM);
1024 (*data)->vals = (void *)buf;
1025
1026 config->num_reg_defaults = config->max_register + 1;
1027 defaults = kcalloc(config->num_reg_defaults,
1028 sizeof(struct reg_default),
1029 GFP_KERNEL);
1030 if (!defaults)
1031 return ERR_PTR(-ENOMEM);
1032 config->reg_defaults = defaults;
1033
1034 for (i = 0; i < config->num_reg_defaults; i++) {
1035 defaults[i].reg = i;
1036 switch (test_type->val_endian) {
1037 case REGMAP_ENDIAN_LITTLE:
1038 defaults[i].def = le16_to_cpu(buf[i]);
1039 break;
1040 case REGMAP_ENDIAN_BIG:
1041 defaults[i].def = be16_to_cpu(buf[i]);
1042 break;
1043 default:
1044 return ERR_PTR(-EINVAL);
1045 }
1046 }
1047
1048 /*
1049 * We use the defaults in the tests but they don't make sense
1050 * to the core if there's no cache.
1051 */
1052 if (config->cache_type == REGCACHE_NONE)
1053 config->num_reg_defaults = 0;
1054
1055 ret = regmap_init_raw_ram(config, *data);
1056 if (IS_ERR(ret)) {
1057 kfree(buf);
1058 kfree(*data);
1059 }
1060
1061 return ret;
1062}
1063
1064static void raw_read_defaults_single(struct kunit *test)
1065{
1066 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1067 struct regmap *map;
1068 struct regmap_config config;
1069 struct regmap_ram_data *data;
1070 unsigned int rval;
1071 int i;
1072
1073 config = raw_regmap_config;
1074
1075 map = gen_raw_regmap(&config, t, &data);
1076 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1077 if (IS_ERR(map))
1078 return;
1079
1080 /* Check that we can read the defaults via the API */
1081 for (i = 0; i < config.max_register + 1; i++) {
1082 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1083 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1084 }
1085
1086 regmap_exit(map);
1087}
1088
1089static void raw_read_defaults(struct kunit *test)
1090{
1091 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1092 struct regmap *map;
1093 struct regmap_config config;
1094 struct regmap_ram_data *data;
1095 u16 *rval;
1096 u16 def;
1097 size_t val_len;
1098 int i;
1099
1100 config = raw_regmap_config;
1101
1102 map = gen_raw_regmap(&config, t, &data);
1103 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1104 if (IS_ERR(map))
1105 return;
1106
1107 val_len = sizeof(*rval) * (config.max_register + 1);
1108 rval = kmalloc(val_len, GFP_KERNEL);
1109 KUNIT_ASSERT_TRUE(test, rval != NULL);
1110 if (!rval)
1111 return;
1112
1113 /* Check that we can read the defaults via the API */
1114 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1115 for (i = 0; i < config.max_register + 1; i++) {
1116 def = config.reg_defaults[i].def;
1117 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1118 KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
1119 } else {
1120 KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
1121 }
1122 }
1123
1124 kfree(rval);
1125 regmap_exit(map);
1126}
1127
1128static void raw_write_read_single(struct kunit *test)
1129{
1130 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1131 struct regmap *map;
1132 struct regmap_config config;
1133 struct regmap_ram_data *data;
1134 u16 val;
1135 unsigned int rval;
1136
1137 config = raw_regmap_config;
1138
1139 map = gen_raw_regmap(&config, t, &data);
1140 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1141 if (IS_ERR(map))
1142 return;
1143
1144 get_random_bytes(&val, sizeof(val));
1145
1146 /* If we write a value to a register we can read it back */
1147 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1148 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1149 KUNIT_EXPECT_EQ(test, val, rval);
1150
1151 regmap_exit(map);
1152}
1153
1154static void raw_write(struct kunit *test)
1155{
1156 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1157 struct regmap *map;
1158 struct regmap_config config;
1159 struct regmap_ram_data *data;
1160 u16 *hw_buf;
1161 u16 val[2];
1162 unsigned int rval;
1163 int i;
1164
1165 config = raw_regmap_config;
1166
1167 map = gen_raw_regmap(&config, t, &data);
1168 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1169 if (IS_ERR(map))
1170 return;
1171
1172 hw_buf = (u16 *)data->vals;
1173
1174 get_random_bytes(&val, sizeof(val));
1175
1176 /* Do a raw write */
1177 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1178
1179 /* We should read back the new values, and defaults for the rest */
1180 for (i = 0; i < config.max_register + 1; i++) {
1181 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1182
1183 switch (i) {
1184 case 2:
1185 case 3:
1186 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1187 KUNIT_EXPECT_EQ(test, rval,
1188 be16_to_cpu(val[i % 2]));
1189 } else {
1190 KUNIT_EXPECT_EQ(test, rval,
1191 le16_to_cpu(val[i % 2]));
1192 }
1193 break;
1194 default:
1195 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1196 break;
1197 }
1198 }
1199
1200 /* The values should appear in the "hardware" */
1201 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1202
1203 regmap_exit(map);
1204}
1205
1206static bool reg_zero(struct device *dev, unsigned int reg)
1207{
1208 return reg == 0;
1209}
1210
1211static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
1212{
1213 return reg == 0;
1214}
1215
/*
 * Stream a block of values into the non-incrementing register 0 and
 * check that only the last value is retained and that the following
 * register (1) is not clobbered by the stream.
 */
static void raw_noinc_write(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	/* Register 0 is volatile and noinc in both directions */
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	/*
	 * val_array holds wire-format data; convert to CPU order.  The
	 * +100 makes val_test distinct from any streamed value.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);

	regmap_exit(map);
}
1265
/*
 * Write through both the raw and single-register APIs while the cache
 * is in cache-only mode, check nothing reaches the "hardware", then
 * sync and check everything lands.
 */
static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* Values guaranteed to differ byte-for-byte from the hardware */
	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Raw-written registers: convert from wire format */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i - 2]));
			}
			break;
		case 4:
			/* Written via regmap_write(), already CPU order */
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));

	regmap_exit(map);
}
1343
/*
 * The test cases; each entry pairs a test function with the parameter
 * list (cache types) it is run against.
 */
static struct kunit_case regmap_test_cases[] = {
	/* Run against every cache type including REGCACHE_NONE */
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	/* Tests that only make sense with a real cache */
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),

	/* Raw (byte stream) format tests */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	{}
};
1373
/* Register the suite with KUnit under the name "regmap" */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
1379
1380MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2//
3// regmap KUnit tests
4//
5// Copyright 2023 Arm Ltd
6
7#include <kunit/device.h>
8#include <kunit/resource.h>
9#include <kunit/test.h>
10#include "internal.h"
11
12#define BLOCK_TEST_SIZE 12
13
14KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
15
/* Per-test state stored in test->priv */
struct regmap_test_priv {
	struct device *dev;	/* KUnit-managed device backing the regmap */
};
19
/* One entry in a KUNIT_ARRAY_PARAM table; read via test->param_value */
struct regmap_test_param {
	enum regcache_type cache;	/* cache implementation under test */
	enum regmap_endian val_endian;	/* value endianness for raw tests */

	unsigned int from_reg;		/* first register the map covers */
	bool fast_io;			/* exercise the fast_io (spinlock) path */
};
27
28static void get_changed_bytes(void *orig, void *new, size_t size)
29{
30 char *o = orig;
31 char *n = new;
32 int i;
33
34 get_random_bytes(new, size);
35
36 /*
37 * This could be nicer and more efficient but we shouldn't
38 * super care.
39 */
40 for (i = 0; i < size; i++)
41 while (n[i] == o[i])
42 get_random_bytes(&n[i], 1);
43}
44
/*
 * Base config for the non-raw tests; max_register is left at 0 so
 * gen_regmap() can derive it from the test parameter's from_reg.
 */
static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
49
50static const char *regcache_type_name(enum regcache_type type)
51{
52 switch (type) {
53 case REGCACHE_NONE:
54 return "none";
55 case REGCACHE_FLAT:
56 return "flat";
57 case REGCACHE_RBTREE:
58 return "rbtree";
59 case REGCACHE_MAPLE:
60 return "maple";
61 default:
62 return NULL;
63 }
64}
65
66static const char *regmap_endian_name(enum regmap_endian endian)
67{
68 switch (endian) {
69 case REGMAP_ENDIAN_BIG:
70 return "big";
71 case REGMAP_ENDIAN_LITTLE:
72 return "little";
73 case REGMAP_ENDIAN_DEFAULT:
74 return "default";
75 case REGMAP_ENDIAN_NATIVE:
76 return "native";
77 default:
78 return NULL;
79 }
80}
81
/* Build the KUnit case description, e.g. "maple-default fast I/O @0x2001" */
static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->fast_io ? " fast I/O" : "",
		 param->from_reg);
}
90
/* All cache types (including no cache), each with and without fast_io */
static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_NONE, .fast_io = true },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_FLAT, .fast_io = true },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_RBTREE, .fast_io = true },
	{ .cache = REGCACHE_MAPLE },
	{ .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
103
/* Real caches only (REGCACHE_NONE excluded), all at from_reg 0 */
static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_FLAT, .fast_io = true },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_RBTREE, .fast_io = true },
	{ .cache = REGCACHE_MAPLE },
	{ .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
114
/*
 * Real caches at several non-zero base registers; the 0x2001-0x2004
 * values vary the base's alignment.
 */
static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
137
/* Caches that support sparse register maps (flat excluded) */
static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
154
/*
 * Create a RAM-backed test regmap configured from the current test
 * parameter (cache type, fast_io, base register).  If the caller left
 * max_register at 0 it is derived from from_reg plus either the
 * default count or BLOCK_TEST_SIZE.  Cleanup is registered as a KUnit
 * deferred action, so tests do not need to call regmap_exit().
 *
 * Returns the regmap or an ERR_PTR(); on error all allocations made
 * here are freed before returning.
 */
static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	size_t size;
	int i, error;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->fast_io = param->fast_io;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	/* array_size() saturates rather than overflowing the multiply */
	size = array_size(config->max_register + 1, sizeof(*buf));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		/* Test-managed allocation; KUnit frees it at test end */
		defaults = kunit_kcalloc(test,
					 config->num_reg_defaults,
					 sizeof(struct reg_default),
					 GFP_KERNEL);
		if (!defaults)
			goto out_free;

		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}
224
225static bool reg_5_false(struct device *dev, unsigned int reg)
226{
227 struct kunit *test = dev_get_drvdata(dev);
228 const struct regmap_test_param *param = test->param_value;
229
230 return reg != (param->from_reg + 5);
231}
232
233static void basic_read_write(struct kunit *test)
234{
235 struct regmap *map;
236 struct regmap_config config;
237 struct regmap_ram_data *data;
238 unsigned int val, rval;
239
240 config = test_regmap_config;
241
242 map = gen_regmap(test, &config, &data);
243 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
244 if (IS_ERR(map))
245 return;
246
247 get_random_bytes(&val, sizeof(val));
248
249 /* If we write a value to a register we can read it back */
250 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
251 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
252 KUNIT_EXPECT_EQ(test, val, rval);
253
254 /* If using a cache the cache satisfied the read */
255 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
256}
257
258static void bulk_write(struct kunit *test)
259{
260 struct regmap *map;
261 struct regmap_config config;
262 struct regmap_ram_data *data;
263 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
264 int i;
265
266 config = test_regmap_config;
267
268 map = gen_regmap(test, &config, &data);
269 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
270 if (IS_ERR(map))
271 return;
272
273 get_random_bytes(&val, sizeof(val));
274
275 /*
276 * Data written via the bulk API can be read back with single
277 * reads.
278 */
279 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
280 BLOCK_TEST_SIZE));
281 for (i = 0; i < BLOCK_TEST_SIZE; i++)
282 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
283
284 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
285
286 /* If using a cache the cache satisfied the read */
287 for (i = 0; i < BLOCK_TEST_SIZE; i++)
288 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
289}
290
291static void bulk_read(struct kunit *test)
292{
293 struct regmap *map;
294 struct regmap_config config;
295 struct regmap_ram_data *data;
296 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
297 int i;
298
299 config = test_regmap_config;
300
301 map = gen_regmap(test, &config, &data);
302 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
303 if (IS_ERR(map))
304 return;
305
306 get_random_bytes(&val, sizeof(val));
307
308 /* Data written as single writes can be read via the bulk API */
309 for (i = 0; i < BLOCK_TEST_SIZE; i++)
310 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
311 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
312 BLOCK_TEST_SIZE));
313 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
314
315 /* If using a cache the cache satisfied the read */
316 for (i = 0; i < BLOCK_TEST_SIZE; i++)
317 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
318}
319
320static void multi_write(struct kunit *test)
321{
322 struct regmap *map;
323 struct regmap_config config;
324 struct regmap_ram_data *data;
325 struct reg_sequence sequence[BLOCK_TEST_SIZE];
326 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
327 int i;
328
329 config = test_regmap_config;
330
331 map = gen_regmap(test, &config, &data);
332 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
333 if (IS_ERR(map))
334 return;
335
336 get_random_bytes(&val, sizeof(val));
337
338 /*
339 * Data written via the multi API can be read back with single
340 * reads.
341 */
342 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
343 sequence[i].reg = i;
344 sequence[i].def = val[i];
345 sequence[i].delay_us = 0;
346 }
347 KUNIT_EXPECT_EQ(test, 0,
348 regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
349 for (i = 0; i < BLOCK_TEST_SIZE; i++)
350 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
351
352 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
353
354 /* If using a cache the cache satisfied the read */
355 for (i = 0; i < BLOCK_TEST_SIZE; i++)
356 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
357}
358
359static void multi_read(struct kunit *test)
360{
361 struct regmap *map;
362 struct regmap_config config;
363 struct regmap_ram_data *data;
364 unsigned int regs[BLOCK_TEST_SIZE];
365 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
366 int i;
367
368 config = test_regmap_config;
369
370 map = gen_regmap(test, &config, &data);
371 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
372 if (IS_ERR(map))
373 return;
374
375 get_random_bytes(&val, sizeof(val));
376
377 /* Data written as single writes can be read via the multi API */
378 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
379 regs[i] = i;
380 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
381 }
382 KUNIT_EXPECT_EQ(test, 0,
383 regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
384 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
385
386 /* If using a cache the cache satisfied the read */
387 for (i = 0; i < BLOCK_TEST_SIZE; i++)
388 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
389}
390
/*
 * regmap_read_bypassed() must return the current hardware value while
 * the map is in cache-only mode, without disturbing the cache-only or
 * cache-bypass state.
 */
static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Plain read still sees the (now stale) cached value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		/* Bypassed read sees the updated hardware value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}
454
/*
 * As read_bypassed(), but with every register except from_reg + 5
 * marked volatile: plain reads of volatile registers fail with -EBUSY
 * in cache-only mode, while regmap_read_bypassed() still works.
 */
static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Skip #5: it is non-volatile, so the cache still holds it */
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}
519
520static void write_readonly(struct kunit *test)
521{
522 struct regmap *map;
523 struct regmap_config config;
524 struct regmap_ram_data *data;
525 unsigned int val;
526 int i;
527
528 config = test_regmap_config;
529 config.num_reg_defaults = BLOCK_TEST_SIZE;
530 config.writeable_reg = reg_5_false;
531
532 map = gen_regmap(test, &config, &data);
533 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
534 if (IS_ERR(map))
535 return;
536
537 get_random_bytes(&val, sizeof(val));
538
539 for (i = 0; i < BLOCK_TEST_SIZE; i++)
540 data->written[i] = false;
541
542 /* Change the value of all registers, readonly should fail */
543 for (i = 0; i < BLOCK_TEST_SIZE; i++)
544 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
545
546 /* Did that match what we see on the device? */
547 for (i = 0; i < BLOCK_TEST_SIZE; i++)
548 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
549}
550
551static void read_writeonly(struct kunit *test)
552{
553 struct regmap *map;
554 struct regmap_config config;
555 struct regmap_ram_data *data;
556 unsigned int val;
557 int i;
558
559 config = test_regmap_config;
560 config.readable_reg = reg_5_false;
561
562 map = gen_regmap(test, &config, &data);
563 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
564 if (IS_ERR(map))
565 return;
566
567 for (i = 0; i < BLOCK_TEST_SIZE; i++)
568 data->read[i] = false;
569
570 /*
571 * Try to read all the registers, the writeonly one should
572 * fail if we aren't using the flat cache.
573 */
574 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
575 if (config.cache_type != REGCACHE_FLAT) {
576 KUNIT_EXPECT_EQ(test, i != 5,
577 regmap_read(map, i, &val) == 0);
578 } else {
579 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
580 }
581 }
582
583 /* Did we trigger a hardware access? */
584 KUNIT_EXPECT_FALSE(test, data->read[5]);
585}
586
587static void reg_defaults(struct kunit *test)
588{
589 struct regmap *map;
590 struct regmap_config config;
591 struct regmap_ram_data *data;
592 unsigned int rval[BLOCK_TEST_SIZE];
593 int i;
594
595 config = test_regmap_config;
596 config.num_reg_defaults = BLOCK_TEST_SIZE;
597
598 map = gen_regmap(test, &config, &data);
599 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
600 if (IS_ERR(map))
601 return;
602
603 /* Read back the expected default data */
604 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
605 BLOCK_TEST_SIZE));
606 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
607
608 /* The data should have been read from cache if there was one */
609 for (i = 0; i < BLOCK_TEST_SIZE; i++)
610 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
611}
612
613static void reg_defaults_read_dev(struct kunit *test)
614{
615 struct regmap *map;
616 struct regmap_config config;
617 struct regmap_ram_data *data;
618 unsigned int rval[BLOCK_TEST_SIZE];
619 int i;
620
621 config = test_regmap_config;
622 config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
623
624 map = gen_regmap(test, &config, &data);
625 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
626 if (IS_ERR(map))
627 return;
628
629 /* We should have read the cache defaults back from the map */
630 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
631 KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
632 data->read[i] = false;
633 }
634
635 /* Read back the expected default data */
636 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
637 BLOCK_TEST_SIZE));
638 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
639
640 /* The data should have been read from cache if there was one */
641 for (i = 0; i < BLOCK_TEST_SIZE; i++)
642 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
643}
644
/*
 * A register patch must write exactly the patched registers and leave
 * every other register untouched on the device.
 */
static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			/* Patched: device written, value incremented */
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}
692
/*
 * With reg_stride = 2 odd-numbered registers must be rejected by both
 * read and write without touching the hardware, while even registers
 * work normally.
 */
static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	/*
	 * Allow one extra register so that the read/written arrays
	 * are sized big enough to include an entry for the odd
	 * address past the final reg_default register.
	 */
	config.max_register = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even addresses can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			/* Odd address: both operations fail, no HW access */
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			/* Even address: default reads back, write reaches HW */
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}
738
/*
 * An indirectly accessed register range: virtual registers
 * range_min..range_max are reached through a physical window, with the
 * page selected via selector_reg.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	/* Physical window: registers 4..13 */
	.window_start = 4,
	.window_len = 10,

	/* Virtual registers 20..40 mapped through the window */
	.range_min = 20,
	.range_max = 40,
};
749
750static bool test_range_window_volatile(struct device *dev, unsigned int reg)
751{
752 if (reg >= test_range.window_start &&
753 reg <= test_range.window_start + test_range.window_len)
754 return true;
755
756 return false;
757}
758
759static bool test_range_all_volatile(struct device *dev, unsigned int reg)
760{
761 if (test_range_window_volatile(dev, reg))
762 return true;
763
764 if (reg >= test_range.range_min && reg <= test_range.range_max)
765 return true;
766
767 return false;
768}
769
/*
 * Basic check of indirect (paged) range access: accesses to the
 * virtual registers must update the selector register and go through
 * the physical window, with no physical access at the virtual
 * addresses themselves.
 */
static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* NOTE(review): '<' bound skips range_max itself — confirm intended */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* A write one window further along must reselect the page too */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}
836
837/* Try to stress dynamic creation of cache data structures */
838static void stress_insert(struct kunit *test)
839{
840 struct regmap *map;
841 struct regmap_config config;
842 struct regmap_ram_data *data;
843 unsigned int rval, *vals;
844 size_t buf_sz;
845 int i;
846
847 config = test_regmap_config;
848 config.max_register = 300;
849
850 map = gen_regmap(test, &config, &data);
851 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
852 if (IS_ERR(map))
853 return;
854
855 buf_sz = array_size(sizeof(*vals), config.max_register);
856 vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
857 KUNIT_ASSERT_FALSE(test, vals == NULL);
858
859 get_random_bytes(vals, buf_sz);
860
861 /* Write data into the map/cache in ever decreasing strides */
862 for (i = 0; i < config.max_register; i += 100)
863 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
864 for (i = 0; i < config.max_register; i += 50)
865 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
866 for (i = 0; i < config.max_register; i += 25)
867 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
868 for (i = 0; i < config.max_register; i += 10)
869 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
870 for (i = 0; i < config.max_register; i += 5)
871 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
872 for (i = 0; i < config.max_register; i += 3)
873 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
874 for (i = 0; i < config.max_register; i += 2)
875 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
876 for (i = 0; i < config.max_register; i++)
877 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
878
879 /* Do reads from the cache (if there is one) match? */
880 for (i = 0; i < config.max_register; i ++) {
881 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
882 KUNIT_EXPECT_EQ(test, rval, vals[i]);
883 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
884 }
885}
886
887static void cache_bypass(struct kunit *test)
888{
889 const struct regmap_test_param *param = test->param_value;
890 struct regmap *map;
891 struct regmap_config config;
892 struct regmap_ram_data *data;
893 unsigned int val, rval;
894
895 config = test_regmap_config;
896
897 map = gen_regmap(test, &config, &data);
898 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
899 if (IS_ERR(map))
900 return;
901
902 get_random_bytes(&val, sizeof(val));
903
904 /* Ensure the cache has a value in it */
905 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
906
907 /* Bypass then write a different value */
908 regcache_cache_bypass(map, true);
909 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
910
911 /* Read the bypassed value */
912 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
913 KUNIT_EXPECT_EQ(test, val + 1, rval);
914 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
915
916 /* Disable bypass, the cache should still return the original value */
917 regcache_cache_bypass(map, false);
918 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
919 KUNIT_EXPECT_EQ(test, val, rval);
920}
921
922static void cache_sync_marked_dirty(struct kunit *test)
923{
924 const struct regmap_test_param *param = test->param_value;
925 struct regmap *map;
926 struct regmap_config config;
927 struct regmap_ram_data *data;
928 unsigned int val[BLOCK_TEST_SIZE];
929 int i;
930
931 config = test_regmap_config;
932
933 map = gen_regmap(test, &config, &data);
934 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
935 if (IS_ERR(map))
936 return;
937
938 get_random_bytes(&val, sizeof(val));
939
940 /* Put some data into the cache */
941 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
942 BLOCK_TEST_SIZE));
943 for (i = 0; i < BLOCK_TEST_SIZE; i++)
944 data->written[param->from_reg + i] = false;
945
946 /* Trash the data on the device itself then resync */
947 regcache_mark_dirty(map);
948 memset(data->vals, 0, sizeof(val));
949 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
950
951 /* Did we just write the correct data out? */
952 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
953 for (i = 0; i < BLOCK_TEST_SIZE; i++)
954 KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
955}
956
/*
 * Check that values written while the cache is in cache-only mode are
 * held back from the device, and are then written out by a plain
 * regcache_sync() (no regcache_mark_dirty()) once cache-only mode is
 * left again.
 */
static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Mask of the valid value bits, used to invert values in range */
	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	/* The new values must not reach the device while cache-only */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
1005
/*
 * With register defaults provided, check that a sync after
 * regcache_mark_dirty() only writes out registers whose cached value
 * differs from the default, and writes nothing once everything has
 * been put back to its default.
 */
static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}
1054
/*
 * Check syncing of a register with defaults that was modified while in
 * cache-only mode: a plain regcache_sync() must write it out, both
 * when it holds a non-default value and when it was subsequently
 * changed back to its default value under cache-only.
 */
static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
1101
/*
 * Check that a read-only register is neither writeable nor written out
 * by regcache_sync().
 *
 * NOTE(review): reg_5_false() presumably rejects absolute register 5
 * while accesses here use param->from_reg + i — confirm the parameter
 * set for this test only uses from_reg == 0.
 */
static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
1139
/*
 * Check interaction of regmap_register_patch() with the cache: after a
 * sync the patched values are present on the device, but reads still
 * return the pre-patch cached values.
 */
static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			/* Patched registers: device holds original + 1 */
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}
1197
/*
 * Drop part of a fully cached block with regcache_drop_region() and
 * check that only the dropped registers are fetched from the device on
 * the next read.
 */
static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}
1237
/*
 * Create non-contiguous cached blocks (only every other range of
 * BLOCK_TEST_SIZE registers is written), drop the whole of one range
 * and the middle of another, then check that a sync after
 * regcache_mark_dirty() writes back exactly the registers that stayed
 * cached.
 */
static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	/* val[] only provides data for the four even (written) ranges */
	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1339
/*
 * Drop every cached register, then check that a sync after
 * regcache_mark_dirty() has nothing left to write out.
 */
static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1380
/*
 * As cache_drop_all_and_sync_marked_dirty() but without register
 * defaults and without marking the cache dirty: after dropping every
 * register a sync must not write anything.
 */
static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1422
/*
 * As cache_drop_all_and_sync_no_defaults() but with register defaults
 * present: dropping every register and syncing without marking the
 * cache dirty must still write nothing.
 */
static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
1465
1466static void cache_present(struct kunit *test)
1467{
1468 const struct regmap_test_param *param = test->param_value;
1469 struct regmap *map;
1470 struct regmap_config config;
1471 struct regmap_ram_data *data;
1472 unsigned int val;
1473 int i;
1474
1475 config = test_regmap_config;
1476
1477 map = gen_regmap(test, &config, &data);
1478 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1479 if (IS_ERR(map))
1480 return;
1481
1482 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1483 data->read[param->from_reg + i] = false;
1484
1485 /* No defaults so no registers cached. */
1486 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1487 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
1488
1489 /* We didn't trigger any reads */
1490 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1491 KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
1492
1493 /* Fill the cache */
1494 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1495 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
1496
1497 /* Now everything should be cached */
1498 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1499 KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
1500}
1501
1502static void cache_write_zero(struct kunit *test)
1503{
1504 const struct regmap_test_param *param = test->param_value;
1505 struct regmap *map;
1506 struct regmap_config config;
1507 struct regmap_ram_data *data;
1508 unsigned int val;
1509 int i;
1510
1511 config = test_regmap_config;
1512
1513 map = gen_regmap(test, &config, &data);
1514 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1515 if (IS_ERR(map))
1516 return;
1517
1518 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1519 data->read[param->from_reg + i] = false;
1520
1521 /* No defaults so no registers cached. */
1522 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1523 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
1524
1525 /* We didn't trigger any reads */
1526 for (i = 0; i < BLOCK_TEST_SIZE; i++)
1527 KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
1528
1529 /* Write a zero value */
1530 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
1531
1532 /* Read that zero value back */
1533 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
1534 KUNIT_EXPECT_EQ(test, 0, val);
1535
1536 /* From the cache? */
1537 KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
1538
1539 /* Try to throw it away */
1540 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
1541 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
1542}
1543
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	/*
	 * The last write touched range_max, which lives on page
	 * (range_max - range_min) / window_len == 2.
	 */
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}
1594
/* Cache type / value endianness combinations for the raw I/O tests */
static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

/* As above, but only the combinations that actually have a cache */
static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
1618
/*
 * Base config for the raw tests: 16-bit registers and values, so the
 * wire format is subject to the configured value endianness.
 */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
1626
/*
 * Create a raw RAM-backed regmap with random initial contents.  The
 * raw (wire-format) values live in (*data)->vals; each register's
 * content is also exposed as a reg_default after conversion from the
 * configured value endianness.  On success the regmap is torn down
 * automatically via a deferred kunit action; on failure buf and *data
 * are freed here.
 */
static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	int i, error;
	struct reg_default *defaults;
	size_t size;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	/* Matches gen_regmap(): disable locking for rbtree/maple caches */
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	/*
	 * NOTE(review): sized using reg_bits rather than val_bits;
	 * equivalent here since raw_regmap_config sets both to 16 —
	 * confirm if configs with differing widths are ever used.
	 */
	size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = (void *)buf;

	/* Expose every register's random content as its default value */
	config->num_reg_defaults = config->max_register + 1;
	defaults = kunit_kcalloc(test,
				 config->num_reg_defaults,
				 sizeof(struct reg_default),
				 GFP_KERNEL);
	if (!defaults)
		goto out_free;
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		/* Convert from wire format to CPU order for the core */
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			ret = ERR_PTR(-EINVAL);
			goto out_free;
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}
1704
1705static void raw_read_defaults_single(struct kunit *test)
1706{
1707 struct regmap *map;
1708 struct regmap_config config;
1709 struct regmap_ram_data *data;
1710 unsigned int rval;
1711 int i;
1712
1713 config = raw_regmap_config;
1714
1715 map = gen_raw_regmap(test, &config, &data);
1716 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1717 if (IS_ERR(map))
1718 return;
1719
1720 /* Check that we can read the defaults via the API */
1721 for (i = 0; i < config.max_register + 1; i++) {
1722 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1723 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1724 }
1725}
1726
1727static void raw_read_defaults(struct kunit *test)
1728{
1729 struct regmap *map;
1730 struct regmap_config config;
1731 struct regmap_ram_data *data;
1732 u16 *rval;
1733 u16 def;
1734 size_t val_len;
1735 int i;
1736
1737 config = raw_regmap_config;
1738
1739 map = gen_raw_regmap(test, &config, &data);
1740 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1741 if (IS_ERR(map))
1742 return;
1743
1744 val_len = array_size(sizeof(*rval), config.max_register + 1);
1745 rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
1746 KUNIT_ASSERT_TRUE(test, rval != NULL);
1747 if (!rval)
1748 return;
1749
1750 /* Check that we can read the defaults via the API */
1751 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1752 for (i = 0; i < config.max_register + 1; i++) {
1753 def = config.reg_defaults[i].def;
1754 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1755 KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
1756 } else {
1757 KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
1758 }
1759 }
1760}
1761
1762static void raw_write_read_single(struct kunit *test)
1763{
1764 struct regmap *map;
1765 struct regmap_config config;
1766 struct regmap_ram_data *data;
1767 u16 val;
1768 unsigned int rval;
1769
1770 config = raw_regmap_config;
1771
1772 map = gen_raw_regmap(test, &config, &data);
1773 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1774 if (IS_ERR(map))
1775 return;
1776
1777 get_random_bytes(&val, sizeof(val));
1778
1779 /* If we write a value to a register we can read it back */
1780 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1781 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1782 KUNIT_EXPECT_EQ(test, val, rval);
1783}
1784
1785static void raw_write(struct kunit *test)
1786{
1787 struct regmap *map;
1788 struct regmap_config config;
1789 struct regmap_ram_data *data;
1790 u16 *hw_buf;
1791 u16 val[2];
1792 unsigned int rval;
1793 int i;
1794
1795 config = raw_regmap_config;
1796
1797 map = gen_raw_regmap(test, &config, &data);
1798 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1799 if (IS_ERR(map))
1800 return;
1801
1802 hw_buf = (u16 *)data->vals;
1803
1804 get_random_bytes(&val, sizeof(val));
1805
1806 /* Do a raw write */
1807 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1808
1809 /* We should read back the new values, and defaults for the rest */
1810 for (i = 0; i < config.max_register + 1; i++) {
1811 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1812
1813 switch (i) {
1814 case 2:
1815 case 3:
1816 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1817 KUNIT_EXPECT_EQ(test, rval,
1818 be16_to_cpu((__force __be16)val[i % 2]));
1819 } else {
1820 KUNIT_EXPECT_EQ(test, rval,
1821 le16_to_cpu((__force __le16)val[i % 2]));
1822 }
1823 break;
1824 default:
1825 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1826 break;
1827 }
1828 }
1829
1830 /* The values should appear in the "hardware" */
1831 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1832}
1833
/* regmap predicate: matches only register 0 */
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return !reg;
}
1838
/* RAM-backend predicate: matches only register 0 */
static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return !reg;
}
1843
/*
 * Test non-incrementing raw writes: a block written to a noinc register
 * must leave only the last value in that register, and must not spill
 * into the register that follows it.
 */
static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	/* Register 0 is volatile and noinc for both read and write */
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Make the RAM backend treat register 0 as noinc too */
	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	/*
	 * Compute the expected values in CPU byte order; val_test is
	 * offset by 100, presumably so it is unlikely to collide with
	 * the random data written via the noinc register — confirm.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}
1890
/*
 * Write values (one raw write of two registers, one normal write) while
 * in cache-only mode, check that nothing reached the "hardware", then
 * sync the cache and verify the values were written out.
 */
static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* Values guaranteed to differ from the current "hardware" bytes */
	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Written raw, so convert from bus byte order */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			/* Written via regmap_write(), both sides CPU order */
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	/* Clear the write tracking before the sync */
	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}
1965
1966static void raw_ranges(struct kunit *test)
1967{
1968 struct regmap *map;
1969 struct regmap_config config;
1970 struct regmap_ram_data *data;
1971 unsigned int val;
1972 int i;
1973
1974 config = raw_regmap_config;
1975 config.volatile_reg = test_range_all_volatile;
1976 config.ranges = &test_range;
1977 config.num_ranges = 1;
1978 config.max_register = test_range.range_max;
1979
1980 map = gen_raw_regmap(test, &config, &data);
1981 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1982 if (IS_ERR(map))
1983 return;
1984
1985 /* Reset the page to a non-zero value to trigger a change */
1986 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
1987 test_range.range_max));
1988
1989 /* Check we set the page and use the window for writes */
1990 data->written[test_range.selector_reg] = false;
1991 data->written[test_range.window_start] = false;
1992 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1993 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1994 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
1995
1996 data->written[test_range.selector_reg] = false;
1997 data->written[test_range.window_start] = false;
1998 KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
1999 test_range.range_min +
2000 test_range.window_len,
2001 0));
2002 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
2003 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
2004
2005 /* Same for reads */
2006 data->written[test_range.selector_reg] = false;
2007 data->read[test_range.window_start] = false;
2008 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
2009 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
2010 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
2011
2012 data->written[test_range.selector_reg] = false;
2013 data->read[test_range.window_start] = false;
2014 KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
2015 test_range.range_min +
2016 test_range.window_len,
2017 &val));
2018 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
2019 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
2020
2021 /* No physical access triggered in the virtual range */
2022 for (i = test_range.range_min; i < test_range.range_max; i++) {
2023 KUNIT_EXPECT_FALSE(test, data->read[i]);
2024 KUNIT_EXPECT_FALSE(test, data->written[i]);
2025 }
2026}
2027
/*
 * All test cases.  The cached tests are parameterised over cache type;
 * the raw tests additionally over value endianness.
 */
static struct kunit_case regmap_test_cases[] = {
	/* Cached register I/O tests */
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	/* Raw I/O tests */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};
2069
2070static int regmap_test_init(struct kunit *test)
2071{
2072 struct regmap_test_priv *priv;
2073 struct device *dev;
2074
2075 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
2076 if (!priv)
2077 return -ENOMEM;
2078
2079 test->priv = priv;
2080
2081 dev = kunit_device_register(test, "regmap_test");
2082 if (IS_ERR(dev))
2083 return PTR_ERR(dev);
2084
2085 priv->dev = get_device(dev);
2086 dev_set_drvdata(dev, test);
2087
2088 return 0;
2089}
2090
2091static void regmap_test_exit(struct kunit *test)
2092{
2093 struct regmap_test_priv *priv = test->priv;
2094
2095 /* Destroy the dummy struct device */
2096 if (priv && priv->dev)
2097 put_device(priv->dev);
2098}
2099
/* Register the suite with KUnit; init/exit run around every test case */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");