Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.13.7.
   1/*
   2 * soc-cache.c  --  ASoC register cache helpers
   3 *
   4 * Copyright 2009 Wolfson Microelectronics PLC.
   5 *
   6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
   7 *
   8 *  This program is free software; you can redistribute  it and/or modify it
   9 *  under  the terms of  the GNU General  Public License as published by the
  10 *  Free Software Foundation;  either version 2 of the  License, or (at your
  11 *  option) any later version.
  12 */
  13
  14#include <linux/i2c.h>
  15#include <linux/spi/spi.h>
  16#include <sound/soc.h>
  17#include <linux/lzo.h>
  18#include <linux/bitmap.h>
  19#include <linux/rbtree.h>
  20
  21#include <trace/events/asoc.h>
  22
  23static bool snd_soc_set_cache_val(void *base, unsigned int idx,
  24				  unsigned int val, unsigned int word_size)
  25{
  26	switch (word_size) {
  27	case 1: {
  28		u8 *cache = base;
  29		if (cache[idx] == val)
  30			return true;
  31		cache[idx] = val;
  32		break;
  33	}
  34	case 2: {
  35		u16 *cache = base;
  36		if (cache[idx] == val)
  37			return true;
  38		cache[idx] = val;
  39		break;
  40	}
  41	default:
  42		BUG();
  43	}
  44	return false;
  45}
  46
  47static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
  48		unsigned int word_size)
  49{
  50	if (!base)
  51		return -1;
  52
  53	switch (word_size) {
  54	case 1: {
  55		const u8 *cache = base;
  56		return cache[idx];
  57	}
  58	case 2: {
  59		const u16 *cache = base;
  60		return cache[idx];
  61	}
  62	default:
  63		BUG();
  64	}
  65	/* unreachable */
  66	return -1;
  67}
  68
/*
 * One rbtree node describing a block of adjacent cached registers.
 * Packed to minimise the per-block bookkeeping overhead; the register
 * data itself lives in the separately allocated @block buffer.
 */
struct snd_soc_rbtree_node {
	struct rb_node node; /* the actual rbtree node holding this block */
	unsigned int base_reg; /* base register handled by this block */
	unsigned int word_size; /* number of bytes needed to represent the register index */
	void *block; /* block of adjacent registers */
	unsigned int blklen; /* number of registers available in the block */
} __attribute__ ((packed));
  76
/* Top-level state of the rbtree register cache */
struct snd_soc_rbtree_ctx {
	struct rb_root root; /* root of the tree of register blocks */
	struct snd_soc_rbtree_node *cached_rbnode; /* most recently used block, tried first on lookups */
};
  81
  82static inline void snd_soc_rbtree_get_base_top_reg(
  83	struct snd_soc_rbtree_node *rbnode,
  84	unsigned int *base, unsigned int *top)
  85{
  86	*base = rbnode->base_reg;
  87	*top = rbnode->base_reg + rbnode->blklen - 1;
  88}
  89
  90static unsigned int snd_soc_rbtree_get_register(
  91	struct snd_soc_rbtree_node *rbnode, unsigned int idx)
  92{
  93	unsigned int val;
  94
  95	switch (rbnode->word_size) {
  96	case 1: {
  97		u8 *p = rbnode->block;
  98		val = p[idx];
  99		return val;
 100	}
 101	case 2: {
 102		u16 *p = rbnode->block;
 103		val = p[idx];
 104		return val;
 105	}
 106	default:
 107		BUG();
 108		break;
 109	}
 110	return -1;
 111}
 112
 113static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
 114					unsigned int idx, unsigned int val)
 115{
 116	switch (rbnode->word_size) {
 117	case 1: {
 118		u8 *p = rbnode->block;
 119		p[idx] = val;
 120		break;
 121	}
 122	case 2: {
 123		u16 *p = rbnode->block;
 124		p[idx] = val;
 125		break;
 126	}
 127	default:
 128		BUG();
 129		break;
 130	}
 131}
 132
 133static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
 134	struct rb_root *root, unsigned int reg)
 135{
 136	struct rb_node *node;
 137	struct snd_soc_rbtree_node *rbnode;
 138	unsigned int base_reg, top_reg;
 139
 140	node = root->rb_node;
 141	while (node) {
 142		rbnode = container_of(node, struct snd_soc_rbtree_node, node);
 143		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
 144		if (reg >= base_reg && reg <= top_reg)
 145			return rbnode;
 146		else if (reg > top_reg)
 147			node = node->rb_right;
 148		else if (reg < base_reg)
 149			node = node->rb_left;
 150	}
 151
 152	return NULL;
 153}
 154
/*
 * Insert @rbnode into the block rbtree, keyed by the register range it
 * covers.
 *
 * Returns 1 when the node was linked into the tree, 0 when an existing
 * block already covers @rbnode's base register (nothing is inserted in
 * that case and the caller keeps ownership of @rbnode).
 */
static int snd_soc_rbtree_insert(struct rb_root *root,
				 struct snd_soc_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct snd_soc_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	/* standard rbtree descent: find the link where the node belongs */
	while (*new) {
		rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
						&top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree and rebalance */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
 190
 191static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
 192{
 193	struct snd_soc_rbtree_ctx *rbtree_ctx;
 194	struct rb_node *node;
 195	struct snd_soc_rbtree_node *rbnode;
 196	unsigned int regtmp;
 197	unsigned int val, def;
 198	int ret;
 199	int i;
 200
 201	rbtree_ctx = codec->reg_cache;
 202	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
 203		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
 204		for (i = 0; i < rbnode->blklen; ++i) {
 205			regtmp = rbnode->base_reg + i;
 206			val = snd_soc_rbtree_get_register(rbnode, i);
 207			def = snd_soc_get_cache_val(codec->reg_def_copy, i,
 208						    rbnode->word_size);
 209			if (val == def)
 210				continue;
 211
 212			WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
 213
 214			codec->cache_bypass = 1;
 215			ret = snd_soc_write(codec, regtmp, val);
 216			codec->cache_bypass = 0;
 217			if (ret)
 218				return ret;
 219			dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
 220				regtmp, val);
 221		}
 222	}
 223
 224	return 0;
 225}
 226
/*
 * Grow @rbnode's register block by one word and store @value for @reg at
 * block offset @pos, shifting the tail of the block up by one entry.
 * @pos == 0 means @reg becomes the new base register of the block.
 *
 * Returns 0 on success or -ENOMEM if the block could not be enlarged;
 * on failure the original block is left untouched and still owned by
 * @rbnode.
 */
static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
					  unsigned int pos, unsigned int reg,
					  unsigned int value)
{
	u8 *blk;

	/* krealloc preserves the existing contents; on failure the old
	 * allocation remains valid and reachable via rbnode->block */
	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * rbnode->word_size,
		blk + pos * rbnode->word_size,
		(rbnode->blklen - pos) * rbnode->word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	snd_soc_rbtree_set_register(rbnode, pos, value);
	return 0;
}
 252
/*
 * Update the cached value of @reg, creating or extending a register
 * block as needed.
 *
 * Lookup order: the most recently used block first, then a full rbtree
 * walk.  If no block covers @reg the value is merged into a block that
 * holds an adjacent register, or failing that a fresh single-register
 * block is inserted into the tree.  Returns 0 on success or a negative
 * errno on allocation failure.
 */
static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
				      unsigned int reg, unsigned int value)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int base_reg, top_reg;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = codec->reg_cache;
	/* look up the required register in the cached rbnode */
	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			/* reg_tmp is the offset of @reg within the block */
			reg_tmp = reg - base_reg;
			val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
			if (val == value)
				return 0;
			snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
			return 0;
		}
	}
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
		if (val == value)
			return 0;
		snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
		/* remember this block for the next fast-path lookup */
		rbtree_ctx->cached_rbnode = rbnode;
	} else {
		/* bail out early, no need to create the rbnode yet */
		if (!value)
			return 0;
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
			for (i = 0; i < rbnode_tmp->blklen; ++i) {
				reg_tmp = rbnode_tmp->base_reg + i;
				if (abs(reg_tmp - reg) != 1)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + 1 == reg)
					pos = i + 1;
				else
					pos = i;
				ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
								     reg, value);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->word_size = codec->driver->reg_word_size;
		rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		snd_soc_rbtree_set_register(rbnode, 0, value);
		snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
 340
 341static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
 342				     unsigned int reg, unsigned int *value)
 343{
 344	struct snd_soc_rbtree_ctx *rbtree_ctx;
 345	struct snd_soc_rbtree_node *rbnode;
 346	unsigned int base_reg, top_reg;
 347	unsigned int reg_tmp;
 348
 349	rbtree_ctx = codec->reg_cache;
 350	/* look up the required register in the cached rbnode */
 351	rbnode = rbtree_ctx->cached_rbnode;
 352	if (rbnode) {
 353		snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
 354		if (reg >= base_reg && reg <= top_reg) {
 355			reg_tmp = reg - base_reg;
 356			*value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
 357			return 0;
 358		}
 359	}
 360	/* if we can't locate it in the cached rbnode we'll have
 361	 * to traverse the rbtree looking for it.
 362	 */
 363	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
 364	if (rbnode) {
 365		reg_tmp = reg - rbnode->base_reg;
 366		*value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
 367		rbtree_ctx->cached_rbnode = rbnode;
 368	} else {
 369		/* uninitialized registers default to 0 */
 370		*value = 0;
 371	}
 372
 373	return 0;
 374}
 375
 376static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
 377{
 378	struct rb_node *next;
 379	struct snd_soc_rbtree_ctx *rbtree_ctx;
 380	struct snd_soc_rbtree_node *rbtree_node;
 381
 382	/* if we've already been called then just return */
 383	rbtree_ctx = codec->reg_cache;
 384	if (!rbtree_ctx)
 385		return 0;
 386
 387	/* free up the rbtree */
 388	next = rb_first(&rbtree_ctx->root);
 389	while (next) {
 390		rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
 391		next = rb_next(&rbtree_node->node);
 392		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
 393		kfree(rbtree_node->block);
 394		kfree(rbtree_node);
 395	}
 396
 397	/* release the resources */
 398	kfree(codec->reg_cache);
 399	codec->reg_cache = NULL;
 400
 401	return 0;
 402}
 403
/*
 * Allocate the rbtree context and seed the cache from reg_def_copy.
 * Only registers with a non-zero default get an entry, which keeps the
 * tree sparse; zero-valued registers are implicit (reads return 0).
 *
 * Returns 0 on success or a negative errno; partially built state is
 * torn down via snd_soc_cache_exit() on failure.
 */
static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx;
	unsigned int word_size;
	unsigned int val;
	int i;
	int ret;

	codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!codec->reg_cache)
		return -ENOMEM;

	rbtree_ctx = codec->reg_cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	/* nothing to seed if there is no default register map */
	if (!codec->reg_def_copy)
		return 0;

	word_size = codec->driver->reg_word_size;
	for (i = 0; i < codec->driver->reg_cache_size; ++i) {
		/* reg_def_copy is indexed by register number */
		val = snd_soc_get_cache_val(codec->reg_def_copy, i,
					    word_size);
		if (!val)
			continue;
		ret = snd_soc_rbtree_cache_write(codec, i, val);
		if (ret)
			goto err;
	}

	return 0;

err:
	snd_soc_cache_exit(codec);
	return ret;
}
 440
 441#ifdef CONFIG_SND_SOC_CACHE_LZO
/* Per-block state of the LZO-compressed register cache */
struct snd_soc_lzo_ctx {
	void *wmem; /* scratch memory required by lzo1x compression */
	void *dst; /* output buffer; normally holds the compressed block */
	const void *src; /* input buffer for the next (de)compression */
	size_t src_len; /* length of src in bytes */
	size_t dst_len; /* dst capacity on input, bytes produced on output */
	size_t decompressed_size; /* uncompressed size of this block */
	unsigned long *sync_bmp; /* dirty-register bitmap, shared by all blocks */
	int sync_bmp_nbits; /* number of valid bits in sync_bmp */
};
 452
/* number of compressed blocks the register map is partitioned into */
#define LZO_BLOCK_NUM 8
static int snd_soc_lzo_block_count(void)
{
	return LZO_BLOCK_NUM;
}
 458
 459static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
 460{
 461	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
 462	if (!lzo_ctx->wmem)
 463		return -ENOMEM;
 464	return 0;
 465}
 466
 467static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
 468{
 469	size_t compress_size;
 470	int ret;
 471
 472	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
 473			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
 474	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
 475		return -EINVAL;
 476	lzo_ctx->dst_len = compress_size;
 477	return 0;
 478}
 479
 480static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
 481{
 482	size_t dst_len;
 483	int ret;
 484
 485	dst_len = lzo_ctx->dst_len;
 486	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
 487				    lzo_ctx->dst, &dst_len);
 488	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
 489		return -EINVAL;
 490	return 0;
 491}
 492
 493static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
 494		struct snd_soc_lzo_ctx *lzo_ctx)
 495{
 496	int ret;
 497
 498	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
 499	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
 500	if (!lzo_ctx->dst) {
 501		lzo_ctx->dst_len = 0;
 502		return -ENOMEM;
 503	}
 504
 505	ret = snd_soc_lzo_compress(lzo_ctx);
 506	if (ret < 0)
 507		return ret;
 508	return 0;
 509}
 510
 511static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
 512		struct snd_soc_lzo_ctx *lzo_ctx)
 513{
 514	int ret;
 515
 516	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
 517	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
 518	if (!lzo_ctx->dst) {
 519		lzo_ctx->dst_len = 0;
 520		return -ENOMEM;
 521	}
 522
 523	ret = snd_soc_lzo_decompress(lzo_ctx);
 524	if (ret < 0)
 525		return ret;
 526	return 0;
 527}
 528
/*
 * Map a register number to the index of the compressed block holding
 * it: the register's byte offset divided by the per-block byte size.
 */
static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
		unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return (reg * codec_drv->reg_word_size) /
	       DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}
 538
/*
 * Map a register number to its register offset within the decompressed
 * block: reg modulo the number of registers per block.
 */
static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
		unsigned int reg)
{
	const struct snd_soc_codec_driver *codec_drv;

	codec_drv = codec->driver;
	return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
		      codec_drv->reg_word_size);
}
 548
 549static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
 550{
 551	const struct snd_soc_codec_driver *codec_drv;
 552
 553	codec_drv = codec->driver;
 554	return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
 555}
 556
/*
 * Write every register marked dirty in the shared sync bitmap back to
 * the hardware: read the current value through the cache, then write it
 * with the cache bypassed so the device itself is updated.
 *
 * Returns 0 on success or the first read/write error.
 */
static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = codec->reg_cache;
	/* the sync bitmap is shared, so block 0's copy covers all blocks */
	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
		WARN_ON(!snd_soc_codec_writable_register(codec, i));
		ret = snd_soc_cache_read(codec, i, &val);
		if (ret)
			return ret;
		codec->cache_bypass = 1;
		ret = snd_soc_write(codec, i, val);
		codec->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
			i, val);
	}

	return 0;
}
 581
/*
 * Update the cached value of @reg: decompress the block holding it,
 * patch the value in, recompress, and mark the register dirty in the
 * sync bitmap.
 *
 * The original compressed buffer (tmp_dst) is kept until the new one is
 * in place so any failure can restore the block untouched via the "out"
 * path.  Returns 0 on success or a negative errno.
 */
static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
				   unsigned int reg, unsigned int value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block; on failure dst is the (failed) decompression
	 * buffer, which we must free before restoring the saved state */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache; if it is unchanged there is
	 * nothing to recompress, so drop the decompressed copy and restore */
	if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
				  codec->driver->reg_word_size)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block; on failure free both the new compression
	 * buffer (dst) and the decompressed copy (src) before restoring */
	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg, lzo_block->sync_bmp);
	/* the new compressed buffer replaces the old one: free the old
	 * buffer and the intermediate decompressed copy */
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	/* restore the original compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}
 643
/*
 * Read the cached value of @reg into *@value by decompressing the block
 * that holds it.  The compressed buffer is saved and restored around
 * the decompression, so the cache state is unchanged by a read.
 *
 * Always returns 0; *@value is 0 if decompression fails.
 */
static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
				  unsigned int reg, unsigned int *value)
{
	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	*value = 0;
	/* index of the compressed lzo block */
	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
	/* register index within the decompressed block */
	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
	/* size of the compressed block */
	blksize = snd_soc_lzo_get_blksize(codec);
	lzo_blocks = codec->reg_cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
					       codec->driver->reg_word_size);

	/* free the (possibly partial) decompression buffer */
	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return 0;
}
 683
/*
 * Free all LZO cache state: the shared sync bitmap (exactly once), each
 * block's scratch and compressed buffers, the block structs, and the
 * block pointer array.  Safe to call more than once.
 */
static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
{
	struct snd_soc_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = codec->reg_cache;
	if (!lzo_blocks)
		return 0;

	blkcount = snd_soc_lzo_block_count();
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks.  Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		kfree(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; ++i) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	codec->reg_cache = NULL;
	return 0;
}
 713
 714static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
 715{
 716	struct snd_soc_lzo_ctx **lzo_blocks;
 717	size_t bmp_size;
 718	const struct snd_soc_codec_driver *codec_drv;
 719	int ret, tofree, i, blksize, blkcount;
 720	const char *p, *end;
 721	unsigned long *sync_bmp;
 722
 723	ret = 0;
 724	codec_drv = codec->driver;
 725
 726	/*
 727	 * If we have not been given a default register cache
 728	 * then allocate a dummy zero-ed out region, compress it
 729	 * and remember to free it afterwards.
 730	 */
 731	tofree = 0;
 732	if (!codec->reg_def_copy)
 733		tofree = 1;
 734
 735	if (!codec->reg_def_copy) {
 736		codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
 737		if (!codec->reg_def_copy)
 738			return -ENOMEM;
 739	}
 740
 741	blkcount = snd_soc_lzo_block_count();
 742	codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
 743				   GFP_KERNEL);
 744	if (!codec->reg_cache) {
 745		ret = -ENOMEM;
 746		goto err_tofree;
 747	}
 748	lzo_blocks = codec->reg_cache;
 749
 750	/*
 751	 * allocate a bitmap to be used when syncing the cache with
 752	 * the hardware.  Each time a register is modified, the corresponding
 753	 * bit is set in the bitmap, so we know that we have to sync
 754	 * that register.
 755	 */
 756	bmp_size = codec_drv->reg_cache_size;
 757	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
 758			   GFP_KERNEL);
 759	if (!sync_bmp) {
 760		ret = -ENOMEM;
 761		goto err;
 762	}
 763	bitmap_zero(sync_bmp, bmp_size);
 764
 765	/* allocate the lzo blocks and initialize them */
 766	for (i = 0; i < blkcount; ++i) {
 767		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
 768					GFP_KERNEL);
 769		if (!lzo_blocks[i]) {
 770			kfree(sync_bmp);
 771			ret = -ENOMEM;
 772			goto err;
 773		}
 774		lzo_blocks[i]->sync_bmp = sync_bmp;
 775		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
 776		/* alloc the working space for the compressed block */
 777		ret = snd_soc_lzo_prepare(lzo_blocks[i]);
 778		if (ret < 0)
 779			goto err;
 780	}
 781
 782	blksize = snd_soc_lzo_get_blksize(codec);
 783	p = codec->reg_def_copy;
 784	end = codec->reg_def_copy + codec->reg_size;
 785	/* compress the register map and fill the lzo blocks */
 786	for (i = 0; i < blkcount; ++i, p += blksize) {
 787		lzo_blocks[i]->src = p;
 788		if (p + blksize > end)
 789			lzo_blocks[i]->src_len = end - p;
 790		else
 791			lzo_blocks[i]->src_len = blksize;
 792		ret = snd_soc_lzo_compress_cache_block(codec,
 793						       lzo_blocks[i]);
 794		if (ret < 0)
 795			goto err;
 796		lzo_blocks[i]->decompressed_size =
 797			lzo_blocks[i]->src_len;
 798	}
 799
 800	if (tofree) {
 801		kfree(codec->reg_def_copy);
 802		codec->reg_def_copy = NULL;
 803	}
 804	return 0;
 805err:
 806	snd_soc_cache_exit(codec);
 807err_tofree:
 808	if (tofree) {
 809		kfree(codec->reg_def_copy);
 810		codec->reg_def_copy = NULL;
 811	}
 812	return ret;
 813}
 814#endif
 815
 816static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
 817{
 818	int i;
 819	int ret;
 820	const struct snd_soc_codec_driver *codec_drv;
 821	unsigned int val;
 822
 823	codec_drv = codec->driver;
 824	for (i = 0; i < codec_drv->reg_cache_size; ++i) {
 825		ret = snd_soc_cache_read(codec, i, &val);
 826		if (ret)
 827			return ret;
 828		if (codec->reg_def_copy)
 829			if (snd_soc_get_cache_val(codec->reg_def_copy,
 830						  i, codec_drv->reg_word_size) == val)
 831				continue;
 832
 833		WARN_ON(!snd_soc_codec_writable_register(codec, i));
 834
 835		ret = snd_soc_write(codec, i, val);
 836		if (ret)
 837			return ret;
 838		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
 839			i, val);
 840	}
 841	return 0;
 842}
 843
 844static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
 845				    unsigned int reg, unsigned int value)
 846{
 847	snd_soc_set_cache_val(codec->reg_cache, reg, value,
 848			      codec->driver->reg_word_size);
 849	return 0;
 850}
 851
 852static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
 853				   unsigned int reg, unsigned int *value)
 854{
 855	*value = snd_soc_get_cache_val(codec->reg_cache, reg,
 856				       codec->driver->reg_word_size);
 857	return 0;
 858}
 859
 860static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
 861{
 862	if (!codec->reg_cache)
 863		return 0;
 864	kfree(codec->reg_cache);
 865	codec->reg_cache = NULL;
 866	return 0;
 867}
 868
 869static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
 870{
 871	const struct snd_soc_codec_driver *codec_drv;
 872
 873	codec_drv = codec->driver;
 874
 875	if (codec->reg_def_copy)
 876		codec->reg_cache = kmemdup(codec->reg_def_copy,
 877					   codec->reg_size, GFP_KERNEL);
 878	else
 879		codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
 880	if (!codec->reg_cache)
 881		return -ENOMEM;
 882
 883	return 0;
 884}
 885
/* an array of all supported compression types; snd_soc_cache_init()
 * selects by id and falls back to index 0 on an unknown type */
static const struct snd_soc_cache_ops cache_types[] = {
	/* Flat *must* be the first entry for fallback */
	{
		.id = SND_SOC_FLAT_COMPRESSION,
		.name = "flat",
		.init = snd_soc_flat_cache_init,
		.exit = snd_soc_flat_cache_exit,
		.read = snd_soc_flat_cache_read,
		.write = snd_soc_flat_cache_write,
		.sync = snd_soc_flat_cache_sync
	},
#ifdef CONFIG_SND_SOC_CACHE_LZO
	{
		.id = SND_SOC_LZO_COMPRESSION,
		.name = "LZO",
		.init = snd_soc_lzo_cache_init,
		.exit = snd_soc_lzo_cache_exit,
		.read = snd_soc_lzo_cache_read,
		.write = snd_soc_lzo_cache_write,
		.sync = snd_soc_lzo_cache_sync
	},
#endif
	{
		.id = SND_SOC_RBTREE_COMPRESSION,
		.name = "rbtree",
		.init = snd_soc_rbtree_cache_init,
		.exit = snd_soc_rbtree_cache_exit,
		.read = snd_soc_rbtree_cache_read,
		.write = snd_soc_rbtree_cache_write,
		.sync = snd_soc_rbtree_cache_sync
	}
};
 919
 920int snd_soc_cache_init(struct snd_soc_codec *codec)
 921{
 922	int i;
 923
 924	for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
 925		if (cache_types[i].id == codec->compress_type)
 926			break;
 927
 928	/* Fall back to flat compression */
 929	if (i == ARRAY_SIZE(cache_types)) {
 930		dev_warn(codec->dev, "Could not match compress type: %d\n",
 931			 codec->compress_type);
 932		i = 0;
 933	}
 934
 935	mutex_init(&codec->cache_rw_mutex);
 936	codec->cache_ops = &cache_types[i];
 937
 938	if (codec->cache_ops->init) {
 939		if (codec->cache_ops->name)
 940			dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
 941				codec->cache_ops->name, codec->name);
 942		return codec->cache_ops->init(codec);
 943	}
 944	return -ENOSYS;
 945}
 946
 947/*
 948 * NOTE: keep in mind that this function might be called
 949 * multiple times.
 950 */
 951int snd_soc_cache_exit(struct snd_soc_codec *codec)
 952{
 953	if (codec->cache_ops && codec->cache_ops->exit) {
 954		if (codec->cache_ops->name)
 955			dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
 956				codec->cache_ops->name, codec->name);
 957		return codec->cache_ops->exit(codec);
 958	}
 959	return -ENOSYS;
 960}
 961
 962/**
 963 * snd_soc_cache_read: Fetch the value of a given register from the cache.
 964 *
 965 * @codec: CODEC to configure.
 966 * @reg: The register index.
 967 * @value: The value to be returned.
 968 */
 969int snd_soc_cache_read(struct snd_soc_codec *codec,
 970		       unsigned int reg, unsigned int *value)
 971{
 972	int ret;
 973
 974	mutex_lock(&codec->cache_rw_mutex);
 975
 976	if (value && codec->cache_ops && codec->cache_ops->read) {
 977		ret = codec->cache_ops->read(codec, reg, value);
 978		mutex_unlock(&codec->cache_rw_mutex);
 979		return ret;
 980	}
 981
 982	mutex_unlock(&codec->cache_rw_mutex);
 983	return -ENOSYS;
 984}
 985EXPORT_SYMBOL_GPL(snd_soc_cache_read);
 986
 987/**
 988 * snd_soc_cache_write: Set the value of a given register in the cache.
 989 *
 990 * @codec: CODEC to configure.
 991 * @reg: The register index.
 992 * @value: The new register value.
 993 */
 994int snd_soc_cache_write(struct snd_soc_codec *codec,
 995			unsigned int reg, unsigned int value)
 996{
 997	int ret;
 998
 999	mutex_lock(&codec->cache_rw_mutex);
1000
1001	if (codec->cache_ops && codec->cache_ops->write) {
1002		ret = codec->cache_ops->write(codec, reg, value);
1003		mutex_unlock(&codec->cache_rw_mutex);
1004		return ret;
1005	}
1006
1007	mutex_unlock(&codec->cache_rw_mutex);
1008	return -ENOSYS;
1009}
1010EXPORT_SYMBOL_GPL(snd_soc_cache_write);
1011
1012/**
1013 * snd_soc_cache_sync: Sync the register cache with the hardware.
1014 *
1015 * @codec: CODEC to configure.
1016 *
1017 * Any registers that should not be synced should be marked as
1018 * volatile.  In general drivers can choose not to use the provided
1019 * syncing functionality if they so require.
1020 */
1021int snd_soc_cache_sync(struct snd_soc_codec *codec)
1022{
1023	int ret;
1024	const char *name;
1025
1026	if (!codec->cache_sync) {
1027		return 0;
1028	}
1029
1030	if (!codec->cache_ops || !codec->cache_ops->sync)
1031		return -ENOSYS;
1032
1033	if (codec->cache_ops->name)
1034		name = codec->cache_ops->name;
1035	else
1036		name = "unknown";
1037
1038	if (codec->cache_ops->name)
1039		dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
1040			codec->cache_ops->name, codec->name);
1041	trace_snd_soc_cache_sync(codec, name, "start");
1042	ret = codec->cache_ops->sync(codec);
1043	if (!ret)
1044		codec->cache_sync = 0;
1045	trace_snd_soc_cache_sync(codec, name, "end");
1046	return ret;
1047}
1048EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
1049
1050static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
1051					unsigned int reg)
1052{
1053	const struct snd_soc_codec_driver *codec_drv;
1054	unsigned int min, max, index;
1055
1056	codec_drv = codec->driver;
1057	min = 0;
1058	max = codec_drv->reg_access_size - 1;
1059	do {
1060		index = (min + max) / 2;
1061		if (codec_drv->reg_access_default[index].reg == reg)
1062			return index;
1063		if (codec_drv->reg_access_default[index].reg < reg)
1064			min = index + 1;
1065		else
1066			max = index;
1067	} while (min <= max);
1068	return -1;
1069}
1070
1071int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
1072				      unsigned int reg)
1073{
1074	int index;
1075
1076	if (reg >= codec->driver->reg_cache_size)
1077		return 1;
1078	index = snd_soc_get_reg_access_index(codec, reg);
1079	if (index < 0)
1080		return 0;
1081	return codec->driver->reg_access_default[index].vol;
1082}
1083EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
1084
1085int snd_soc_default_readable_register(struct snd_soc_codec *codec,
1086				      unsigned int reg)
1087{
1088	int index;
1089
1090	if (reg >= codec->driver->reg_cache_size)
1091		return 1;
1092	index = snd_soc_get_reg_access_index(codec, reg);
1093	if (index < 0)
1094		return 0;
1095	return codec->driver->reg_access_default[index].read;
1096}
1097EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
1098
1099int snd_soc_default_writable_register(struct snd_soc_codec *codec,
1100				      unsigned int reg)
1101{
1102	int index;
1103
1104	if (reg >= codec->driver->reg_cache_size)
1105		return 1;
1106	index = snd_soc_get_reg_access_index(codec, reg);
1107	if (index < 0)
1108		return 0;
1109	return codec->driver->reg_access_default[index].write;
1110}
1111EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);