// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"

/*
 *--------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------
 */
static void memcpy_disk(void *dest, const void *src, size_t len)
	__dm_written_to_disk(src)
{
	memcpy(dest, src, len);
	__dm_unbless_for_disk(src);
}

static void array_insert(void *base, size_t elt_size, unsigned int nr_elts,
			 unsigned int index, void *elt)
	__dm_written_to_disk(elt)
{
	if (index < nr_elts)
		memmove(base + (elt_size * (index + 1)),
			base + (elt_size * index),
			(nr_elts - index) * elt_size);

	memcpy_disk(base + (elt_size * index), elt, elt_size);
}

/*----------------------------------------------------------------*/

/* makes the assumption that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

	while (hi - lo > 1) {
		int mid = lo + ((hi - lo) / 2);
		uint64_t mid_key = le64_to_cpu(n->keys[mid]);

		if (mid_key == key)
			return mid;

		if (mid_key < key)
			lo = mid;
		else
			hi = mid;
	}

	return want_hi ? hi : lo;
}

int lower_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 0);
}

static int upper_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 1);
}
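
/*
 * Editor's illustration (not part of the original source): the
 * lower_bound()/upper_bound() return conventions, shown for a node
 * whose keys are { 10, 20, 30 }:
 *
 *	lower_bound(n, 20) == 1		(exact match)
 *	lower_bound(n, 25) == 1		(largest key <= 25)
 *	lower_bound(n, 5)  == -1	(no key <= 5)
 *	upper_bound(n, 25) == 2		(smallest key >= 25)
 *	upper_bound(n, 35) == 3		(== nr_entries, no key >= 35)
 *
 * Callers must therefore range check the returned index against 0 and
 * nr_entries before using it.
 */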

void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt)
{
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
		dm_tm_with_runs(tm, value_ptr(n, 0), nr_entries, dm_tm_inc_range);

	else if (vt->inc)
		vt->inc(vt->context, value_ptr(n, 0), nr_entries);
}

static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
		     uint64_t key, void *value)
	__dm_written_to_disk(value)
{
	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(node->header.max_entries);
	__le64 key_le = cpu_to_le64(key);

	if (index > nr_entries ||
	    index >= max_entries ||
	    nr_entries >= max_entries) {
		DMERR("too many entries in btree node for insert");
		__dm_unbless_for_disk(value);
		return -ENOMEM;
	}

	__dm_bless_for_disk(&key_le);

	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
	array_insert(value_base(node), value_size, nr_entries, index, value);
	node->header.nr_entries = cpu_to_le32(nr_entries + 1);

	return 0;
}

/*----------------------------------------------------------------*/

/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert/remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
	uint32_t total, n;
	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

	block_size -= sizeof(struct node_header);
	total = block_size / elt_size;
	n = total / 3;		/* rounds down */

	return 3 * n;
}
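
/*
 * Worked example (editor's illustration, assuming a 4096 byte block
 * and sizeof(struct node_header) == 32): with 8 byte values,
 * elt_size = 8 + 8 = 16, total = (4096 - 32) / 16 = 254, n = 84, so
 * max_entries = 252.  The two leftover slots go unused so the maximum
 * stays a multiple of three, which suits the two-into-three node
 * splitting done further down in this file.
 */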

int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
	int r;
	struct dm_block *b;
	struct btree_node *n;
	size_t block_size;
	uint32_t max_entries;

	r = new_block(info, &b);
	if (r < 0)
		return r;

	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
	max_entries = calc_max_entries(info->value_type.size, block_size);

	n = dm_block_data(b);
	memset(n, 0, block_size);
	n->header.flags = cpu_to_le32(LEAF_NODE);
	n->header.nr_entries = cpu_to_le32(0);
	n->header.max_entries = cpu_to_le32(max_entries);
	n->header.value_size = cpu_to_le32(info->value_type.size);

	*root = dm_block_location(b);
	unlock_block(info, b);

	return 0;
}
EXPORT_SYMBOL_GPL(dm_btree_empty);

/*----------------------------------------------------------------*/

/*
 * Deletion uses a recursive algorithm.  Since we have limited stack
 * space we explicitly manage our own stack on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
	struct dm_block *b;
	struct btree_node *n;
	unsigned int level;
	unsigned int nr_children;
	unsigned int current_child;
};

struct del_stack {
	struct dm_btree_info *info;
	struct dm_transaction_manager *tm;
	int top;
	struct frame spine[MAX_SPINE_DEPTH];
};

static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}

static int unprocessed_frames(struct del_stack *s)
{
	return s->top >= 0;
}

static void prefetch_children(struct del_stack *s, struct frame *f)
{
	unsigned int i;
	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

	for (i = 0; i < f->nr_children; i++)
		dm_bm_prefetch(bm, value64(f->n, i));
}

static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
	return f->level < (info->levels - 1);
}

static int push_frame(struct del_stack *s, dm_block_t b, unsigned int level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		uint32_t flags;
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
			prefetch_children(s, f);
	}

	return 0;
}

static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}

static void unlock_all_frames(struct del_stack *s)
{
	struct frame *f;

	while (unprocessed_frames(s)) {
		f = s->spine + s->top--;
		dm_tm_unlock(s->tm, f->b);
	}
}

int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	struct del_stack *s;

	/*
	 * dm_btree_del() is called via an ioctl, and as such should be
	 * considered an FS op.  We can't recurse back into the FS, so we
	 * allocate GFP_NOFS.
	 */
	s = kmalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return -ENOMEM;
	s->info = info;
	s->tm = info->tm;
	s->top = -1;

	r = push_frame(s, root, 0);
	if (r)
		goto out;

	while (unprocessed_frames(s)) {
		uint32_t flags;
		struct frame *f;
		dm_block_t b;

		r = top_frame(s, &f);
		if (r)
			goto out;

		if (f->current_child >= f->nr_children) {
			pop_frame(s);
			continue;
		}

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level);
			if (r)
				goto out;

		} else if (is_internal_level(info, f)) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level + 1);
			if (r)
				goto out;

		} else {
			if (info->value_type.dec)
				info->value_type.dec(info->value_type.context,
						     value_ptr(f->n, 0), f->nr_children);
			pop_frame(s);
		}
	}
out:
	if (r) {
		/* cleanup all frames of del_stack */
		unlock_all_frames(s);
	}
	kfree(s);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);

/*----------------------------------------------------------------*/

static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
			    int (*search_fn)(struct btree_node *, uint64_t),
			    uint64_t *result_key, void *v, size_t value_size)
{
	int i, r;
	uint32_t flags, nr_entries;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		i = search_fn(ro_node(s), key);

		flags = le32_to_cpu(ro_node(s)->header.flags);
		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (i < 0 || i >= nr_entries)
			return -ENODATA;

		if (flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (!(flags & LEAF_NODE));

	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
	if (v)
		memcpy(v, value_ptr(ro_node(s), i), value_size);

	return 0;
}

int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value_le)
{
	unsigned int level, last_level = info->levels - 1;
	int r = -ENODATA;
	uint64_t rkey;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		size_t size;
		void *value_p;

		if (level == last_level) {
			value_p = value_le;
			size = info->value_type.size;

		} else {
			value_p = &internal_value_le;
			size = sizeof(uint64_t);
		}

		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, &rkey,
				     value_p, size);

		if (!r) {
			if (rkey != keys[level]) {
				exit_ro_spine(&spine);
				return -ENODATA;
			}
		} else {
			exit_ro_spine(&spine);
			return r;
		}

		root = le64_to_cpu(internal_value_le);
	}
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
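
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * looking up an 8 byte value in a single level tree (info.levels == 1).
 * For an N level tree, keys must point at an array of N keys.
 *
 *	uint64_t key = 1234;
 *	__le64 value_le;
 *	int r;
 *
 *	r = dm_btree_lookup(&info, root, &key, &value_le);
 *	if (r == -ENODATA)
 *		;	// no entry with this key
 *	else if (!r)
 *		;	// value_le now holds the little-endian value
 */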

static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
				       uint64_t key, uint64_t *rkey, void *value_le)
{
	int r, i;
	uint32_t flags, nr_entries;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, root, &node);
	if (r)
		return r;

	n = dm_block_data(node);
	flags = le32_to_cpu(n->header.flags);
	nr_entries = le32_to_cpu(n->header.nr_entries);

	if (flags & INTERNAL_NODE) {
		i = lower_bound(n, key);
		if (i < 0) {
			/*
			 * avoid early -ENODATA return when all entries are
			 * higher than the search @key.
			 */
			i = 0;
		}
		if (i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		if (r == -ENODATA && i < (nr_entries - 1)) {
			i++;
			r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
		}

	} else {
		i = upper_bound(n, key);
		if (i < 0 || i >= nr_entries) {
			r = -ENODATA;
			goto out;
		}

		*rkey = le64_to_cpu(n->keys[i]);
		memcpy(value_le, value_ptr(n, i), info->value_type.size);
	}
out:
	dm_tm_unlock(info->tm, node);
	return r;
}

int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
			 uint64_t *keys, uint64_t *rkey, void *value_le)
{
	unsigned int level;
	int r = -ENODATA;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels - 1u; level++) {
		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, rkey,
				     &internal_value_le, sizeof(uint64_t));
		if (r)
			goto out;

		if (*rkey != keys[level]) {
			r = -ENODATA;
			goto out;
		}

		root = le64_to_cpu(internal_value_le);
	}

	r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
out:
	exit_ro_spine(&spine);
	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);

/*----------------------------------------------------------------*/

/*
 * Copies entries from one region of a btree node to another.  The regions
 * must not overlap.
 */
static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
			 struct btree_node *src, unsigned int src_offset,
			 unsigned int count)
{
	size_t value_size = le32_to_cpu(dest->header.value_size);

	memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
	memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}

/*
 * Moves entries from one region of a btree node to another.  The regions
 * may overlap.
 */
static void move_entries(struct btree_node *dest, unsigned int dest_offset,
			 struct btree_node *src, unsigned int src_offset,
			 unsigned int count)
{
	size_t value_size = le32_to_cpu(dest->header.value_size);

	memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
	memmove(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}

/*
 * Erases the first 'count' entries of a btree node, shifting following
 * entries down into their place.
 */
static void shift_down(struct btree_node *n, unsigned int count)
{
	move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
}

/*
 * Moves entries in a btree node up 'count' places, making space for
 * new entries at the start of the node.
 */
static void shift_up(struct btree_node *n, unsigned int count)
{
	move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
}

/*
 * Redistributes entries between two btree nodes to make them
 * have similar numbers of entries.
 */
static void redistribute2(struct btree_node *left, struct btree_node *right)
{
	unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
	unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
	unsigned int total = nr_left + nr_right;
	unsigned int target_left = total / 2;
	unsigned int target_right = total - target_left;

	if (nr_left < target_left) {
		unsigned int delta = target_left - nr_left;

		copy_entries(left, nr_left, right, 0, delta);
		shift_down(right, delta);
	} else if (nr_left > target_left) {
		unsigned int delta = nr_left - target_left;

		if (nr_right)
			shift_up(right, delta);
		copy_entries(right, 0, left, target_left, delta);
	}

	left->header.nr_entries = cpu_to_le32(target_left);
	right->header.nr_entries = cpu_to_le32(target_right);
}
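
/*
 * Worked example (editor's illustration): nr_left = 1, nr_right = 7
 * gives total = 8 and targets of 4/4.  nr_left < target_left, so
 * delta = 3 entries are copied from the front of right onto the end
 * of left and right is shifted down by 3, leaving 4 entries in each
 * node.
 */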

/*
 * Redistribute entries between three nodes.  Assumes the central
 * node is empty.
 */
static void redistribute3(struct btree_node *left, struct btree_node *center,
			  struct btree_node *right)
{
	unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
	unsigned int nr_center = le32_to_cpu(center->header.nr_entries);
	unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
	unsigned int total, target_left, target_center, target_right;

	BUG_ON(nr_center);

	total = nr_left + nr_right;
	target_left = total / 3;
	target_center = (total - target_left) / 2;
	target_right = (total - target_left - target_center);

	if (nr_left < target_left) {
		unsigned int left_short = target_left - nr_left;

		copy_entries(left, nr_left, right, 0, left_short);
		copy_entries(center, 0, right, left_short, target_center);
		shift_down(right, nr_right - target_right);

	} else if (nr_left < (target_left + target_center)) {
		unsigned int left_to_center = nr_left - target_left;

		copy_entries(center, 0, left, target_left, left_to_center);
		copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
		shift_down(right, nr_right - target_right);

	} else {
		unsigned int right_short = target_right - nr_right;

		shift_up(right, right_short);
		copy_entries(right, 0, left, nr_left - right_short, right_short);
		copy_entries(center, 0, left, target_left, nr_left - target_left);
	}

	left->header.nr_entries = cpu_to_le32(target_left);
	center->header.nr_entries = cpu_to_le32(target_center);
	right->header.nr_entries = cpu_to_le32(target_right);
}
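
/*
 * Worked example (editor's illustration): nr_left = 8, nr_right = 2,
 * center empty.  total = 10, so target_left = 3, target_center = 3,
 * target_right = 4.  nr_left >= target_left + target_center, so the
 * third branch runs: right is shifted up by right_short = 2, the last
 * two entries of left move to the front of right, and the entries
 * from index target_left onwards seed center.  The headers are then
 * set to 3/3/4.
 */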

/*
 * Splits a node by creating a sibling node and shifting half the node's
 * contents across.  Assumes there is a parent node, and it has room for
 * another child.
 *
 * Before:
 *	  +--------+
 *	  | Parent |
 *	  +--------+
 *	     |
 *	     v
 *	+----------+
 *	| A ++++++ |
 *	+----------+
 *
 *
 * After:
 *		+--------+
 *		| Parent |
 *		+--------+
 *		  |	|
 *		  v	+------+
 *	    +---------+	       |
 *	    | A* +++  |	       v
 *	    +---------+	  +-------+
 *			  | B +++ |
 *			  +-------+
 *
 * Where A* is a shadow of A.
 */
static int split_one_into_two(struct shadow_spine *s, unsigned int parent_index,
			      struct dm_btree_value_type *vt, uint64_t key)
{
	int r;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(0);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	redistribute2(ln, rn);

	/* patch up the parent */
	parent = shadow_parent(s);
	pn = dm_block_data(parent);

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);
	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r) {
		unlock_block(s->info, right);
		return r;
	}

	/* patch up the spine */
	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}

/*
 * We often need to modify a sibling node.  This function shadows a
 * particular child of the given parent node, making sure to update the
 * parent to point to the new shadow.
 */
static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
			struct btree_node *parent, unsigned int index,
			struct dm_block **result)
{
	int r, inc;
	dm_block_t root;
	struct btree_node *node;

	root = value64(parent, index);

	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       result, &inc);
	if (r)
		return r;

	node = dm_block_data(*result);

	if (inc)
		inc_children(info->tm, node, vt);

	*((__le64 *) value_ptr(parent, index)) =
		cpu_to_le64(dm_block_location(*result));

	return 0;
}

/*
 * Splits two nodes into three.  This is more work, but results in fuller
 * nodes, so saves metadata space.
 */
static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
				struct dm_btree_value_type *vt, uint64_t key)
{
	int r;
	unsigned int middle_index;
	struct dm_block *left, *middle, *right, *parent;
	struct btree_node *ln, *rn, *mn, *pn;
	__le64 location;

	parent = shadow_parent(s);
	pn = dm_block_data(parent);

	if (parent_index == 0) {
		middle_index = 1;
		left = shadow_current(s);
		r = shadow_child(s->info, vt, pn, parent_index + 1, &right);
		if (r)
			return r;
	} else {
		middle_index = parent_index;
		right = shadow_current(s);
		r = shadow_child(s->info, vt, pn, parent_index - 1, &left);
		if (r)
			return r;
	}

	r = new_block(s->info, &middle);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	mn = dm_block_data(middle);
	rn = dm_block_data(right);

	mn->header.nr_entries = cpu_to_le32(0);
	mn->header.flags = ln->header.flags;
	mn->header.max_entries = ln->header.max_entries;
	mn->header.value_size = ln->header.value_size;

	redistribute3(ln, mn, rn);

	/* patch up the parent */
	pn->keys[middle_index] = rn->keys[0];
	location = cpu_to_le64(dm_block_location(middle));
	__dm_bless_for_disk(&location);
	r = insert_at(sizeof(__le64), pn, middle_index,
		      le64_to_cpu(mn->keys[0]), &location);
	if (r) {
		if (shadow_current(s) != left)
			unlock_block(s->info, left);

		unlock_block(s->info, middle);

		if (shadow_current(s) != right)
			unlock_block(s->info, right);

		return r;
	}


	/* patch up the spine */
	if (key < le64_to_cpu(mn->keys[0])) {
		unlock_block(s->info, middle);
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, left);
		unlock_block(s->info, right);
		s->nodes[1] = middle;
	} else {
		unlock_block(s->info, left);
		unlock_block(s->info, middle);
		s->nodes[1] = right;
	}

	return 0;
}

/*----------------------------------------------------------------*/

/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before:
 *	  +----------+
 *	  | A ++++++ |
 *	  +----------+
 *
 *
 * After:
 *	+------------+
 *	| A (shadow) |
 *	+------------+
 *	    |	|
 *   +------+	+----+
 *   |		     |
 *   v		     v
 * +-------+	 +-------+
 * | B +++ |	 | C +++ |
 * +-------+	 +-------+
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned int nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	pn = dm_block_data(new_parent);
	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;

	/* create & init the left block */
	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;
	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);

	/* create & init the right block */
	r = new_block(s->info, &right);
	if (r < 0) {
		unlock_block(s->info, left);
		return r;
	}

	rn = dm_block_data(right);
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	unlock_block(s->info, left);
	unlock_block(s->info, right);
	return 0;
}

/*----------------------------------------------------------------*/

/*
 * Redistributes a node's entries with its left sibling.
 */
static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
			  unsigned int parent_index, uint64_t key)
{
	int r;
	struct dm_block *sib;
	struct btree_node *left, *right, *parent = dm_block_data(shadow_parent(s));

	r = shadow_child(s->info, vt, parent, parent_index - 1, &sib);
	if (r)
		return r;

	left = dm_block_data(sib);
	right = dm_block_data(shadow_current(s));
	redistribute2(left, right);
	*key_ptr(parent, parent_index) = right->keys[0];

	if (key < le64_to_cpu(right->keys[0])) {
		unlock_block(s->info, s->nodes[1]);
		s->nodes[1] = sib;
	} else {
		unlock_block(s->info, sib);
	}

	return 0;
}

/*
 * Redistributes a node's entries with its right sibling.
 */
static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
			   unsigned int parent_index, uint64_t key)
{
	int r;
	struct dm_block *sib;
	struct btree_node *left, *right, *parent = dm_block_data(shadow_parent(s));

	r = shadow_child(s->info, vt, parent, parent_index + 1, &sib);
	if (r)
		return r;

	left = dm_block_data(shadow_current(s));
	right = dm_block_data(sib);
	redistribute2(left, right);
	*key_ptr(parent, parent_index + 1) = right->keys[0];

	if (key < le64_to_cpu(right->keys[0])) {
		unlock_block(s->info, sib);
	} else {
		unlock_block(s->info, s->nodes[1]);
		s->nodes[1] = sib;
	}

	return 0;
}

/*
 * Returns the number of spare entries in a node.
 */
static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned int *space)
{
	int r;
	unsigned int nr_entries;
	struct dm_block *block;
	struct btree_node *node;

	r = bn_read_lock(info, b, &block);
	if (r)
		return r;

	node = dm_block_data(block);
	nr_entries = le32_to_cpu(node->header.nr_entries);
	*space = le32_to_cpu(node->header.max_entries) - nr_entries;

	unlock_block(info, block);
	return 0;
}

/*
 * Make space in a node, either by moving some entries to a sibling,
 * or creating a new sibling node.  SPACE_THRESHOLD defines the minimum
 * number of free entries that must be in the sibling to make the move
 * worthwhile.  If the siblings are shared (eg, part of a snapshot),
 * then they are not touched, since this would break sharing and so
 * consume more space than we save.
 */
#define SPACE_THRESHOLD 8
static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
			      unsigned int parent_index, uint64_t key)
{
	int r;
	struct btree_node *parent = dm_block_data(shadow_parent(s));
	unsigned int nr_parent = le32_to_cpu(parent->header.nr_entries);
	unsigned int free_space;
	int left_shared = 0, right_shared = 0;

	/* Should we move entries to the left sibling? */
	if (parent_index > 0) {
		dm_block_t left_b = value64(parent, parent_index - 1);

		r = dm_tm_block_is_shared(s->info->tm, left_b, &left_shared);
		if (r)
			return r;

		if (!left_shared) {
			r = get_node_free_space(s->info, left_b, &free_space);
			if (r)
				return r;

			if (free_space >= SPACE_THRESHOLD)
				return rebalance_left(s, vt, parent_index, key);
		}
	}

	/* Should we move entries to the right sibling? */
	if (parent_index < (nr_parent - 1)) {
		dm_block_t right_b = value64(parent, parent_index + 1);

		r = dm_tm_block_is_shared(s->info->tm, right_b, &right_shared);
		if (r)
			return r;

		if (!right_shared) {
			r = get_node_free_space(s->info, right_b, &free_space);
			if (r)
				return r;

			if (free_space >= SPACE_THRESHOLD)
				return rebalance_right(s, vt, parent_index, key);
		}
	}

	/*
	 * We need to split the node, normally we split two nodes
	 * into three.	But when inserting a sequence that is either
	 * monotonically increasing or decreasing it's better to split
	 * a single node into two.
	 */
	if (left_shared || right_shared || (nr_parent <= 2) ||
	    (parent_index == 0) || (parent_index + 1 == nr_parent)) {
		return split_one_into_two(s, parent_index, vt, key);
	} else {
		return split_two_into_three(s, parent_index, vt, key);
	}
}
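
/*
 * Editor's note on the split policy above (illustration, not original
 * text): with a monotonically increasing key sequence every insert
 * lands in the rightmost child, so parent_index + 1 == nr_parent and
 * split_one_into_two() is chosen; splitting two siblings into three
 * would repeatedly rearrange nodes that will never be revisited.
 */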

/*
 * Does the node contain a particular key?
 */
static bool contains_key(struct btree_node *node, uint64_t key)
{
	int i = lower_bound(node, key);

	if (i >= 0 && le64_to_cpu(node->keys[i]) == key)
		return true;

	return false;
}

/*
 * In general we preemptively make sure there's a free entry in every
 * node on the spine when doing an insert.  But we can avoid that with
 * leaf nodes if we know it's an overwrite.
 */
static bool has_space_for_insert(struct btree_node *node, uint64_t key)
{
	if (node->header.nr_entries == node->header.max_entries) {
		if (le32_to_cpu(node->header.flags) & LEAF_NODE) {
			/* we don't need space if it's an overwrite */
			return contains_key(node, key);
		}

		return false;
	}

	return true;
}

static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned int *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unness. */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (!has_space_for_insert(node, key)) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = rebalance_or_split(s, vt, i, key);

			if (r < 0)
				return r;

			/* making space can cause the current node to change */
			node = dm_block_data(shadow_current(s));
		}

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		i++;

	*index = i;
	return 0;
}

static int __btree_get_overwrite_leaf(struct shadow_spine *s, dm_block_t root,
				      uint64_t key, int *index)
{
	int r, i = -1;
	struct btree_node *node;

	*index = 0;
	for (;;) {
		r = shadow_step(s, root, &s->info->value_type);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));
		i = lower_bound(node, key);

		BUG_ON(i < 0);
		BUG_ON(i >= le32_to_cpu(node->header.nr_entries));

		if (le32_to_cpu(node->header.flags) & LEAF_NODE) {
			if (key != le64_to_cpu(node->keys[i]))
				return -EINVAL;
			break;
		}

		root = value64(node, i);
	}

	*index = i;
	return 0;
}

int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
			     uint64_t key, int *index,
			     dm_block_t *new_root, struct dm_block **leaf)
{
	int r;
	struct shadow_spine spine;

	BUG_ON(info->levels > 1);
	init_shadow_spine(&spine, info);
	r = __btree_get_overwrite_leaf(&spine, root, key, index);
	if (!r) {
		*new_root = shadow_root(&spine);
		*leaf = shadow_current(&spine);

		/*
		 * Decrement the count so exit_shadow_spine() doesn't
		 * unlock the leaf.
		 */
		spine.count--;
	}
	exit_shadow_spine(&spine);

	return r;
}

static bool need_insert(struct btree_node *node, uint64_t *keys,
			unsigned int level, unsigned int index)
{
	return ((index >= le32_to_cpu(node->header.nr_entries)) ||
		(le64_to_cpu(node->keys[index]) != keys[level]));
}

static int insert(struct dm_btree_info *info, dm_block_t root,
		  uint64_t *keys, void *value, dm_block_t *new_root,
		  int *inserted)
		  __dm_written_to_disk(value)
{
	int r;
	unsigned int level, index = -1, last_level = info->levels - 1;
	dm_block_t block = root;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_type;

	init_le64_type(info->tm, &le64_type);
	init_shadow_spine(&spine, info);

	for (level = 0; level < (info->levels - 1); level++) {
		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
		if (r < 0)
			goto bad;

		n = dm_block_data(shadow_current(&spine));

		if (need_insert(n, keys, level, index)) {
			dm_block_t new_tree;
			__le64 new_le;

			r = dm_btree_empty(info, &new_tree);
			if (r < 0)
				goto bad;

			new_le = cpu_to_le64(new_tree);
			__dm_bless_for_disk(&new_le);

			r = insert_at(sizeof(uint64_t), n, index,
				      keys[level], &new_le);
			if (r)
				goto bad;
		}

		if (level < last_level)
			block = value64(n, index);
	}

	r = btree_insert_raw(&spine, block, &info->value_type,
			     keys[level], &index);
	if (r < 0)
		goto bad;

	n = dm_block_data(shadow_current(&spine));

	if (need_insert(n, keys, level, index)) {
		if (inserted)
			*inserted = 1;

		r = insert_at(info->value_type.size, n, index,
			      keys[level], value);
		if (r)
			goto bad_unblessed;
	} else {
		if (inserted)
			*inserted = 0;

		if (info->value_type.dec &&
		    (!info->value_type.equal ||
		     !info->value_type.equal(
			     info->value_type.context,
			     value_ptr(n, index),
			     value))) {
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index), 1);
		}
		memcpy_disk(value_ptr(n, index),
			    value, info->value_type.size);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return 0;

bad:
	__dm_unbless_for_disk(value);
bad_unblessed:
	exit_shadow_spine(&spine);
	return r;
}

int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value, dm_block_t *new_root)
	__dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);

int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *keys, void *value, dm_block_t *new_root,
			   int *inserted)
	__dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
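
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * inserting an 8 byte little-endian value into a single level tree.
 * The tree is copy-on-write, so the caller must adopt the returned
 * new_root.
 *
 *	uint64_t key = 1234;
 *	__le64 value_le = cpu_to_le64(42);
 *	dm_block_t new_root;
 *	int r;
 *
 *	__dm_bless_for_disk(&value_le);
 *	r = dm_btree_insert(&info, root, &key, &value_le, &new_root);
 *	if (!r)
 *		root = new_root;
 */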

/*----------------------------------------------------------------*/

static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
		    uint64_t *result_key, dm_block_t *next_block)
{
	int i, r;
	uint32_t flags;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		flags = le32_to_cpu(ro_node(s)->header.flags);
		i = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (!i)
			return -ENODATA;

		i--;

		if (find_highest)
			*result_key = le64_to_cpu(ro_node(s)->keys[i]);
		else
			*result_key = le64_to_cpu(ro_node(s)->keys[0]);

		if (next_block || flags & INTERNAL_NODE) {
			if (find_highest)
				block = value64(ro_node(s), i);
			else
				block = value64(ro_node(s), 0);
		}

	} while (flags & INTERNAL_NODE);

	if (next_block)
		*next_block = block;
	return 0;
}

static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
			     bool find_highest, uint64_t *result_keys)
{
	int r = 0, count = 0, level;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = find_key(&spine, root, find_highest, result_keys + level,
			     level == info->levels - 1 ? NULL : &root);
		if (r == -ENODATA) {
			r = 0;
			break;

		} else if (r)
			break;

		count++;
	}
	exit_ro_spine(&spine);

	return r ? r : count;
}

int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
			      uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, true, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);

int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
			     uint64_t *result_keys)
{
	return dm_btree_find_key(info, root, false, result_keys);
}
EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);

/*----------------------------------------------------------------*/

/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also this only works for single level trees.
 */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned int i, nr;
	struct dm_block *node;
	struct btree_node *n;
	uint64_t keys;

	r = bn_read_lock(info, block, &node);
	if (r)
		return r;

	n = dm_block_data(node);

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
			r = walk_node(info, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	dm_tm_unlock(info->tm, node);
	return r;
}

int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
		  int (*fn)(void *context, uint64_t *keys, void *leaf),
		  void *context)
{
	BUG_ON(info->levels > 1);
	return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
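
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * counting the entries of a single level tree with dm_btree_walk().
 * Returning non-zero from the callback aborts the walk and the error
 * is propagated back to the caller.
 *
 *	static int count_fn(void *context, uint64_t *keys, void *leaf)
 *	{
 *		(*(unsigned int *) context)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	int r = dm_btree_walk(&info, root, count_fn, &count);
 */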

/*----------------------------------------------------------------*/

static void prefetch_values(struct dm_btree_cursor *c)
{
	unsigned int i, nr;
	__le64 value_le;
	struct cursor_node *n = c->nodes + c->depth - 1;
	struct btree_node *bn = dm_block_data(n->b);
	struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);

	BUG_ON(c->info->value_type.size != sizeof(value_le));

	nr = le32_to_cpu(bn->header.nr_entries);
	for (i = 0; i < nr; i++) {
		memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
		dm_bm_prefetch(bm, le64_to_cpu(value_le));
	}
}

static bool leaf_node(struct dm_btree_cursor *c)
{
	struct cursor_node *n = c->nodes + c->depth - 1;
	struct btree_node *bn = dm_block_data(n->b);

	return le32_to_cpu(bn->header.flags) & LEAF_NODE;
}

static int push_node(struct dm_btree_cursor *c, dm_block_t b)
{
	int r;
	struct cursor_node *n = c->nodes + c->depth;

	if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
		DMERR("couldn't push cursor node, stack depth too high");
		return -EINVAL;
	}

	r = bn_read_lock(c->info, b, &n->b);
	if (r)
		return r;

	n->index = 0;
	c->depth++;

	if (c->prefetch_leaves || !leaf_node(c))
		prefetch_values(c);

	return 0;
}

static void pop_node(struct dm_btree_cursor *c)
{
	c->depth--;
	unlock_block(c->info, c->nodes[c->depth].b);
}

static int inc_or_backtrack(struct dm_btree_cursor *c)
{
	struct cursor_node *n;
	struct btree_node *bn;

	for (;;) {
		if (!c->depth)
			return -ENODATA;

		n = c->nodes + c->depth - 1;
		bn = dm_block_data(n->b);

		n->index++;
		if (n->index < le32_to_cpu(bn->header.nr_entries))
			break;

		pop_node(c);
	}

	return 0;
}

static int find_leaf(struct dm_btree_cursor *c)
{
	int r = 0;
	struct cursor_node *n;
	struct btree_node *bn;
	__le64 value_le;

	for (;;) {
		n = c->nodes + c->depth - 1;
		bn = dm_block_data(n->b);

		if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
			break;

		memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
		r = push_node(c, le64_to_cpu(value_le));
		if (r) {
			DMERR("push_node failed");
			break;
		}
	}

	if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
		return -ENODATA;

	return r;
}

int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
			  bool prefetch_leaves, struct dm_btree_cursor *c)
{
	int r;

	c->info = info;
	c->root = root;
	c->depth = 0;
	c->prefetch_leaves = prefetch_leaves;

	r = push_node(c, root);
	if (r)
		return r;

	return find_leaf(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);

void dm_btree_cursor_end(struct dm_btree_cursor *c)
{
	while (c->depth)
		pop_node(c);
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_end);

int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
	int r = inc_or_backtrack(c);

	if (!r) {
		r = find_leaf(c);
		if (r)
			DMERR("find_leaf failed");
	}

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_next);

int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count)
{
	int r = 0;

	while (count-- && !r)
		r = dm_btree_cursor_next(c);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_skip);

int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
{
	if (c->depth) {
		struct cursor_node *n = c->nodes + c->depth - 1;
		struct btree_node *bn = dm_block_data(n->b);

		if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
			return -EINVAL;

		*key = le64_to_cpu(*key_ptr(bn, n->index));
		memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
		return 0;

	} else
		return -ENODATA;
}
EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
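
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * iterating every entry of a tree with 8 byte values via the cursor
 * API.  dm_btree_cursor_next() returns -ENODATA once the last entry
 * has been visited.
 *
 *	struct dm_btree_cursor c;
 *	uint64_t key;
 *	__le64 value_le;
 *	int r;
 *
 *	r = dm_btree_cursor_begin(&info, root, false, &c);
 *	while (!r) {
 *		r = dm_btree_cursor_get_value(&c, &key, &value_le);
 *		if (r)
 *			break;
 *		// use key / value_le here
 *		r = dm_btree_cursor_next(&c);
 *	}
 *	dm_btree_cursor_end(&c);
 */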
v4.17
 
   1/*
   2 * Copyright (C) 2011 Red Hat, Inc.
   3 *
   4 * This file is released under the GPL.
   5 */
   6
   7#include "dm-btree-internal.h"
   8#include "dm-space-map.h"
   9#include "dm-transaction-manager.h"
  10
  11#include <linux/export.h>
  12#include <linux/device-mapper.h>
  13
  14#define DM_MSG_PREFIX "btree"
  15
  16/*----------------------------------------------------------------
 
  17 * Array manipulation
  18 *--------------------------------------------------------------*/
 
  19static void memcpy_disk(void *dest, const void *src, size_t len)
  20	__dm_written_to_disk(src)
  21{
  22	memcpy(dest, src, len);
  23	__dm_unbless_for_disk(src);
  24}
  25
  26static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
  27			 unsigned index, void *elt)
  28	__dm_written_to_disk(elt)
  29{
  30	if (index < nr_elts)
  31		memmove(base + (elt_size * (index + 1)),
  32			base + (elt_size * index),
  33			(nr_elts - index) * elt_size);
  34
  35	memcpy_disk(base + (elt_size * index), elt, elt_size);
  36}
  37
  38/*----------------------------------------------------------------*/
  39
  40/* makes the assumption that no two keys are the same. */
  41static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
  42{
  43	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
  44
  45	while (hi - lo > 1) {
  46		int mid = lo + ((hi - lo) / 2);
  47		uint64_t mid_key = le64_to_cpu(n->keys[mid]);
  48
  49		if (mid_key == key)
  50			return mid;
  51
  52		if (mid_key < key)
  53			lo = mid;
  54		else
  55			hi = mid;
  56	}
  57
  58	return want_hi ? hi : lo;
  59}
  60
  61int lower_bound(struct btree_node *n, uint64_t key)
  62{
  63	return bsearch(n, key, 0);
  64}
  65
  66static int upper_bound(struct btree_node *n, uint64_t key)
  67{
  68	return bsearch(n, key, 1);
  69}
  70
  71void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
  72		  struct dm_btree_value_type *vt)
  73{
  74	unsigned i;
  75	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
  76
  77	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
  78		for (i = 0; i < nr_entries; i++)
  79			dm_tm_inc(tm, value64(n, i));
  80	else if (vt->inc)
  81		for (i = 0; i < nr_entries; i++)
  82			vt->inc(vt->context, value_ptr(n, i));
  83}
  84
  85static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
  86		      uint64_t key, void *value)
  87		      __dm_written_to_disk(value)
  88{
  89	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
 
  90	__le64 key_le = cpu_to_le64(key);
  91
  92	if (index > nr_entries ||
  93	    index >= le32_to_cpu(node->header.max_entries)) {
 
  94		DMERR("too many entries in btree node for insert");
  95		__dm_unbless_for_disk(value);
  96		return -ENOMEM;
  97	}
  98
  99	__dm_bless_for_disk(&key_le);
 100
 101	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
 102	array_insert(value_base(node), value_size, nr_entries, index, value);
 103	node->header.nr_entries = cpu_to_le32(nr_entries + 1);
 104
 105	return 0;
 106}
 107
 108/*----------------------------------------------------------------*/
 109
 110/*
 111 * We want 3n entries (for some n).  This works more nicely for repeated
 112 * insert remove loops than (2n + 1).
 113 */
 114static uint32_t calc_max_entries(size_t value_size, size_t block_size)
 115{
 116	uint32_t total, n;
 117	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
 118
 119	block_size -= sizeof(struct node_header);
 120	total = block_size / elt_size;
 121	n = total / 3;		/* rounds down */
 122
 123	return 3 * n;
 124}
 125
 126int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
 127{
 128	int r;
 129	struct dm_block *b;
 130	struct btree_node *n;
 131	size_t block_size;
 132	uint32_t max_entries;
 133
 134	r = new_block(info, &b);
 135	if (r < 0)
 136		return r;
 137
 138	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
 139	max_entries = calc_max_entries(info->value_type.size, block_size);
 140
 141	n = dm_block_data(b);
 142	memset(n, 0, block_size);
 143	n->header.flags = cpu_to_le32(LEAF_NODE);
 144	n->header.nr_entries = cpu_to_le32(0);
 145	n->header.max_entries = cpu_to_le32(max_entries);
 146	n->header.value_size = cpu_to_le32(info->value_type.size);
 147
 148	*root = dm_block_location(b);
 149	unlock_block(info, b);
 150
 151	return 0;
 152}
 153EXPORT_SYMBOL_GPL(dm_btree_empty);
 154
 155/*----------------------------------------------------------------*/
 156
 157/*
 158 * Deletion uses a recursive algorithm, since we have limited stack space
 159 * we explicitly manage our own stack on the heap.
 160 */
 161#define MAX_SPINE_DEPTH 64
 162struct frame {
 163	struct dm_block *b;
 164	struct btree_node *n;
 165	unsigned level;
 166	unsigned nr_children;
 167	unsigned current_child;
 168};
 169
 170struct del_stack {
 171	struct dm_btree_info *info;
 172	struct dm_transaction_manager *tm;
 173	int top;
 174	struct frame spine[MAX_SPINE_DEPTH];
 175};
 176
 177static int top_frame(struct del_stack *s, struct frame **f)
 178{
 179	if (s->top < 0) {
 180		DMERR("btree deletion stack empty");
 181		return -EINVAL;
 182	}
 183
 184	*f = s->spine + s->top;
 185
 186	return 0;
 187}
 188
 189static int unprocessed_frames(struct del_stack *s)
 190{
 191	return s->top >= 0;
 192}
 193
 194static void prefetch_children(struct del_stack *s, struct frame *f)
 195{
 196	unsigned i;
 197	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
 198
 199	for (i = 0; i < f->nr_children; i++)
 200		dm_bm_prefetch(bm, value64(f->n, i));
 201}
 202
 203static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
 204{
 205	return f->level < (info->levels - 1);
 206}
 207
 208static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 209{
 210	int r;
 211	uint32_t ref_count;
 212
 213	if (s->top >= MAX_SPINE_DEPTH - 1) {
 214		DMERR("btree deletion stack out of memory");
 215		return -ENOMEM;
 216	}
 217
 218	r = dm_tm_ref(s->tm, b, &ref_count);
 219	if (r)
 220		return r;
 221
 222	if (ref_count > 1)
 223		/*
 224		 * This is a shared node, so we can just decrement it's
 225		 * reference counter and leave the children.
 226		 */
 227		dm_tm_dec(s->tm, b);
 228
 229	else {
 230		uint32_t flags;
 231		struct frame *f = s->spine + ++s->top;
 232
 233		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
 234		if (r) {
 235			s->top--;
 236			return r;
 237		}
 238
 239		f->n = dm_block_data(f->b);
 240		f->level = level;
 241		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
 242		f->current_child = 0;
 243
 244		flags = le32_to_cpu(f->n->header.flags);
 245		if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
 246			prefetch_children(s, f);
 247	}
 248
 249	return 0;
 250}
 251
 252static void pop_frame(struct del_stack *s)
 253{
 254	struct frame *f = s->spine + s->top--;
 255
 256	dm_tm_dec(s->tm, dm_block_location(f->b));
 257	dm_tm_unlock(s->tm, f->b);
 258}
 259
 260static void unlock_all_frames(struct del_stack *s)
 261{
 262	struct frame *f;
 263
 264	while (unprocessed_frames(s)) {
 265		f = s->spine + s->top--;
 266		dm_tm_unlock(s->tm, f->b);
 267	}
 268}
 269
 270int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 271{
 272	int r;
 273	struct del_stack *s;
 274
 275	/*
 276	 * dm_btree_del() is called via an ioctl, as such should be
 277	 * considered an FS op.  We can't recurse back into the FS, so we
 278	 * allocate GFP_NOFS.
 279	 */
 280	s = kmalloc(sizeof(*s), GFP_NOFS);
 281	if (!s)
 282		return -ENOMEM;
 283	s->info = info;
 284	s->tm = info->tm;
 285	s->top = -1;
 286
 287	r = push_frame(s, root, 0);
 288	if (r)
 289		goto out;
 290
 291	while (unprocessed_frames(s)) {
 292		uint32_t flags;
 293		struct frame *f;
 294		dm_block_t b;
 295
 296		r = top_frame(s, &f);
 297		if (r)
 298			goto out;
 299
 300		if (f->current_child >= f->nr_children) {
 301			pop_frame(s);
 302			continue;
 303		}
 304
 305		flags = le32_to_cpu(f->n->header.flags);
 306		if (flags & INTERNAL_NODE) {
 307			b = value64(f->n, f->current_child);
 308			f->current_child++;
 309			r = push_frame(s, b, f->level);
 310			if (r)
 311				goto out;
 312
 313		} else if (is_internal_level(info, f)) {
 314			b = value64(f->n, f->current_child);
 315			f->current_child++;
 316			r = push_frame(s, b, f->level + 1);
 317			if (r)
 318				goto out;
 319
 320		} else {
 321			if (info->value_type.dec) {
 322				unsigned i;
 323
 324				for (i = 0; i < f->nr_children; i++)
 325					info->value_type.dec(info->value_type.context,
 326							     value_ptr(f->n, i));
 327			}
 328			pop_frame(s);
 329		}
 330	}
 331out:
 332	if (r) {
 333		/* cleanup all frames of del_stack */
 334		unlock_all_frames(s);
 335	}
 336	kfree(s);
 337
 338	return r;
 339}
 340EXPORT_SYMBOL_GPL(dm_btree_del);
 341
 342/*----------------------------------------------------------------*/
 343
 344static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
 345			    int (*search_fn)(struct btree_node *, uint64_t),
 346			    uint64_t *result_key, void *v, size_t value_size)
 347{
 348	int i, r;
 349	uint32_t flags, nr_entries;
 350
 351	do {
 352		r = ro_step(s, block);
 353		if (r < 0)
 354			return r;
 355
 356		i = search_fn(ro_node(s), key);
 357
 358		flags = le32_to_cpu(ro_node(s)->header.flags);
 359		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
 360		if (i < 0 || i >= nr_entries)
 361			return -ENODATA;
 362
 363		if (flags & INTERNAL_NODE)
 364			block = value64(ro_node(s), i);
 365
 366	} while (!(flags & LEAF_NODE));
 367
 368	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
 369	memcpy(v, value_ptr(ro_node(s), i), value_size);
 
 370
 371	return 0;
 372}
 373
 374int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
 375		    uint64_t *keys, void *value_le)
 376{
 377	unsigned level, last_level = info->levels - 1;
 378	int r = -ENODATA;
 379	uint64_t rkey;
 380	__le64 internal_value_le;
 381	struct ro_spine spine;
 382
 383	init_ro_spine(&spine, info);
 384	for (level = 0; level < info->levels; level++) {
 385		size_t size;
 386		void *value_p;
 387
 388		if (level == last_level) {
 389			value_p = value_le;
 390			size = info->value_type.size;
 391
 392		} else {
 393			value_p = &internal_value_le;
 394			size = sizeof(uint64_t);
 395		}
 396
 397		r = btree_lookup_raw(&spine, root, keys[level],
 398				     lower_bound, &rkey,
 399				     value_p, size);
 400
 401		if (!r) {
 402			if (rkey != keys[level]) {
 403				exit_ro_spine(&spine);
 404				return -ENODATA;
 405			}
 406		} else {
 407			exit_ro_spine(&spine);
 408			return r;
 409		}
 410
 411		root = le64_to_cpu(internal_value_le);
 412	}
 413	exit_ro_spine(&spine);
 414
 415	return r;
 416}
 417EXPORT_SYMBOL_GPL(dm_btree_lookup);
 418
 419static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
 420				       uint64_t key, uint64_t *rkey, void *value_le)
 421{
 422	int r, i;
 423	uint32_t flags, nr_entries;
 424	struct dm_block *node;
 425	struct btree_node *n;
 426
 427	r = bn_read_lock(info, root, &node);
 428	if (r)
 429		return r;
 430
 431	n = dm_block_data(node);
 432	flags = le32_to_cpu(n->header.flags);
 433	nr_entries = le32_to_cpu(n->header.nr_entries);
 434
 435	if (flags & INTERNAL_NODE) {
 436		i = lower_bound(n, key);
 437		if (i < 0) {
 438			/*
 439			 * avoid early -ENODATA return when all entries are
 440			 * higher than the search @key.
 441			 */
 442			i = 0;
 443		}
 444		if (i >= nr_entries) {
 445			r = -ENODATA;
 446			goto out;
 447		}
 448
 449		r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
 450		if (r == -ENODATA && i < (nr_entries - 1)) {
 451			i++;
 452			r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
 453		}
 454
 455	} else {
 456		i = upper_bound(n, key);
 457		if (i < 0 || i >= nr_entries) {
 458			r = -ENODATA;
 459			goto out;
 460		}
 461
 462		*rkey = le64_to_cpu(n->keys[i]);
 463		memcpy(value_le, value_ptr(n, i), info->value_type.size);
 464	}
 465out:
 466	dm_tm_unlock(info->tm, node);
 467	return r;
 468}
 469
 470int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
 471			 uint64_t *keys, uint64_t *rkey, void *value_le)
 472{
 473	unsigned int level;
 474	int r = -ENODATA;
 475	__le64 internal_value_le;
 476	struct ro_spine spine;
 477
 478	init_ro_spine(&spine, info);
 479	for (level = 0; level < info->levels - 1u; level++) {
 480		r = btree_lookup_raw(&spine, root, keys[level],
 481				     lower_bound, rkey,
 482				     &internal_value_le, sizeof(uint64_t));
 483		if (r)
 484			goto out;
 485
 486		if (*rkey != keys[level]) {
 487			r = -ENODATA;
 488			goto out;
 489		}
 490
 491		root = le64_to_cpu(internal_value_le);
 492	}
 493
 494	r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
 495out:
 496	exit_ro_spine(&spine);
 497	return r;
 498}
 499
 500EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
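
/*
 * Usage sketch (illustrative only): finding the first mapping at or
 * after a given key, which is how sparse ranges can be scanned without
 * probing every key.  Assumes a single level tree with __le64 values.
 *
 *	uint64_t key = 100, rkey;
 *	__le64 value_le;
 *
 *	r = dm_btree_lookup_next(info, root, &key, &rkey, &value_le);
 *	// on success, rkey is the smallest key >= 100 present in the
 *	// tree and value_le holds its (little-endian) value
 */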
 501
 502/*
 503	 * Splits a node by creating a sibling node and shifting half the node's
 504 * contents across.  Assumes there is a parent node, and it has room for
 505 * another child.
 506 *
 507 * Before:
 508 *	  +--------+
 509 *	  | Parent |
 510 *	  +--------+
 511 *	     |
 512 *	     v
 513 *	+----------+
 514 *	| A ++++++ |
 515 *	+----------+
 516 *
 517 *
 518 * After:
 519 *		+--------+
 520 *		| Parent |
 521 *		+--------+
 522 *		  |	|
 523 *		  v	+------+
 524 *	    +---------+	       |
 525 *	    | A* +++  |	       v
 526 *	    +---------+	  +-------+
 527 *			  | B +++ |
 528 *			  +-------+
 529 *
 530 * Where A* is a shadow of A.
 531 */
 532	static int btree_split_sibling(struct shadow_spine *s, unsigned int parent_index,
 533			       uint64_t key)
 534{
 535	int r;
 536	size_t size;
 537	unsigned int nr_left, nr_right;
 538	struct dm_block *left, *right, *parent;
 539	struct btree_node *ln, *rn, *pn;
 540	__le64 location;
 541
 542	left = shadow_current(s);
 543
 544	r = new_block(s->info, &right);
 545	if (r < 0)
 546		return r;
 547
 548	ln = dm_block_data(left);
 549	rn = dm_block_data(right);
 550
 551	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
 552	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
 553
 554	ln->header.nr_entries = cpu_to_le32(nr_left);
 555
 556	rn->header.flags = ln->header.flags;
 557	rn->header.nr_entries = cpu_to_le32(nr_right);
 558	rn->header.max_entries = ln->header.max_entries;
 559	rn->header.value_size = ln->header.value_size;
 560	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
 561
 562	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
 563		sizeof(uint64_t) : s->info->value_type.size;
 564	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
 565	       size * nr_right);
 566
 567	/*
 568	 * Patch up the parent
 569	 */
 570	parent = shadow_parent(s);
 571
 572	pn = dm_block_data(parent);
 573	location = cpu_to_le64(dm_block_location(left));
 574	__dm_bless_for_disk(&location);
 575	memcpy_disk(value_ptr(pn, parent_index),
 576		    &location, sizeof(__le64));
 577
 578	location = cpu_to_le64(dm_block_location(right));
 579	__dm_bless_for_disk(&location);
 580
 581	r = insert_at(sizeof(__le64), pn, parent_index + 1,
 582		      le64_to_cpu(rn->keys[0]), &location);
 583	if (r) {
 584		unlock_block(s->info, right);
 585		return r;
 586	}
 587
 588	if (key < le64_to_cpu(rn->keys[0])) {
 589		unlock_block(s->info, right);
 590		s->nodes[1] = left;
 591	} else {
 592		unlock_block(s->info, left);
 593		s->nodes[1] = right;
 594	}
 595
 596	return 0;
 597}
 598
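/*
 * Worked example for the split above (illustrative): if the shadowed
 * node A* holds 5 entries, nr_left = 5 / 2 = 2 entries stay in A* and
 * nr_right = 3 move to the new sibling B; rn->keys[0] then becomes the
 * separator key inserted into the parent at parent_index + 1.
 */
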
 599/*
 600 * Splits a node by creating two new children beneath the given node.
 601 *
 602 * Before:
 603 *	  +----------+
 604 *	  | A ++++++ |
 605 *	  +----------+
 606 *
 607 *
 608 * After:
 609 *	+------------+
 610 *	| A (shadow) |
 611 *	+------------+
 612 *	    |	|
 613 *   +------+	+----+
 614 *   |		     |
 615 *   v		     v
 616 * +-------+	 +-------+
 617 * | B +++ |	 | C +++ |
 618 * +-------+	 +-------+
 619 */
 620static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
 621{
 622	int r;
 623	size_t size;
 624	unsigned int nr_left, nr_right;
 625	struct dm_block *left, *right, *new_parent;
 626	struct btree_node *pn, *ln, *rn;
 627	__le64 val;
 628
 629	new_parent = shadow_current(s);
 630
 631	r = new_block(s->info, &left);
 632	if (r < 0)
 633		return r;
 634
 635	r = new_block(s->info, &right);
 636	if (r < 0) {
 637		unlock_block(s->info, left);
 638		return r;
 639	}
 640
 641	pn = dm_block_data(new_parent);
 642	ln = dm_block_data(left);
 643	rn = dm_block_data(right);
 644
 645	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
 646	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
 647
 648	ln->header.flags = pn->header.flags;
 649	ln->header.nr_entries = cpu_to_le32(nr_left);
 650	ln->header.max_entries = pn->header.max_entries;
 651	ln->header.value_size = pn->header.value_size;
 652
 653	rn->header.flags = pn->header.flags;
 654	rn->header.nr_entries = cpu_to_le32(nr_right);
 655	rn->header.max_entries = pn->header.max_entries;
 656	rn->header.value_size = pn->header.value_size;
 657
 658	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
 659	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
 660
 661	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
 662		sizeof(__le64) : s->info->value_type.size;
 663	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
 664	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
 665	       nr_right * size);
 666
 667	/* new_parent should just point to left and right now */
 668	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
 669	pn->header.nr_entries = cpu_to_le32(2);
 670	pn->header.max_entries = cpu_to_le32(
 671		calc_max_entries(sizeof(__le64),
 672				 dm_bm_block_size(
 673					 dm_tm_get_bm(s->info->tm))));
 674	pn->header.value_size = cpu_to_le32(sizeof(__le64));
 675
 676	val = cpu_to_le64(dm_block_location(left));
 677	__dm_bless_for_disk(&val);
 678	pn->keys[0] = ln->keys[0];
 679	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));
 680
 681	val = cpu_to_le64(dm_block_location(right));
 682	__dm_bless_for_disk(&val);
 683	pn->keys[1] = rn->keys[0];
 684	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 685
 686	unlock_block(s->info, left);
 687	unlock_block(s->info, right);
 688	return 0;
 689}
 690
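/*
 * Worked example for btree_split_beneath() (illustrative): splitting a
 * full root with 6 entries leaves the shadowed root as an internal node
 * with exactly two children, B holding entries 0..2 and C holding
 * entries 3..5; the root's two keys become B's and C's first keys.
 */
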
 691static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
 692			    struct dm_btree_value_type *vt,
 693			    uint64_t key, unsigned int *index)
 694{
 695	int r, i = *index, top = 1;
 696	struct btree_node *node;
 697
 698	for (;;) {
 699		r = shadow_step(s, root, vt);
 700		if (r < 0)
 701			return r;
 702
 703		node = dm_block_data(shadow_current(s));
 704
 705		/*
 706		 * We have to patch up the parent node.  This is ugly, but I
 707		 * don't see a way to do it automatically as part of the spine
 708		 * op.
 709		 */
 710		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary. */
 711			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
 712
 713			__dm_bless_for_disk(&location);
 714			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
 715				    &location, sizeof(__le64));
 716		}
 717
 718		node = dm_block_data(shadow_current(s));
 719
 720		if (node->header.nr_entries == node->header.max_entries) {
 721			if (top)
 722				r = btree_split_beneath(s, key);
 723			else
 724				r = btree_split_sibling(s, i, key);
 725
 726			if (r < 0)
 727				return r;
 728		}
 729
 730		node = dm_block_data(shadow_current(s));
 731
 732		i = lower_bound(node, key);
 733
 734		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
 735			break;
 736
 737		if (i < 0) {
 738			/* change the bounds on the lowest key */
 739			node->keys[0] = cpu_to_le64(key);
 740			i = 0;
 741		}
 742
 743		root = value64(node, i);
 744		top = 0;
 745	}
 746
 747	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
 748		i++;
 749
 750	*index = i;
 751	return 0;
 752}
 753
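/*
 * Note (editorial, illustrative): on success *index identifies the slot
 * for @key in the returned leaf: e.g. inserting key 7 into a leaf with
 * keys {5, 10} gives i = lower_bound() = 0, keys[0] != 7, so *index
 * becomes 1, the position at which 7 must be spliced in.
 */
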
 754	static bool need_insert(struct btree_node *node, uint64_t *keys,
 755				unsigned int level, unsigned int index)
 756	{
 757		return ((index >= le32_to_cpu(node->header.nr_entries)) ||
 758			(le64_to_cpu(node->keys[index]) != keys[level]));
 759}
 760
 761static int insert(struct dm_btree_info *info, dm_block_t root,
 762		  uint64_t *keys, void *value, dm_block_t *new_root,
 763		  int *inserted)
 764		  __dm_written_to_disk(value)
 765{
 766	int r;
 767	unsigned int level, index = -1, last_level = info->levels - 1;
 768	dm_block_t block = root;
 769	struct shadow_spine spine;
 770	struct btree_node *n;
 771	struct dm_btree_value_type le64_type;
 772
 773	init_le64_type(info->tm, &le64_type);
 774	init_shadow_spine(&spine, info);
 775
 776	for (level = 0; level < (info->levels - 1); level++) {
 777		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
 778		if (r < 0)
 779			goto bad;
 780
 781		n = dm_block_data(shadow_current(&spine));
 782
 783		if (need_insert(n, keys, level, index)) {
 784			dm_block_t new_tree;
 785			__le64 new_le;
 786
 787			r = dm_btree_empty(info, &new_tree);
 788			if (r < 0)
 789				goto bad;
 790
 791			new_le = cpu_to_le64(new_tree);
 792			__dm_bless_for_disk(&new_le);
 793
 794			r = insert_at(sizeof(uint64_t), n, index,
 795				      keys[level], &new_le);
 796			if (r)
 797				goto bad;
 798		}
 799
 800		if (level < last_level)
 801			block = value64(n, index);
 802	}
 803
 804	r = btree_insert_raw(&spine, block, &info->value_type,
 805			     keys[level], &index);
 806	if (r < 0)
 807		goto bad;
 808
 809	n = dm_block_data(shadow_current(&spine));
 810
 811	if (need_insert(n, keys, level, index)) {
 812		if (inserted)
 813			*inserted = 1;
 814
 815		r = insert_at(info->value_type.size, n, index,
 816			      keys[level], value);
 817		if (r)
 818			goto bad_unblessed;
 819	} else {
 820		if (inserted)
 821			*inserted = 0;
 822
 823		if (info->value_type.dec &&
 824		    (!info->value_type.equal ||
 825		     !info->value_type.equal(
 826			     info->value_type.context,
 827			     value_ptr(n, index),
 828			     value))) {
 829			info->value_type.dec(info->value_type.context,
 830					     value_ptr(n, index));
 831		}
 832		memcpy_disk(value_ptr(n, index),
 833			    value, info->value_type.size);
 834	}
 835
 836	*new_root = shadow_root(&spine);
 837	exit_shadow_spine(&spine);
 838
 839	return 0;
 840
 841bad:
 842	__dm_unbless_for_disk(value);
 843bad_unblessed:
 844	exit_shadow_spine(&spine);
 845	return r;
 846}
 847
 848int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
 849		    uint64_t *keys, void *value, dm_block_t *new_root)
 850		    __dm_written_to_disk(value)
 851{
 852	return insert(info, root, keys, value, new_root, NULL);
 853}
 854EXPORT_SYMBOL_GPL(dm_btree_insert);
 855
 856int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
 857			   uint64_t *keys, void *value, dm_block_t *new_root,
 858			   int *inserted)
 859			   __dm_written_to_disk(value)
 860{
 861	return insert(info, root, keys, value, new_root, inserted);
 862}
 863EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
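
/*
 * Usage sketch (illustrative only): insert-or-overwrite under copy on
 * write.  Shadowing may relocate blocks on every insert, so the caller
 * must continue with the returned root.  Assumes a single level tree
 * with __le64 values.
 *
 *	__le64 value_le = cpu_to_le64(data_block);
 *	dm_block_t new_root;
 *	int inserted;
 *
 *	r = dm_btree_insert_notify(info, root, &key, &value_le,
 *				   &new_root, &inserted);
 *	if (!r)
 *		root = new_root;	// the old root is now stale
 *	// inserted == 1 for a fresh key, 0 for an overwrite
 */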
 864
 865/*----------------------------------------------------------------*/
 866
 867static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
 868		    uint64_t *result_key, dm_block_t *next_block)
 869{
 870	int i, r;
 871	uint32_t flags;
 872
 873	do {
 874		r = ro_step(s, block);
 875		if (r < 0)
 876			return r;
 877
 878		flags = le32_to_cpu(ro_node(s)->header.flags);
 879		i = le32_to_cpu(ro_node(s)->header.nr_entries);
 880		if (!i)
 881			return -ENODATA;
 882		else
 883			i--;
 884
 885		if (find_highest)
 886			*result_key = le64_to_cpu(ro_node(s)->keys[i]);
 887		else
 888			*result_key = le64_to_cpu(ro_node(s)->keys[0]);
 889
 890		if (next_block || flags & INTERNAL_NODE) {
 891			if (find_highest)
 892				block = value64(ro_node(s), i);
 893			else
 894				block = value64(ro_node(s), 0);
 895		}
 896
 897	} while (flags & INTERNAL_NODE);
 898
 899	if (next_block)
 900		*next_block = block;
 901	return 0;
 902}
 903
 904static int dm_btree_find_key(struct dm_btree_info *info, dm_block_t root,
 905			     bool find_highest, uint64_t *result_keys)
 906{
 907	int r = 0, count = 0, level;
 908	struct ro_spine spine;
 909
 910	init_ro_spine(&spine, info);
 911	for (level = 0; level < info->levels; level++) {
 912		r = find_key(&spine, root, find_highest, result_keys + level,
 913			     level == info->levels - 1 ? NULL : &root);
 914		if (r == -ENODATA) {
 915			r = 0;
 916			break;
 917
 918		} else if (r)
 919			break;
 920
 921		count++;
 922	}
 923	exit_ro_spine(&spine);
 924
 925	return r ? r : count;
 926}
 927
 928int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
 929			      uint64_t *result_keys)
 930{
 931	return dm_btree_find_key(info, root, true, result_keys);
 932}
 933EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
 934
 935int dm_btree_find_lowest_key(struct dm_btree_info *info, dm_block_t root,
 936			     uint64_t *result_keys)
 937{
 938	return dm_btree_find_key(info, root, false, result_keys);
 939}
 940EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
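
/*
 * Usage sketch (illustrative only): discovering the key range held in a
 * single level tree.  Both helpers return the number of levels they
 * filled in, so a result of 1 here means a key was found.
 *
 *	uint64_t lo[1], hi[1];
 *
 *	r = dm_btree_find_lowest_key(info, root, lo);
 *	r2 = dm_btree_find_highest_key(info, root, hi);
 *	// with r == 1 && r2 == 1, all keys lie within [lo[0], hi[0]]
 */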
 941
 942/*----------------------------------------------------------------*/
 943
 944/*
 945 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 946 * space.  Also this only works for single level trees.
 947 */
 948static int walk_node(struct dm_btree_info *info, dm_block_t block,
 949		     int (*fn)(void *context, uint64_t *keys, void *leaf),
 950		     void *context)
 951{
 952	int r;
 953	unsigned int i, nr;
 954	struct dm_block *node;
 955	struct btree_node *n;
 956	uint64_t keys;
 957
 958	r = bn_read_lock(info, block, &node);
 959	if (r)
 960		return r;
 961
 962	n = dm_block_data(node);
 963
 964	nr = le32_to_cpu(n->header.nr_entries);
 965	for (i = 0; i < nr; i++) {
 966		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
 967			r = walk_node(info, value64(n, i), fn, context);
 968			if (r)
 969				goto out;
 970		} else {
 971			keys = le64_to_cpu(*key_ptr(n, i));
 972			r = fn(context, &keys, value_ptr(n, i));
 973			if (r)
 974				goto out;
 975		}
 976	}
 977
 978out:
 979	dm_tm_unlock(info->tm, node);
 980	return r;
 981}
 982
 983int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
 984		  int (*fn)(void *context, uint64_t *keys, void *leaf),
 985		  void *context)
 986{
 987	BUG_ON(info->levels > 1);
 988	return walk_node(info, root, fn, context);
 989}
 990EXPORT_SYMBOL_GPL(dm_btree_walk);
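
/*
 * Usage sketch (illustrative only): counting the leaves of a one level
 * tree.  The callback receives each key and a pointer to the on-disk
 * (little-endian) value; returning non-zero aborts the walk.
 *
 *	static int count_fn(void *context, uint64_t *keys, void *leaf)
 *	{
 *		(*(unsigned int *)context)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *
 *	r = dm_btree_walk(info, root, count_fn, &nr);
 */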
 991
 992/*----------------------------------------------------------------*/
 993
 994static void prefetch_values(struct dm_btree_cursor *c)
 995{
 996	unsigned int i, nr;
 997	__le64 value_le;
 998	struct cursor_node *n = c->nodes + c->depth - 1;
 999	struct btree_node *bn = dm_block_data(n->b);
1000	struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);
1001
1002	BUG_ON(c->info->value_type.size != sizeof(value_le));
1003
1004	nr = le32_to_cpu(bn->header.nr_entries);
1005	for (i = 0; i < nr; i++) {
1006		memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
1007		dm_bm_prefetch(bm, le64_to_cpu(value_le));
1008	}
1009}
1010
1011static bool leaf_node(struct dm_btree_cursor *c)
1012{
1013	struct cursor_node *n = c->nodes + c->depth - 1;
1014	struct btree_node *bn = dm_block_data(n->b);
1015
1016	return le32_to_cpu(bn->header.flags) & LEAF_NODE;
1017}
1018
1019static int push_node(struct dm_btree_cursor *c, dm_block_t b)
1020{
1021	int r;
1022	struct cursor_node *n = c->nodes + c->depth;
1023
1024	if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
1025		DMERR("couldn't push cursor node, stack depth too high");
1026		return -EINVAL;
1027	}
1028
1029	r = bn_read_lock(c->info, b, &n->b);
1030	if (r)
1031		return r;
1032
1033	n->index = 0;
1034	c->depth++;
1035
1036	if (c->prefetch_leaves || !leaf_node(c))
1037		prefetch_values(c);
1038
1039	return 0;
1040}
1041
1042static void pop_node(struct dm_btree_cursor *c)
1043{
1044	c->depth--;
1045	unlock_block(c->info, c->nodes[c->depth].b);
1046}
1047
1048static int inc_or_backtrack(struct dm_btree_cursor *c)
1049{
1050	struct cursor_node *n;
1051	struct btree_node *bn;
1052
1053	for (;;) {
1054		if (!c->depth)
1055			return -ENODATA;
1056
1057		n = c->nodes + c->depth - 1;
1058		bn = dm_block_data(n->b);
1059
1060		n->index++;
1061		if (n->index < le32_to_cpu(bn->header.nr_entries))
1062			break;
1063
1064		pop_node(c);
1065	}
1066
1067	return 0;
1068}
1069
1070static int find_leaf(struct dm_btree_cursor *c)
1071{
1072	int r = 0;
1073	struct cursor_node *n;
1074	struct btree_node *bn;
1075	__le64 value_le;
1076
1077	for (;;) {
1078		n = c->nodes + c->depth - 1;
1079		bn = dm_block_data(n->b);
1080
1081		if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
1082			break;
1083
1084		memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
1085		r = push_node(c, le64_to_cpu(value_le));
1086		if (r) {
1087			DMERR("push_node failed");
1088			break;
1089		}
1090	}
1091
1092	if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
1093		return -ENODATA;
1094
1095	return r;
1096}
1097
1098int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
1099			  bool prefetch_leaves, struct dm_btree_cursor *c)
1100{
1101	int r;
1102
1103	c->info = info;
1104	c->root = root;
1105	c->depth = 0;
1106	c->prefetch_leaves = prefetch_leaves;
1107
1108	r = push_node(c, root);
1109	if (r)
1110		return r;
1111
1112	return find_leaf(c);
1113}
1114EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);
1115
1116void dm_btree_cursor_end(struct dm_btree_cursor *c)
1117{
1118	while (c->depth)
1119		pop_node(c);
1120}
1121EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
1122
1123int dm_btree_cursor_next(struct dm_btree_cursor *c)
1124{
1125	int r = inc_or_backtrack(c);
 
1126	if (!r) {
1127		r = find_leaf(c);
1128		if (r)
1129			DMERR("find_leaf failed");
1130	}
1131
1132	return r;
1133}
1134EXPORT_SYMBOL_GPL(dm_btree_cursor_next);
1135
1136int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count)
1137{
1138	int r = 0;
1139
1140	while (count-- && !r)
1141		r = dm_btree_cursor_next(c);
1142
1143	return r;
1144}
1145EXPORT_SYMBOL_GPL(dm_btree_cursor_skip);
1146
1147int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
1148{
1149	if (c->depth) {
1150		struct cursor_node *n = c->nodes + c->depth - 1;
1151		struct btree_node *bn = dm_block_data(n->b);
1152
1153		if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
1154			return -EINVAL;
1155
1156		*key = le64_to_cpu(*key_ptr(bn, n->index));
1157		memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
1158		return 0;
1159
1160	} else
1161		return -ENODATA;
1162}
1163EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
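
/*
 * Usage sketch (illustrative only): iterating a tree of __le64 values
 * in key order with the cursor API.  -ENODATA marks the end of the
 * tree rather than a hard failure.
 *
 *	struct dm_btree_cursor c;
 *	uint64_t key;
 *	__le64 value_le;
 *
 *	for (r = dm_btree_cursor_begin(info, root, false, &c);
 *	     !r;
 *	     r = dm_btree_cursor_next(&c)) {
 *		r = dm_btree_cursor_get_value(&c, &key, &value_le);
 *		if (r)
 *			break;
 *		// use key and value_le
 *	}
 *	dm_btree_cursor_end(&c);
 *	if (r == -ENODATA)
 *		r = 0;	// ran off the end: normal termination
 */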