v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/ceph/ceph_debug.h>
   4
   5#include <linux/module.h>
   6#include <linux/slab.h>
   7
   8#include <linux/ceph/libceph.h>
   9#include <linux/ceph/osdmap.h>
  10#include <linux/ceph/decode.h>
  11#include <linux/crush/hash.h>
  12#include <linux/crush/mapper.h>
  13
  14char *ceph_osdmap_state_str(char *str, int len, u32 state)
  15{
  16	if (!len)
  17		return str;
  18
  19	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
  20		snprintf(str, len, "exists, up");
  21	else if (state & CEPH_OSD_EXISTS)
  22		snprintf(str, len, "exists");
  23	else if (state & CEPH_OSD_UP)
  24		snprintf(str, len, "up");
  25	else
  26		snprintf(str, len, "doesn't exist");
  27
  28	return str;
  29}
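
/*
 * Example (editor's illustration, not part of the original file): a
 * hypothetical caller that formats an OSD's state bits for a debug line.
 * The buffer size and the pr_debug() call are assumptions of this sketch.
 */
static void example_log_osd_state(int osd, u32 state)
{
	char buf[32];

	pr_debug("osd%d is %s\n", osd,
		 ceph_osdmap_state_str(buf, sizeof(buf), state));
}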
  30
  31/* maps */
  32
  33static int calc_bits_of(unsigned int t)
  34{
  35	int b = 0;
  36	while (t) {
  37		t = t >> 1;
  38		b++;
  39	}
  40	return b;
  41}
  42
  43/*
  44 * the foo_mask is the smallest value 2^n-1 that is >= foo.
  45 */
  46static void calc_pg_masks(struct ceph_pg_pool_info *pi)
  47{
  48	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
  49	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
  50}
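
/*
 * Worked example (editor's illustration, not part of the original file):
 * for pg_num = 12, calc_bits_of(12 - 1) = calc_bits_of(11) = 4, so
 * pg_num_mask = (1 << 4) - 1 = 0xf, i.e. 15 is the smallest value of the
 * form 2^n - 1 that is >= 11.  The masks are later combined with pg_num
 * by ceph_stable_mod() (a helper in <linux/ceph/osdmap.h>) to fold a raw
 * placement seed into the pool's [0, pg_num) range.
 */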
  51
  52/*
  53 * decode crush map
  54 */
  55static int crush_decode_uniform_bucket(void **p, void *end,
  56				       struct crush_bucket_uniform *b)
  57{
  58	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
  59	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
  60	b->item_weight = ceph_decode_32(p);
  61	return 0;
  62bad:
  63	return -EINVAL;
  64}
  65
  66static int crush_decode_list_bucket(void **p, void *end,
  67				    struct crush_bucket_list *b)
  68{
  69	int j;
  70	dout("crush_decode_list_bucket %p to %p\n", *p, end);
  71	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
  72	if (b->item_weights == NULL)
  73		return -ENOMEM;
  74	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
  75	if (b->sum_weights == NULL)
  76		return -ENOMEM;
  77	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
  78	for (j = 0; j < b->h.size; j++) {
  79		b->item_weights[j] = ceph_decode_32(p);
  80		b->sum_weights[j] = ceph_decode_32(p);
  81	}
  82	return 0;
  83bad:
  84	return -EINVAL;
  85}
  86
  87static int crush_decode_tree_bucket(void **p, void *end,
  88				    struct crush_bucket_tree *b)
  89{
  90	int j;
  91	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
  92	ceph_decode_8_safe(p, end, b->num_nodes, bad);
  93	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
  94	if (b->node_weights == NULL)
  95		return -ENOMEM;
  96	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
  97	for (j = 0; j < b->num_nodes; j++)
  98		b->node_weights[j] = ceph_decode_32(p);
  99	return 0;
 100bad:
 101	return -EINVAL;
 102}
 103
 104static int crush_decode_straw_bucket(void **p, void *end,
 105				     struct crush_bucket_straw *b)
 106{
 107	int j;
 108	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
 109	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
 110	if (b->item_weights == NULL)
 111		return -ENOMEM;
 112	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
 113	if (b->straws == NULL)
 114		return -ENOMEM;
 115	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
 116	for (j = 0; j < b->h.size; j++) {
 117		b->item_weights[j] = ceph_decode_32(p);
 118		b->straws[j] = ceph_decode_32(p);
 119	}
 120	return 0;
 121bad:
 122	return -EINVAL;
 123}
 124
 125static int crush_decode_straw2_bucket(void **p, void *end,
 126				      struct crush_bucket_straw2 *b)
 127{
 128	int j;
 129	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
 130	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
 131	if (b->item_weights == NULL)
 132		return -ENOMEM;
 133	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
 134	for (j = 0; j < b->h.size; j++)
 135		b->item_weights[j] = ceph_decode_32(p);
 136	return 0;
 137bad:
 138	return -EINVAL;
 139}
 140
 141struct crush_name_node {
 142	struct rb_node cn_node;
 143	int cn_id;
 144	char cn_name[];
 145};
 146
 147static struct crush_name_node *alloc_crush_name(size_t name_len)
 148{
 149	struct crush_name_node *cn;
 150
 151	cn = kmalloc(sizeof(*cn) + name_len + 1, GFP_NOIO);
 152	if (!cn)
 153		return NULL;
 154
 155	RB_CLEAR_NODE(&cn->cn_node);
 156	return cn;
 157}
 158
 159static void free_crush_name(struct crush_name_node *cn)
 160{
 161	WARN_ON(!RB_EMPTY_NODE(&cn->cn_node));
 162
 163	kfree(cn);
 164}
 165
 166DEFINE_RB_FUNCS(crush_name, struct crush_name_node, cn_id, cn_node)
 167
 168static int decode_crush_names(void **p, void *end, struct rb_root *root)
 169{
 170	u32 n;
 171
 172	ceph_decode_32_safe(p, end, n, e_inval);
 173	while (n--) {
 174		struct crush_name_node *cn;
 175		int id;
 176		u32 name_len;
 177
 178		ceph_decode_32_safe(p, end, id, e_inval);
 179		ceph_decode_32_safe(p, end, name_len, e_inval);
 180		ceph_decode_need(p, end, name_len, e_inval);
 181
 182		cn = alloc_crush_name(name_len);
 183		if (!cn)
 184			return -ENOMEM;
 185
 186		cn->cn_id = id;
 187		memcpy(cn->cn_name, *p, name_len);
 188		cn->cn_name[name_len] = '\0';
 189		*p += name_len;
 190
 191		if (!__insert_crush_name(root, cn)) {
 192			free_crush_name(cn);
 193			return -EEXIST;
 194		}
 195	}
 196
 197	return 0;
 198
 199e_inval:
 200	return -EINVAL;
 201}
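
/*
 * Wire layout consumed by decode_crush_names() (editor's illustration,
 * inferred from the decoder above; integers are little-endian per the
 * ceph_decode_* helpers):
 *
 *	u32 n				number of (id, name) entries
 *	repeated n times:
 *		u32 id			CRUSH type id or item id
 *		u32 name_len
 *		u8  name[name_len]	no NUL terminator on the wire;
 *					one is appended after the copy
 */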
 202
 203void clear_crush_names(struct rb_root *root)
 204{
 205	while (!RB_EMPTY_ROOT(root)) {
 206		struct crush_name_node *cn =
 207		    rb_entry(rb_first(root), struct crush_name_node, cn_node);
 208
 209		erase_crush_name(root, cn);
 210		free_crush_name(cn);
 211	}
 212}
 213
 214static struct crush_choose_arg_map *alloc_choose_arg_map(void)
 215{
 216	struct crush_choose_arg_map *arg_map;
 217
 218	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
 219	if (!arg_map)
 220		return NULL;
 221
 222	RB_CLEAR_NODE(&arg_map->node);
 223	return arg_map;
 224}
 225
 226static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
 227{
 228	if (arg_map) {
 229		int i, j;
 230
 231		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
 232
 233		for (i = 0; i < arg_map->size; i++) {
 234			struct crush_choose_arg *arg = &arg_map->args[i];
 235
 236			for (j = 0; j < arg->weight_set_size; j++)
 237				kfree(arg->weight_set[j].weights);
 238			kfree(arg->weight_set);
 239			kfree(arg->ids);
 240		}
 241		kfree(arg_map->args);
 242		kfree(arg_map);
 243	}
 244}
 245
 246DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
 247		node);
 248
 249void clear_choose_args(struct crush_map *c)
 250{
 251	while (!RB_EMPTY_ROOT(&c->choose_args)) {
 252		struct crush_choose_arg_map *arg_map =
 253		    rb_entry(rb_first(&c->choose_args),
 254			     struct crush_choose_arg_map, node);
 255
 256		erase_choose_arg_map(&c->choose_args, arg_map);
 257		free_choose_arg_map(arg_map);
 258	}
 259}
 260
 261static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
 262{
 263	u32 *a = NULL;
 264	u32 len;
 265	int ret;
 266
 267	ceph_decode_32_safe(p, end, len, e_inval);
 268	if (len) {
 269		u32 i;
 270
 271		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
 272		if (!a) {
 273			ret = -ENOMEM;
 274			goto fail;
 275		}
 276
 277		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
 278		for (i = 0; i < len; i++)
 279			a[i] = ceph_decode_32(p);
 280	}
 281
 282	*plen = len;
 283	return a;
 284
 285e_inval:
 286	ret = -EINVAL;
 287fail:
 288	kfree(a);
 289	return ERR_PTR(ret);
 290}
 291
 292/*
 293 * Assumes @arg is zero-initialized.
 294 */
 295static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
 296{
 297	int ret;
 298
 299	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
 300	if (arg->weight_set_size) {
 301		u32 i;
 302
 303		arg->weight_set = kmalloc_array(arg->weight_set_size,
 304						sizeof(*arg->weight_set),
 305						GFP_NOIO);
 306		if (!arg->weight_set)
 307			return -ENOMEM;
 308
 309		for (i = 0; i < arg->weight_set_size; i++) {
 310			struct crush_weight_set *w = &arg->weight_set[i];
 311
 312			w->weights = decode_array_32_alloc(p, end, &w->size);
 313			if (IS_ERR(w->weights)) {
 314				ret = PTR_ERR(w->weights);
 315				w->weights = NULL;
 316				return ret;
 317			}
 318		}
 319	}
 320
 321	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
 322	if (IS_ERR(arg->ids)) {
 323		ret = PTR_ERR(arg->ids);
 324		arg->ids = NULL;
 325		return ret;
 326	}
 327
 328	return 0;
 329
 330e_inval:
 331	return -EINVAL;
 332}
 333
 334static int decode_choose_args(void **p, void *end, struct crush_map *c)
 335{
 336	struct crush_choose_arg_map *arg_map = NULL;
 337	u32 num_choose_arg_maps, num_buckets;
 338	int ret;
 339
 340	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
 341	while (num_choose_arg_maps--) {
 342		arg_map = alloc_choose_arg_map();
 343		if (!arg_map) {
 344			ret = -ENOMEM;
 345			goto fail;
 346		}
 347
 348		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
 349				    e_inval);
 350		arg_map->size = c->max_buckets;
 351		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
 352					GFP_NOIO);
 353		if (!arg_map->args) {
 354			ret = -ENOMEM;
 355			goto fail;
 356		}
 357
 358		ceph_decode_32_safe(p, end, num_buckets, e_inval);
 359		while (num_buckets--) {
 360			struct crush_choose_arg *arg;
 361			u32 bucket_index;
 362
 363			ceph_decode_32_safe(p, end, bucket_index, e_inval);
 364			if (bucket_index >= arg_map->size)
 365				goto e_inval;
 366
 367			arg = &arg_map->args[bucket_index];
 368			ret = decode_choose_arg(p, end, arg);
 369			if (ret)
 370				goto fail;
 371
 372			if (arg->ids_size &&
 373			    arg->ids_size != c->buckets[bucket_index]->size)
 374				goto e_inval;
 375		}
 376
 377		insert_choose_arg_map(&c->choose_args, arg_map);
 378	}
 379
 380	return 0;
 381
 382e_inval:
 383	ret = -EINVAL;
 384fail:
 385	free_choose_arg_map(arg_map);
 386	return ret;
 387}
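
/*
 * Wire layout consumed by decode_choose_args() (editor's illustration,
 * inferred from the two decoders above):
 *
 *	u32 num_choose_arg_maps
 *	repeated per map:
 *		u64 choose_args_index
 *		u32 num_buckets
 *		repeated per bucket:
 *			u32 bucket_index	(must be < max_buckets)
 *			u32 weight_set_size
 *			repeated per weight set:
 *				u32 size, then size x u32 weights
 *			u32 ids_size, then ids_size x u32 ids
 *
 * Note that arg_map->size is taken from c->max_buckets, not from the wire.
 */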
 388
 389static void crush_finalize(struct crush_map *c)
 390{
 391	__s32 b;
 392
 393	/* Space for the array of pointers to per-bucket workspace */
 394	c->working_size = sizeof(struct crush_work) +
 395	    c->max_buckets * sizeof(struct crush_work_bucket *);
 396
 397	for (b = 0; b < c->max_buckets; b++) {
 398		if (!c->buckets[b])
 399			continue;
 400
 401		switch (c->buckets[b]->alg) {
 402		default:
 403			/*
 404			 * The base case, permutation variables and
 405			 * the pointer to the permutation array.
 406			 */
 407			c->working_size += sizeof(struct crush_work_bucket);
 408			break;
 409		}
 410		/* Every bucket has a permutation array. */
 411		c->working_size += c->buckets[b]->size * sizeof(__u32);
 412	}
 413}
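
/*
 * Worked example (editor's illustration, not part of the original file):
 * for a map with max_buckets = 2 where only bucket 0 exists and holds
 * 4 items, the scratch space computed above is
 *
 *	sizeof(struct crush_work)
 *	+ 2 * sizeof(struct crush_work_bucket *)	pointer array
 *	+ 1 * sizeof(struct crush_work_bucket)		the one real bucket
 *	+ 4 * sizeof(__u32)				its permutation array
 *
 * The NULL bucket slot contributes nothing beyond its pointer.
 */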
 414
 415static struct crush_map *crush_decode(void *pbyval, void *end)
 416{
 417	struct crush_map *c;
 418	int err;
 419	int i, j;
 420	void **p = &pbyval;
 421	void *start = pbyval;
 422	u32 magic;
 423
 424	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
 425
 426	c = kzalloc(sizeof(*c), GFP_NOFS);
 427	if (c == NULL)
 428		return ERR_PTR(-ENOMEM);
 429
 430	c->type_names = RB_ROOT;
 431	c->names = RB_ROOT;
 432	c->choose_args = RB_ROOT;
 433
 434        /* set tunables to default values */
 435        c->choose_local_tries = 2;
 436        c->choose_local_fallback_tries = 5;
 437        c->choose_total_tries = 19;
 438	c->chooseleaf_descend_once = 0;
 439
 440	ceph_decode_need(p, end, 4*sizeof(u32), bad);
 441	magic = ceph_decode_32(p);
 442	if (magic != CRUSH_MAGIC) {
 443		pr_err("crush_decode magic %x != current %x\n",
 444		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
 445		goto bad;
 446	}
 447	c->max_buckets = ceph_decode_32(p);
 448	c->max_rules = ceph_decode_32(p);
 449	c->max_devices = ceph_decode_32(p);
 450
 451	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
 452	if (c->buckets == NULL)
 453		goto badmem;
 454	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
 455	if (c->rules == NULL)
 456		goto badmem;
 457
 458	/* buckets */
 459	for (i = 0; i < c->max_buckets; i++) {
 460		int size = 0;
 461		u32 alg;
 462		struct crush_bucket *b;
 463
 464		ceph_decode_32_safe(p, end, alg, bad);
 465		if (alg == 0) {
 466			c->buckets[i] = NULL;
 467			continue;
 468		}
 469		dout("crush_decode bucket %d off %x %p to %p\n",
 470		     i, (int)(*p-start), *p, end);
 471
 472		switch (alg) {
 473		case CRUSH_BUCKET_UNIFORM:
 474			size = sizeof(struct crush_bucket_uniform);
 475			break;
 476		case CRUSH_BUCKET_LIST:
 477			size = sizeof(struct crush_bucket_list);
 478			break;
 479		case CRUSH_BUCKET_TREE:
 480			size = sizeof(struct crush_bucket_tree);
 481			break;
 482		case CRUSH_BUCKET_STRAW:
 483			size = sizeof(struct crush_bucket_straw);
 484			break;
 485		case CRUSH_BUCKET_STRAW2:
 486			size = sizeof(struct crush_bucket_straw2);
 487			break;
 488		default:
 489			goto bad;
 490		}
 491		BUG_ON(size == 0);
 492		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
 493		if (b == NULL)
 494			goto badmem;
 495
 496		ceph_decode_need(p, end, 4*sizeof(u32), bad);
 497		b->id = ceph_decode_32(p);
 498		b->type = ceph_decode_16(p);
 499		b->alg = ceph_decode_8(p);
 500		b->hash = ceph_decode_8(p);
 501		b->weight = ceph_decode_32(p);
 502		b->size = ceph_decode_32(p);
 503
 504		dout("crush_decode bucket size %d off %x %p to %p\n",
 505		     b->size, (int)(*p-start), *p, end);
 506
 507		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
 508		if (b->items == NULL)
 509			goto badmem;
 510
 511		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
 512		for (j = 0; j < b->size; j++)
 513			b->items[j] = ceph_decode_32(p);
 514
 515		switch (b->alg) {
 516		case CRUSH_BUCKET_UNIFORM:
 517			err = crush_decode_uniform_bucket(p, end,
 518				  (struct crush_bucket_uniform *)b);
 519			if (err < 0)
 520				goto fail;
 521			break;
 522		case CRUSH_BUCKET_LIST:
 523			err = crush_decode_list_bucket(p, end,
 524			       (struct crush_bucket_list *)b);
 525			if (err < 0)
 526				goto fail;
 527			break;
 528		case CRUSH_BUCKET_TREE:
 529			err = crush_decode_tree_bucket(p, end,
 530				(struct crush_bucket_tree *)b);
 531			if (err < 0)
 532				goto fail;
 533			break;
 534		case CRUSH_BUCKET_STRAW:
 535			err = crush_decode_straw_bucket(p, end,
 536				(struct crush_bucket_straw *)b);
 537			if (err < 0)
 538				goto fail;
 539			break;
 540		case CRUSH_BUCKET_STRAW2:
 541			err = crush_decode_straw2_bucket(p, end,
 542				(struct crush_bucket_straw2 *)b);
 543			if (err < 0)
 544				goto fail;
 545			break;
 546		}
 547	}
 548
 549	/* rules */
 550	dout("rule vec is %p\n", c->rules);
 551	for (i = 0; i < c->max_rules; i++) {
 552		u32 yes;
 553		struct crush_rule *r;
 554
 555		ceph_decode_32_safe(p, end, yes, bad);
 556		if (!yes) {
 557			dout("crush_decode NO rule %d off %x %p to %p\n",
 558			     i, (int)(*p-start), *p, end);
 559			c->rules[i] = NULL;
 560			continue;
 561		}
 562
 563		dout("crush_decode rule %d off %x %p to %p\n",
 564		     i, (int)(*p-start), *p, end);
 565
 566		/* len */
 567		ceph_decode_32_safe(p, end, yes, bad);
 568#if BITS_PER_LONG == 32
 569		if (yes > (ULONG_MAX - sizeof(*r))
 570			  / sizeof(struct crush_rule_step))
 571			goto bad;
 572#endif
 573		r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
 574		c->rules[i] = r;
 575		if (r == NULL)
 576			goto badmem;
 577		dout(" rule %d is at %p\n", i, r);
 578		r->len = yes;
 579		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
 580		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
 581		for (j = 0; j < r->len; j++) {
 582			r->steps[j].op = ceph_decode_32(p);
 583			r->steps[j].arg1 = ceph_decode_32(p);
 584			r->steps[j].arg2 = ceph_decode_32(p);
 585		}
 586	}
 587
 588	err = decode_crush_names(p, end, &c->type_names);
 589	if (err)
 590		goto fail;
 591
 592	err = decode_crush_names(p, end, &c->names);
 593	if (err)
 594		goto fail;
 595
 596	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */
 597
 598        /* tunables */
 599        ceph_decode_need(p, end, 3*sizeof(u32), done);
 600        c->choose_local_tries = ceph_decode_32(p);
 601        c->choose_local_fallback_tries =  ceph_decode_32(p);
 602        c->choose_total_tries = ceph_decode_32(p);
 603        dout("crush decode tunable choose_local_tries = %d\n",
 604             c->choose_local_tries);
 605        dout("crush decode tunable choose_local_fallback_tries = %d\n",
 606             c->choose_local_fallback_tries);
 607        dout("crush decode tunable choose_total_tries = %d\n",
 608             c->choose_total_tries);
 609
 610	ceph_decode_need(p, end, sizeof(u32), done);
 611	c->chooseleaf_descend_once = ceph_decode_32(p);
 612	dout("crush decode tunable chooseleaf_descend_once = %d\n",
 613	     c->chooseleaf_descend_once);
 614
 615	ceph_decode_need(p, end, sizeof(u8), done);
 616	c->chooseleaf_vary_r = ceph_decode_8(p);
 617	dout("crush decode tunable chooseleaf_vary_r = %d\n",
 618	     c->chooseleaf_vary_r);
 619
 620	/* skip straw_calc_version, allowed_bucket_algs */
 621	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
 622	*p += sizeof(u8) + sizeof(u32);
 623
 624	ceph_decode_need(p, end, sizeof(u8), done);
 625	c->chooseleaf_stable = ceph_decode_8(p);
 626	dout("crush decode tunable chooseleaf_stable = %d\n",
 627	     c->chooseleaf_stable);
 628
 629	if (*p != end) {
 630		/* class_map */
 631		ceph_decode_skip_map(p, end, 32, 32, bad);
 632		/* class_name */
 633		ceph_decode_skip_map(p, end, 32, string, bad);
 634		/* class_bucket */
 635		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
 636	}
 637
 638	if (*p != end) {
 639		err = decode_choose_args(p, end, c);
 640		if (err)
 641			goto fail;
 642	}
 643
 644done:
 645	crush_finalize(c);
 646	dout("crush_decode success\n");
 647	return c;
 648
 649badmem:
 650	err = -ENOMEM;
 651fail:
 652	dout("crush_decode fail %d\n", err);
 653	crush_destroy(c);
 654	return ERR_PTR(err);
 655
 656bad:
 657	err = -EINVAL;
 658	goto fail;
 659}
 660
 661int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
 662{
 663	if (lhs->pool < rhs->pool)
 664		return -1;
 665	if (lhs->pool > rhs->pool)
 666		return 1;
 667	if (lhs->seed < rhs->seed)
 668		return -1;
 669	if (lhs->seed > rhs->seed)
 670		return 1;
 671
 672	return 0;
 673}
 674
 675int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
 676{
 677	int ret;
 678
 679	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
 680	if (ret)
 681		return ret;
 682
 683	if (lhs->shard < rhs->shard)
 684		return -1;
 685	if (lhs->shard > rhs->shard)
 686		return 1;
 687
 688	return 0;
 689}
 690
 691static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
 692{
 693	struct ceph_pg_mapping *pg;
 694
 695	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
 696	if (!pg)
 697		return NULL;
 698
 699	RB_CLEAR_NODE(&pg->node);
 700	return pg;
 701}
 702
 703static void free_pg_mapping(struct ceph_pg_mapping *pg)
 704{
 705	WARN_ON(!RB_EMPTY_NODE(&pg->node));
 706
 707	kfree(pg);
 708}
 709
 710/*
 711 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 712 * to a set of osds) and primary_temp (explicit primary setting)
 713 */
 714DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
 715		 RB_BYPTR, const struct ceph_pg *, node)
 716
 717/*
 718 * rbtree of pg pool info
 719 */
 720DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)
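
/*
 * Editor's note (illustration, not part of the original file): each
 * DEFINE_RB_FUNCS()/DEFINE_RB_FUNCS2() invocation expands to a small set
 * of static rbtree helpers named after its first argument, roughly
 *
 *	bool __insert_pg_pool(root, pi);	false if the id already exists
 *	void insert_pg_pool(root, pi);
 *	void erase_pg_pool(root, pi);
 *	struct ceph_pg_pool_info *lookup_pg_pool(root, id);
 *
 * which is why lookup_pg_pool(), erase_pg_pool(), lookup_pg_mapping() and
 * friends can be called below without explicit definitions in this file.
 * The exact generated signatures live in the DEFINE_RB_* macros in
 * <linux/ceph/libceph.h>; the above is a paraphrase, not a quote.
 */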
 721
 722struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
 723{
 724	return lookup_pg_pool(&map->pg_pools, id);
 725}
 726
 727const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
 728{
 729	struct ceph_pg_pool_info *pi;
 730
 731	if (id == CEPH_NOPOOL)
 732		return NULL;
 733
 734	if (WARN_ON_ONCE(id > (u64) INT_MAX))
 735		return NULL;
 736
 737	pi = lookup_pg_pool(&map->pg_pools, id);
 738	return pi ? pi->name : NULL;
 739}
 740EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
 741
 742int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 743{
 744	struct rb_node *rbp;
 745
 746	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
 747		struct ceph_pg_pool_info *pi =
 748			rb_entry(rbp, struct ceph_pg_pool_info, node);
 749		if (pi->name && strcmp(pi->name, name) == 0)
 750			return pi->id;
 751	}
 752	return -ENOENT;
 753}
 754EXPORT_SYMBOL(ceph_pg_poolid_by_name);
 755
 756u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
 757{
 758	struct ceph_pg_pool_info *pi;
 759
 760	pi = lookup_pg_pool(&map->pg_pools, id);
 761	return pi ? pi->flags : 0;
 762}
 763EXPORT_SYMBOL(ceph_pg_pool_flags);
 764
 765static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 766{
 767	erase_pg_pool(root, pi);
 768	kfree(pi->name);
 769	kfree(pi);
 770}
 771
 772static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
 773{
 774	u8 ev, cv;
 775	unsigned len, num;
 776	void *pool_end;
 777
 778	ceph_decode_need(p, end, 2 + 4, bad);
 779	ev = ceph_decode_8(p);  /* encoding version */
 780	cv = ceph_decode_8(p); /* compat version */
 781	if (ev < 5) {
 782		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
 783		return -EINVAL;
 784	}
 785	if (cv > 9) {
 786		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
 787		return -EINVAL;
 788	}
 789	len = ceph_decode_32(p);
 790	ceph_decode_need(p, end, len, bad);
 791	pool_end = *p + len;
 792
 793	pi->type = ceph_decode_8(p);
 794	pi->size = ceph_decode_8(p);
 795	pi->crush_ruleset = ceph_decode_8(p);
 796	pi->object_hash = ceph_decode_8(p);
 797
 798	pi->pg_num = ceph_decode_32(p);
 799	pi->pgp_num = ceph_decode_32(p);
 800
 801	*p += 4 + 4;  /* skip lpg* */
 802	*p += 4;      /* skip last_change */
 803	*p += 8 + 4;  /* skip snap_seq, snap_epoch */
 804
 805	/* skip snaps */
 806	num = ceph_decode_32(p);
 807	while (num--) {
 808		*p += 8;  /* snapid key */
 809		*p += 1 + 1; /* versions */
 810		len = ceph_decode_32(p);
 811		*p += len;
 812	}
 813
 814	/* skip removed_snaps */
 815	num = ceph_decode_32(p);
 816	*p += num * (8 + 8);
 817
 818	*p += 8;  /* skip auid */
 819	pi->flags = ceph_decode_64(p);
 820	*p += 4;  /* skip crash_replay_interval */
 821
 822	if (ev >= 7)
 823		pi->min_size = ceph_decode_8(p);
 824	else
 825		pi->min_size = pi->size - pi->size / 2;
 826
 827	if (ev >= 8)
 828		*p += 8 + 8;  /* skip quota_max_* */
 829
 830	if (ev >= 9) {
 831		/* skip tiers */
 832		num = ceph_decode_32(p);
 833		*p += num * 8;
 834
 835		*p += 8;  /* skip tier_of */
 836		*p += 1;  /* skip cache_mode */
 837
 838		pi->read_tier = ceph_decode_64(p);
 839		pi->write_tier = ceph_decode_64(p);
 840	} else {
 841		pi->read_tier = -1;
 842		pi->write_tier = -1;
 843	}
 844
 845	if (ev >= 10) {
 846		/* skip properties */
 847		num = ceph_decode_32(p);
 848		while (num--) {
 849			len = ceph_decode_32(p);
 850			*p += len; /* key */
 851			len = ceph_decode_32(p);
 852			*p += len; /* val */
 853		}
 854	}
 855
 856	if (ev >= 11) {
 857		/* skip hit_set_params */
 858		*p += 1 + 1; /* versions */
 859		len = ceph_decode_32(p);
 860		*p += len;
 861
 862		*p += 4; /* skip hit_set_period */
 863		*p += 4; /* skip hit_set_count */
 864	}
 865
 866	if (ev >= 12)
 867		*p += 4; /* skip stripe_width */
 868
 869	if (ev >= 13) {
 870		*p += 8; /* skip target_max_bytes */
 871		*p += 8; /* skip target_max_objects */
 872		*p += 4; /* skip cache_target_dirty_ratio_micro */
 873		*p += 4; /* skip cache_target_full_ratio_micro */
 874		*p += 4; /* skip cache_min_flush_age */
 875		*p += 4; /* skip cache_min_evict_age */
 876	}
 877
 878	if (ev >=  14) {
 879		/* skip erasure_code_profile */
 880		len = ceph_decode_32(p);
 881		*p += len;
 882	}
 883
 884	/*
 885	 * last_force_op_resend_preluminous, will be overridden if the
 886	 * map was encoded with RESEND_ON_SPLIT
 887	 */
 888	if (ev >= 15)
 889		pi->last_force_request_resend = ceph_decode_32(p);
 890	else
 891		pi->last_force_request_resend = 0;
 892
 893	if (ev >= 16)
 894		*p += 4; /* skip min_read_recency_for_promote */
 895
 896	if (ev >= 17)
 897		*p += 8; /* skip expected_num_objects */
 898
 899	if (ev >= 19)
 900		*p += 4; /* skip cache_target_dirty_high_ratio_micro */
 901
 902	if (ev >= 20)
 903		*p += 4; /* skip min_write_recency_for_promote */
 904
 905	if (ev >= 21)
 906		*p += 1; /* skip use_gmt_hitset */
 907
 908	if (ev >= 22)
 909		*p += 1; /* skip fast_read */
 910
 911	if (ev >= 23) {
 912		*p += 4; /* skip hit_set_grade_decay_rate */
 913		*p += 4; /* skip hit_set_search_last_n */
 914	}
 915
 916	if (ev >= 24) {
 917		/* skip opts */
 918		*p += 1 + 1; /* versions */
 919		len = ceph_decode_32(p);
 920		*p += len;
 921	}
 922
 923	if (ev >= 25)
 924		pi->last_force_request_resend = ceph_decode_32(p);
 925
 926	/* ignore the rest */
 927
 928	*p = pool_end;
 929	calc_pg_masks(pi);
 930	return 0;
 931
 932bad:
 933	return -EINVAL;
 934}
 935
 936static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
 937{
 938	struct ceph_pg_pool_info *pi;
 939	u32 num, len;
 940	u64 pool;
 941
 942	ceph_decode_32_safe(p, end, num, bad);
 943	dout(" %d pool names\n", num);
 944	while (num--) {
 945		ceph_decode_64_safe(p, end, pool, bad);
 946		ceph_decode_32_safe(p, end, len, bad);
 947		dout("  pool %llu len %d\n", pool, len);
 948		ceph_decode_need(p, end, len, bad);
 949		pi = lookup_pg_pool(&map->pg_pools, pool);
 950		if (pi) {
 951			char *name = kstrndup(*p, len, GFP_NOFS);
 952
 953			if (!name)
 954				return -ENOMEM;
 955			kfree(pi->name);
 956			pi->name = name;
 957			dout("  name is %s\n", pi->name);
 958		}
 959		*p += len;
 960	}
 961	return 0;
 962
 963bad:
 964	return -EINVAL;
 965}
 966
 967/*
 968 * CRUSH workspaces
 969 *
 970 * workspace_manager framework borrowed from fs/btrfs/compression.c.
 971 * Two simplifications: there is only one type of workspace and there
 972 * is always at least one workspace.
 973 */
 974static struct crush_work *alloc_workspace(const struct crush_map *c)
 975{
 976	struct crush_work *work;
 977	size_t work_size;
 978
 979	WARN_ON(!c->working_size);
 980	work_size = crush_work_size(c, CEPH_PG_MAX_SIZE);
 981	dout("%s work_size %zu bytes\n", __func__, work_size);
 982
 983	work = ceph_kvmalloc(work_size, GFP_NOIO);
 984	if (!work)
 985		return NULL;
 986
 987	INIT_LIST_HEAD(&work->item);
 988	crush_init_workspace(c, work);
 989	return work;
 990}
 991
 992static void free_workspace(struct crush_work *work)
 993{
 994	WARN_ON(!list_empty(&work->item));
 995	kvfree(work);
 996}
 997
 998static void init_workspace_manager(struct workspace_manager *wsm)
 999{
1000	INIT_LIST_HEAD(&wsm->idle_ws);
1001	spin_lock_init(&wsm->ws_lock);
1002	atomic_set(&wsm->total_ws, 0);
1003	wsm->free_ws = 0;
1004	init_waitqueue_head(&wsm->ws_wait);
1005}
1006
1007static void add_initial_workspace(struct workspace_manager *wsm,
1008				  struct crush_work *work)
1009{
1010	WARN_ON(!list_empty(&wsm->idle_ws));
1011
1012	list_add(&work->item, &wsm->idle_ws);
1013	atomic_set(&wsm->total_ws, 1);
1014	wsm->free_ws = 1;
1015}
1016
1017static void cleanup_workspace_manager(struct workspace_manager *wsm)
1018{
1019	struct crush_work *work;
1020
1021	while (!list_empty(&wsm->idle_ws)) {
1022		work = list_first_entry(&wsm->idle_ws, struct crush_work,
1023					item);
1024		list_del_init(&work->item);
1025		free_workspace(work);
1026	}
1027	atomic_set(&wsm->total_ws, 0);
1028	wsm->free_ws = 0;
1029}
1030
1031/*
1032 * Finds an available workspace or allocates a new one.  If it's not
1033 * possible to allocate a new one, waits until there is one.
1034 */
1035static struct crush_work *get_workspace(struct workspace_manager *wsm,
1036					const struct crush_map *c)
1037{
1038	struct crush_work *work;
1039	int cpus = num_online_cpus();
1040
1041again:
1042	spin_lock(&wsm->ws_lock);
1043	if (!list_empty(&wsm->idle_ws)) {
1044		work = list_first_entry(&wsm->idle_ws, struct crush_work,
1045					item);
1046		list_del_init(&work->item);
1047		wsm->free_ws--;
1048		spin_unlock(&wsm->ws_lock);
1049		return work;
1050
1051	}
1052	if (atomic_read(&wsm->total_ws) > cpus) {
1053		DEFINE_WAIT(wait);
1054
1055		spin_unlock(&wsm->ws_lock);
1056		prepare_to_wait(&wsm->ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1057		if (atomic_read(&wsm->total_ws) > cpus && !wsm->free_ws)
1058			schedule();
1059		finish_wait(&wsm->ws_wait, &wait);
1060		goto again;
1061	}
1062	atomic_inc(&wsm->total_ws);
1063	spin_unlock(&wsm->ws_lock);
1064
1065	work = alloc_workspace(c);
1066	if (!work) {
1067		atomic_dec(&wsm->total_ws);
1068		wake_up(&wsm->ws_wait);
1069
1070		/*
1071		 * Do not return the error but go back to waiting.  We
1072		 * have the initial workspace and the CRUSH computation
1073		 * time is bounded so we will get it eventually.
1074		 */
1075		WARN_ON(atomic_read(&wsm->total_ws) < 1);
1076		goto again;
1077	}
1078	return work;
1079}
1080
1081/*
1082 * Puts a workspace back on the list or frees it if we have enough
1083 * idle ones sitting around.
1084 */
1085static void put_workspace(struct workspace_manager *wsm,
1086			  struct crush_work *work)
1087{
1088	spin_lock(&wsm->ws_lock);
1089	if (wsm->free_ws <= num_online_cpus()) {
1090		list_add(&work->item, &wsm->idle_ws);
1091		wsm->free_ws++;
1092		spin_unlock(&wsm->ws_lock);
1093		goto wake;
1094	}
1095	spin_unlock(&wsm->ws_lock);
1096
1097	free_workspace(work);
1098	atomic_dec(&wsm->total_ws);
1099wake:
1100	if (wq_has_sleeper(&wsm->ws_wait))
1101		wake_up(&wsm->ws_wait);
1102}
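
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a CRUSH mapping computation borrows a workspace for its duration and
 * returns it afterwards.  The real caller is do_crush() further down in
 * this file; the crush_do_rule() signature is taken from
 * <linux/crush/mapper.h> and should be treated as an assumption of this
 * sketch.
 */
static int example_map_pg(struct ceph_osdmap *map, int ruleno, int x,
			  int *result, int result_max,
			  const __u32 *weight, int weight_max)
{
	struct crush_work *work;
	int r;

	work = get_workspace(&map->crush_wsm, map->crush);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, work, NULL);
	put_workspace(&map->crush_wsm, work);
	return r;
}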
1103
1104/*
1105 * osd map
1106 */
1107struct ceph_osdmap *ceph_osdmap_alloc(void)
1108{
1109	struct ceph_osdmap *map;
1110
1111	map = kzalloc(sizeof(*map), GFP_NOIO);
1112	if (!map)
1113		return NULL;
1114
1115	map->pg_pools = RB_ROOT;
1116	map->pool_max = -1;
1117	map->pg_temp = RB_ROOT;
1118	map->primary_temp = RB_ROOT;
1119	map->pg_upmap = RB_ROOT;
1120	map->pg_upmap_items = RB_ROOT;
1121
1122	init_workspace_manager(&map->crush_wsm);
1123
1124	return map;
1125}
1126
1127void ceph_osdmap_destroy(struct ceph_osdmap *map)
1128{
1129	dout("osdmap_destroy %p\n", map);
1130
1131	if (map->crush)
1132		crush_destroy(map->crush);
1133	cleanup_workspace_manager(&map->crush_wsm);
1134
1135	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
1136		struct ceph_pg_mapping *pg =
1137			rb_entry(rb_first(&map->pg_temp),
1138				 struct ceph_pg_mapping, node);
1139		erase_pg_mapping(&map->pg_temp, pg);
1140		free_pg_mapping(pg);
1141	}
1142	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
1143		struct ceph_pg_mapping *pg =
1144			rb_entry(rb_first(&map->primary_temp),
1145				 struct ceph_pg_mapping, node);
1146		erase_pg_mapping(&map->primary_temp, pg);
1147		free_pg_mapping(pg);
1148	}
1149	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
1150		struct ceph_pg_mapping *pg =
1151			rb_entry(rb_first(&map->pg_upmap),
1152				 struct ceph_pg_mapping, node);
1153		rb_erase(&pg->node, &map->pg_upmap);
1154		kfree(pg);
1155	}
1156	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
1157		struct ceph_pg_mapping *pg =
1158			rb_entry(rb_first(&map->pg_upmap_items),
1159				 struct ceph_pg_mapping, node);
1160		rb_erase(&pg->node, &map->pg_upmap_items);
1161		kfree(pg);
1162	}
1163	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
1164		struct ceph_pg_pool_info *pi =
1165			rb_entry(rb_first(&map->pg_pools),
1166				 struct ceph_pg_pool_info, node);
1167		__remove_pg_pool(&map->pg_pools, pi);
1168	}
1169	kvfree(map->osd_state);
1170	kvfree(map->osd_weight);
1171	kvfree(map->osd_addr);
1172	kvfree(map->osd_primary_affinity);
1173	kfree(map);
1174}
1175
1176/*
1177 * Adjust max_osd value, (re)allocate arrays.
1178 *
1179 * The new elements are properly initialized.
1180 */
1181static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
1182{
1183	u32 *state;
1184	u32 *weight;
1185	struct ceph_entity_addr *addr;
1186	u32 to_copy;
1187	int i;
1188
1189	dout("%s old %u new %u\n", __func__, map->max_osd, max);
1190	if (max == map->max_osd)
1191		return 0;
1192
1193	state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
1194	weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
1195	addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
1196	if (!state || !weight || !addr) {
1197		kvfree(state);
1198		kvfree(weight);
1199		kvfree(addr);
1200		return -ENOMEM;
1201	}
1202
1203	to_copy = min(map->max_osd, max);
1204	if (map->osd_state) {
1205		memcpy(state, map->osd_state, to_copy * sizeof(*state));
1206		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
1207		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
1208		kvfree(map->osd_state);
1209		kvfree(map->osd_weight);
1210		kvfree(map->osd_addr);
1211	}
1212
1213	map->osd_state = state;
1214	map->osd_weight = weight;
1215	map->osd_addr = addr;
1216	for (i = map->max_osd; i < max; i++) {
1217		map->osd_state[i] = 0;
1218		map->osd_weight[i] = CEPH_OSD_OUT;
1219		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
1220	}
1221
1222	if (map->osd_primary_affinity) {
1223		u32 *affinity;
1224
1225		affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),
1226					 GFP_NOFS);
1227		if (!affinity)
1228			return -ENOMEM;
1229
1230		memcpy(affinity, map->osd_primary_affinity,
1231		       to_copy * sizeof(*affinity));
1232		kvfree(map->osd_primary_affinity);
1233
1234		map->osd_primary_affinity = affinity;
1235		for (i = map->max_osd; i < max; i++)
1236			map->osd_primary_affinity[i] =
1237			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1238	}
1239
1240	map->max_osd = max;
1241
1242	return 0;
1243}
1244
1245static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
1246{
1247	struct crush_work *work;
1248
1249	if (IS_ERR(crush))
1250		return PTR_ERR(crush);
1251
1252	work = alloc_workspace(crush);
1253	if (!work) {
1254		crush_destroy(crush);
1255		return -ENOMEM;
1256	}
1257
1258	if (map->crush)
1259		crush_destroy(map->crush);
1260	cleanup_workspace_manager(&map->crush_wsm);
1261	map->crush = crush;
1262	add_initial_workspace(&map->crush_wsm, work);
1263	return 0;
1264}
1265
1266#define OSDMAP_WRAPPER_COMPAT_VER	7
1267#define OSDMAP_CLIENT_DATA_COMPAT_VER	1
1268
1269/*
1270 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
1271 * to struct_v of the client_data section for new (v7 and above)
1272 * osdmaps.
1273 */
1274static int get_osdmap_client_data_v(void **p, void *end,
1275				    const char *prefix, u8 *v)
1276{
1277	u8 struct_v;
1278
1279	ceph_decode_8_safe(p, end, struct_v, e_inval);
1280	if (struct_v >= 7) {
1281		u8 struct_compat;
1282
1283		ceph_decode_8_safe(p, end, struct_compat, e_inval);
1284		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
1285			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
1286				struct_v, struct_compat,
1287				OSDMAP_WRAPPER_COMPAT_VER, prefix);
1288			return -EINVAL;
1289		}
1290		*p += 4; /* ignore wrapper struct_len */
1291
1292		ceph_decode_8_safe(p, end, struct_v, e_inval);
1293		ceph_decode_8_safe(p, end, struct_compat, e_inval);
1294		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
1295			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
1296				struct_v, struct_compat,
1297				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
1298			return -EINVAL;
1299		}
1300		*p += 4; /* ignore client data struct_len */
1301	} else {
1302		u16 version;
1303
1304		*p -= 1;
1305		ceph_decode_16_safe(p, end, version, e_inval);
1306		if (version < 6) {
1307			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
1308				version, prefix);
1309			return -EINVAL;
1310		}
1311
1312		/* old osdmap encoding */
1313		struct_v = 0;
1314	}
1315
1316	*v = struct_v;
1317	return 0;
1318
1319e_inval:
1320	return -EINVAL;
1321}
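
/*
 * Byte layout handled by get_osdmap_client_data_v() (editor's
 * illustration, inferred from the decoder above):
 *
 *	new (v7+) encoding:				old (v6) encoding:
 *		u8  wrapper struct_v   (>= 7)			u16 version (>= 6)
 *		u8  wrapper struct_compat			<map payload>
 *		u32 wrapper struct_len     (skipped)
 *		u8  client data struct_v   -> *v
 *		u8  client data struct_compat
 *		u32 client data struct_len (skipped)
 *		<client data payload>
 *
 * For the old encoding *v is reported as 0, so the "struct_v >= N"
 * checks in the callers all fail and the newer optional sections are
 * skipped.
 */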
1322
1323static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
1324			  bool incremental)
1325{
1326	u32 n;
1327
1328	ceph_decode_32_safe(p, end, n, e_inval);
1329	while (n--) {
1330		struct ceph_pg_pool_info *pi;
1331		u64 pool;
1332		int ret;
1333
1334		ceph_decode_64_safe(p, end, pool, e_inval);
1335
1336		pi = lookup_pg_pool(&map->pg_pools, pool);
1337		if (!incremental || !pi) {
1338			pi = kzalloc(sizeof(*pi), GFP_NOFS);
1339			if (!pi)
1340				return -ENOMEM;
1341
1342			RB_CLEAR_NODE(&pi->node);
1343			pi->id = pool;
1344
1345			if (!__insert_pg_pool(&map->pg_pools, pi)) {
1346				kfree(pi);
1347				return -EEXIST;
1348			}
1349		}
1350
1351		ret = decode_pool(p, end, pi);
1352		if (ret)
1353			return ret;
1354	}
1355
1356	return 0;
1357
1358e_inval:
1359	return -EINVAL;
1360}
1361
1362static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
1363{
1364	return __decode_pools(p, end, map, false);
1365}
1366
1367static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
1368{
1369	return __decode_pools(p, end, map, true);
1370}
1371
1372typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);
1373
1374static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
1375			     decode_mapping_fn_t fn, bool incremental)
1376{
1377	u32 n;
1378
1379	WARN_ON(!incremental && !fn);
1380
1381	ceph_decode_32_safe(p, end, n, e_inval);
1382	while (n--) {
1383		struct ceph_pg_mapping *pg;
1384		struct ceph_pg pgid;
1385		int ret;
1386
1387		ret = ceph_decode_pgid(p, end, &pgid);
1388		if (ret)
1389			return ret;
1390
1391		pg = lookup_pg_mapping(mapping_root, &pgid);
1392		if (pg) {
1393			WARN_ON(!incremental);
1394			erase_pg_mapping(mapping_root, pg);
1395			free_pg_mapping(pg);
1396		}
1397
1398		if (fn) {
1399			pg = fn(p, end, incremental);
1400			if (IS_ERR(pg))
1401				return PTR_ERR(pg);
1402
1403			if (pg) {
1404				pg->pgid = pgid; /* struct */
1405				insert_pg_mapping(mapping_root, pg);
1406			}
1407		}
1408	}
1409
1410	return 0;
1411
1412e_inval:
1413	return -EINVAL;
1414}
1415
1416static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
1417						bool incremental)
1418{
1419	struct ceph_pg_mapping *pg;
1420	u32 len, i;
1421
1422	ceph_decode_32_safe(p, end, len, e_inval);
1423	if (len == 0 && incremental)
1424		return NULL;	/* new_pg_temp: [] to remove */
1425	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
1426		return ERR_PTR(-EINVAL);
1427
1428	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
1429	pg = alloc_pg_mapping(len * sizeof(u32));
1430	if (!pg)
1431		return ERR_PTR(-ENOMEM);
1432
1433	pg->pg_temp.len = len;
1434	for (i = 0; i < len; i++)
1435		pg->pg_temp.osds[i] = ceph_decode_32(p);
1436
1437	return pg;
1438
1439e_inval:
1440	return ERR_PTR(-EINVAL);
1441}
1442
1443static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
1444{
1445	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
1446				 false);
1447}
1448
1449static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
1450{
1451	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
1452				 true);
1453}
1454
1455static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
1456						     bool incremental)
1457{
1458	struct ceph_pg_mapping *pg;
1459	u32 osd;
1460
1461	ceph_decode_32_safe(p, end, osd, e_inval);
1462	if (osd == (u32)-1 && incremental)
1463		return NULL;	/* new_primary_temp: -1 to remove */
1464
1465	pg = alloc_pg_mapping(0);
1466	if (!pg)
1467		return ERR_PTR(-ENOMEM);
1468
1469	pg->primary_temp.osd = osd;
1470	return pg;
1471
1472e_inval:
1473	return ERR_PTR(-EINVAL);
1474}
1475
1476static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
1477{
1478	return decode_pg_mapping(p, end, &map->primary_temp,
1479				 __decode_primary_temp, false);
1480}
1481
1482static int decode_new_primary_temp(void **p, void *end,
1483				   struct ceph_osdmap *map)
1484{
1485	return decode_pg_mapping(p, end, &map->primary_temp,
1486				 __decode_primary_temp, true);
1487}
1488
1489u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
1490{
1491	BUG_ON(osd >= map->max_osd);
1492
1493	if (!map->osd_primary_affinity)
1494		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1495
1496	return map->osd_primary_affinity[osd];
1497}
1498
1499static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
1500{
1501	BUG_ON(osd >= map->max_osd);
1502
1503	if (!map->osd_primary_affinity) {
1504		int i;
1505
1506		map->osd_primary_affinity = ceph_kvmalloc(
1507		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
1508		    GFP_NOFS);
1509		if (!map->osd_primary_affinity)
1510			return -ENOMEM;
1511
1512		for (i = 0; i < map->max_osd; i++)
1513			map->osd_primary_affinity[i] =
1514			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1515	}
1516
1517	map->osd_primary_affinity[osd] = aff;
1518
1519	return 0;
1520}
1521
1522static int decode_primary_affinity(void **p, void *end,
1523				   struct ceph_osdmap *map)
1524{
1525	u32 len, i;
1526
1527	ceph_decode_32_safe(p, end, len, e_inval);
1528	if (len == 0) {
1529		kvfree(map->osd_primary_affinity);
1530		map->osd_primary_affinity = NULL;
1531		return 0;
1532	}
1533	if (len != map->max_osd)
1534		goto e_inval;
1535
1536	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
1537
1538	for (i = 0; i < map->max_osd; i++) {
1539		int ret;
1540
1541		ret = set_primary_affinity(map, i, ceph_decode_32(p));
1542		if (ret)
1543			return ret;
1544	}
1545
1546	return 0;
1547
1548e_inval:
1549	return -EINVAL;
1550}
1551
1552static int decode_new_primary_affinity(void **p, void *end,
1553				       struct ceph_osdmap *map)
1554{
1555	u32 n;
1556
1557	ceph_decode_32_safe(p, end, n, e_inval);
1558	while (n--) {
1559		u32 osd, aff;
1560		int ret;
1561
1562		ceph_decode_32_safe(p, end, osd, e_inval);
1563		ceph_decode_32_safe(p, end, aff, e_inval);
1564
1565		ret = set_primary_affinity(map, osd, aff);
1566		if (ret)
1567			return ret;
1568
1569		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
1570	}
1571
1572	return 0;
1573
1574e_inval:
1575	return -EINVAL;
1576}
1577
1578static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
1579						 bool __unused)
1580{
1581	return __decode_pg_temp(p, end, false);
1582}
1583
1584static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1585{
1586	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
1587				 false);
1588}
1589
1590static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1591{
1592	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
1593				 true);
1594}
1595
1596static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1597{
1598	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
1599}
1600
1601static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
1602						       bool __unused)
1603{
1604	struct ceph_pg_mapping *pg;
1605	u32 len, i;
1606
1607	ceph_decode_32_safe(p, end, len, e_inval);
1608	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
1609		return ERR_PTR(-EINVAL);
1610
1611	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
1612	pg = alloc_pg_mapping(2 * len * sizeof(u32));
1613	if (!pg)
1614		return ERR_PTR(-ENOMEM);
1615
1616	pg->pg_upmap_items.len = len;
1617	for (i = 0; i < len; i++) {
1618		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
1619		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
1620	}
1621
1622	return pg;
1623
1624e_inval:
1625	return ERR_PTR(-EINVAL);
1626}
1627
1628static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
1629{
1630	return decode_pg_mapping(p, end, &map->pg_upmap_items,
1631				 __decode_pg_upmap_items, false);
1632}
1633
1634static int decode_new_pg_upmap_items(void **p, void *end,
1635				     struct ceph_osdmap *map)
1636{
1637	return decode_pg_mapping(p, end, &map->pg_upmap_items,
1638				 __decode_pg_upmap_items, true);
1639}
1640
1641static int decode_old_pg_upmap_items(void **p, void *end,
1642				     struct ceph_osdmap *map)
1643{
1644	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
1645}
1646
1647/*
1648 * decode a full map.
1649 */
1650static int osdmap_decode(void **p, void *end, bool msgr2,
1651			 struct ceph_osdmap *map)
1652{
1653	u8 struct_v;
1654	u32 epoch = 0;
1655	void *start = *p;
1656	u32 max;
1657	u32 len, i;
1658	int err;
1659
1660	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1661
1662	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
1663	if (err)
1664		goto bad;
1665
1666	/* fsid, epoch, created, modified */
1667	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
1668			 sizeof(map->created) + sizeof(map->modified), e_inval);
1669	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
1670	epoch = map->epoch = ceph_decode_32(p);
1671	ceph_decode_copy(p, &map->created, sizeof(map->created));
1672	ceph_decode_copy(p, &map->modified, sizeof(map->modified));
1673
1674	/* pools */
1675	err = decode_pools(p, end, map);
1676	if (err)
1677		goto bad;
1678
1679	/* pool_name */
1680	err = decode_pool_names(p, end, map);
1681	if (err)
1682		goto bad;
1683
1684	ceph_decode_32_safe(p, end, map->pool_max, e_inval);
1685
1686	ceph_decode_32_safe(p, end, map->flags, e_inval);
1687
1688	/* max_osd */
1689	ceph_decode_32_safe(p, end, max, e_inval);
1690
1691	/* (re)alloc osd arrays */
1692	err = osdmap_set_max_osd(map, max);
1693	if (err)
1694		goto bad;
1695
1696	/* osd_state, osd_weight, osd_addrs->client_addr */
1697	ceph_decode_need(p, end, 3*sizeof(u32) +
1698			 map->max_osd*(struct_v >= 5 ? sizeof(u32) :
1699						       sizeof(u8)) +
1700				       sizeof(*map->osd_weight), e_inval);
1701	if (ceph_decode_32(p) != map->max_osd)
1702		goto e_inval;
1703
1704	if (struct_v >= 5) {
1705		for (i = 0; i < map->max_osd; i++)
1706			map->osd_state[i] = ceph_decode_32(p);
1707	} else {
1708		for (i = 0; i < map->max_osd; i++)
1709			map->osd_state[i] = ceph_decode_8(p);
1710	}
1711
1712	if (ceph_decode_32(p) != map->max_osd)
1713		goto e_inval;
1714
1715	for (i = 0; i < map->max_osd; i++)
1716		map->osd_weight[i] = ceph_decode_32(p);
1717
1718	if (ceph_decode_32(p) != map->max_osd)
1719		goto e_inval;
1720
1721	for (i = 0; i < map->max_osd; i++) {
1722		struct ceph_entity_addr *addr = &map->osd_addr[i];
1723
1724		if (struct_v >= 8)
1725			err = ceph_decode_entity_addrvec(p, end, msgr2, addr);
1726		else
1727			err = ceph_decode_entity_addr(p, end, addr);
1728		if (err)
1729			goto bad;
1730
1731		dout("%s osd%d addr %s\n", __func__, i, ceph_pr_addr(addr));
1732	}
1733
1734	/* pg_temp */
1735	err = decode_pg_temp(p, end, map);
1736	if (err)
1737		goto bad;
1738
1739	/* primary_temp */
1740	if (struct_v >= 1) {
1741		err = decode_primary_temp(p, end, map);
1742		if (err)
1743			goto bad;
1744	}
1745
1746	/* primary_affinity */
1747	if (struct_v >= 2) {
1748		err = decode_primary_affinity(p, end, map);
1749		if (err)
1750			goto bad;
1751	} else {
1752		WARN_ON(map->osd_primary_affinity);
1753	}
1754
1755	/* crush */
1756	ceph_decode_32_safe(p, end, len, e_inval);
1757	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
1758	if (err)
1759		goto bad;
1760
1761	*p += len;
1762	if (struct_v >= 3) {
1763		/* erasure_code_profiles */
1764		ceph_decode_skip_map_of_map(p, end, string, string, string,
1765					    e_inval);
1766	}
1767
1768	if (struct_v >= 4) {
1769		err = decode_pg_upmap(p, end, map);
1770		if (err)
1771			goto bad;
1772
1773		err = decode_pg_upmap_items(p, end, map);
1774		if (err)
1775			goto bad;
1776	} else {
1777		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
1778		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
1779	}
1780
1781	/* ignore the rest */
1782	*p = end;
1783
1784	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1785	return 0;
1786
1787e_inval:
1788	err = -EINVAL;
1789bad:
1790	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1791	       err, epoch, (int)(*p - start), *p, start, end);
1792	print_hex_dump(KERN_DEBUG, "osdmap: ",
1793		       DUMP_PREFIX_OFFSET, 16, 1,
1794		       start, end - start, true);
1795	return err;
1796}
1797
1798/*
1799 * Allocate and decode a full map.
1800 */
1801struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2)
1802{
1803	struct ceph_osdmap *map;
1804	int ret;
1805
1806	map = ceph_osdmap_alloc();
1807	if (!map)
1808		return ERR_PTR(-ENOMEM);
1809
1810	ret = osdmap_decode(p, end, msgr2, map);
1811	if (ret) {
1812		ceph_osdmap_destroy(map);
1813		return ERR_PTR(ret);
1814	}
1815
1816	return map;
1817}
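
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * decoding a full map out of a received message payload.  The caller owns
 * the buffer; on return *p has been advanced past the consumed bytes.
 */
static struct ceph_osdmap *example_decode_full_map(void *data, u32 len,
						   bool msgr2)
{
	void *p = data;

	return ceph_osdmap_decode(&p, data + len, msgr2);
}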
1818
1819/*
1820 * Encoding order is (new_up_client, new_state, new_weight).  Need to
1821 * apply in the (new_weight, new_state, new_up_client) order, because
1822 * an incremental map may look like e.g.
1823 *
1824 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
1825 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
1826 */
1827static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
1828				      bool msgr2, struct ceph_osdmap *map)
1829{
1830	void *new_up_client;
1831	void *new_state;
1832	void *new_weight_end;
1833	u32 len;
1834	int ret;
1835	int i;
1836
1837	new_up_client = *p;
1838	ceph_decode_32_safe(p, end, len, e_inval);
1839	for (i = 0; i < len; ++i) {
1840		struct ceph_entity_addr addr;
1841
1842		ceph_decode_skip_32(p, end, e_inval);
1843		if (struct_v >= 7)
1844			ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
1845		else
1846			ret = ceph_decode_entity_addr(p, end, &addr);
1847		if (ret)
1848			return ret;
1849	}
1850
1851	new_state = *p;
1852	ceph_decode_32_safe(p, end, len, e_inval);
1853	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
1854	ceph_decode_need(p, end, len, e_inval);
1855	*p += len;
1856
1857	/* new_weight */
1858	ceph_decode_32_safe(p, end, len, e_inval);
1859	while (len--) {
1860		s32 osd;
1861		u32 w;
1862
1863		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
1864		osd = ceph_decode_32(p);
1865		w = ceph_decode_32(p);
1866		BUG_ON(osd >= map->max_osd);
1867		pr_info("osd%d weight 0x%x %s\n", osd, w,
1868		     w == CEPH_OSD_IN ? "(in)" :
1869		     (w == CEPH_OSD_OUT ? "(out)" : ""));
1870		map->osd_weight[osd] = w;
1871
1872		/*
1873		 * If we are marking in, set the EXISTS, and clear the
1874		 * AUTOOUT and NEW bits.
1875		 */
1876		if (w) {
1877			map->osd_state[osd] |= CEPH_OSD_EXISTS;
1878			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
1879						 CEPH_OSD_NEW);
1880		}
1881	}
1882	new_weight_end = *p;
1883
1884	/* new_state (up/down) */
1885	*p = new_state;
1886	len = ceph_decode_32(p);
1887	while (len--) {
1888		s32 osd;
1889		u32 xorstate;
1890
1891		osd = ceph_decode_32(p);
1892		if (struct_v >= 5)
1893			xorstate = ceph_decode_32(p);
1894		else
1895			xorstate = ceph_decode_8(p);
1896		if (xorstate == 0)
1897			xorstate = CEPH_OSD_UP;
1898		BUG_ON(osd >= map->max_osd);
1899		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
1900		    (xorstate & CEPH_OSD_UP))
1901			pr_info("osd%d down\n", osd);
1902		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
1903		    (xorstate & CEPH_OSD_EXISTS)) {
1904			pr_info("osd%d does not exist\n", osd);
1905			ret = set_primary_affinity(map, osd,
1906						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
1907			if (ret)
1908				return ret;
1909			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
1910			map->osd_state[osd] = 0;
1911		} else {
1912			map->osd_state[osd] ^= xorstate;
1913		}
1914	}
1915
1916	/* new_up_client */
1917	*p = new_up_client;
1918	len = ceph_decode_32(p);
1919	while (len--) {
1920		s32 osd;
1921		struct ceph_entity_addr addr;
1922
1923		osd = ceph_decode_32(p);
1924		BUG_ON(osd >= map->max_osd);
1925		if (struct_v >= 7)
1926			ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
1927		else
1928			ret = ceph_decode_entity_addr(p, end, &addr);
1929		if (ret)
1930			return ret;
1931
1932		dout("%s osd%d addr %s\n", __func__, osd, ceph_pr_addr(&addr));
1933
1934		pr_info("osd%d up\n", osd);
1935		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
1936		map->osd_addr[osd] = addr;
1937	}
1938
1939	*p = new_weight_end;
1940	return 0;
1941
1942e_inval:
1943	return -EINVAL;
1944}
1945
1946/*
1947 * decode and apply an incremental map update.
1948 */
1949struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
1950					     struct ceph_osdmap *map)
1951{
1952	struct ceph_fsid fsid;
1953	u32 epoch = 0;
1954	struct ceph_timespec modified;
1955	s32 len;
1956	u64 pool;
1957	__s64 new_pool_max;
1958	__s32 new_flags, max;
1959	void *start = *p;
1960	int err;
1961	u8 struct_v;
1962
1963	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1964
1965	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
1966	if (err)
1967		goto bad;
1968
1969	/* fsid, epoch, modified, new_pool_max, new_flags */
1970	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
1971			 sizeof(u64) + sizeof(u32), e_inval);
1972	ceph_decode_copy(p, &fsid, sizeof(fsid));
1973	epoch = ceph_decode_32(p);
1974	BUG_ON(epoch != map->epoch+1);
1975	ceph_decode_copy(p, &modified, sizeof(modified));
1976	new_pool_max = ceph_decode_64(p);
1977	new_flags = ceph_decode_32(p);
1978
1979	/* full map? */
1980	ceph_decode_32_safe(p, end, len, e_inval);
1981	if (len > 0) {
1982		dout("apply_incremental full map len %d, %p to %p\n",
1983		     len, *p, end);
1984		return ceph_osdmap_decode(p, min(*p+len, end), msgr2);
1985	}
1986
1987	/* new crush? */
1988	ceph_decode_32_safe(p, end, len, e_inval);
1989	if (len > 0) {
1990		err = osdmap_set_crush(map,
1991				       crush_decode(*p, min(*p + len, end)));
1992		if (err)
1993			goto bad;
1994		*p += len;
1995	}
1996
1997	/* new flags? */
1998	if (new_flags >= 0)
1999		map->flags = new_flags;
2000	if (new_pool_max >= 0)
2001		map->pool_max = new_pool_max;
2002
2003	/* new max? */
2004	ceph_decode_32_safe(p, end, max, e_inval);
2005	if (max >= 0) {
2006		err = osdmap_set_max_osd(map, max);
2007		if (err)
2008			goto bad;
2009	}
2010
2011	map->epoch++;
2012	map->modified = modified;
2013
2014	/* new_pools */
2015	err = decode_new_pools(p, end, map);
2016	if (err)
2017		goto bad;
2018
2019	/* new_pool_names */
2020	err = decode_pool_names(p, end, map);
2021	if (err)
2022		goto bad;
2023
2024	/* old_pool */
2025	ceph_decode_32_safe(p, end, len, e_inval);
2026	while (len--) {
2027		struct ceph_pg_pool_info *pi;
2028
2029		ceph_decode_64_safe(p, end, pool, e_inval);
2030		pi = lookup_pg_pool(&map->pg_pools, pool);
2031		if (pi)
2032			__remove_pg_pool(&map->pg_pools, pi);
2033	}
2034
2035	/* new_up_client, new_state, new_weight */
2036	err = decode_new_up_state_weight(p, end, struct_v, msgr2, map);
2037	if (err)
2038		goto bad;
2039
2040	/* new_pg_temp */
2041	err = decode_new_pg_temp(p, end, map);
2042	if (err)
2043		goto bad;
2044
2045	/* new_primary_temp */
2046	if (struct_v >= 1) {
2047		err = decode_new_primary_temp(p, end, map);
2048		if (err)
2049			goto bad;
2050	}
2051
2052	/* new_primary_affinity */
2053	if (struct_v >= 2) {
2054		err = decode_new_primary_affinity(p, end, map);
2055		if (err)
2056			goto bad;
2057	}
2058
2059	if (struct_v >= 3) {
2060		/* new_erasure_code_profiles */
2061		ceph_decode_skip_map_of_map(p, end, string, string, string,
2062					    e_inval);
2063		/* old_erasure_code_profiles */
2064		ceph_decode_skip_set(p, end, string, e_inval);
2065	}
2066
2067	if (struct_v >= 4) {
2068		err = decode_new_pg_upmap(p, end, map);
2069		if (err)
2070			goto bad;
2071
2072		err = decode_old_pg_upmap(p, end, map);
2073		if (err)
2074			goto bad;
2075
2076		err = decode_new_pg_upmap_items(p, end, map);
2077		if (err)
2078			goto bad;
2079
2080		err = decode_old_pg_upmap_items(p, end, map);
2081		if (err)
2082			goto bad;
2083	}
2084
2085	/* ignore the rest */
2086	*p = end;
2087
2088	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
2089	return map;
2090
2091e_inval:
2092	err = -EINVAL;
2093bad:
2094	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
2095	       err, epoch, (int)(*p - start), *p, start, end);
2096	print_hex_dump(KERN_DEBUG, "osdmap: ",
2097		       DUMP_PREFIX_OFFSET, 16, 1,
2098		       start, end - start, true);
2099	return ERR_PTR(err);
2100}
2101
2102void ceph_oloc_copy(struct ceph_object_locator *dest,
2103		    const struct ceph_object_locator *src)
2104{
2105	ceph_oloc_destroy(dest);
2106
2107	dest->pool = src->pool;
2108	if (src->pool_ns)
2109		dest->pool_ns = ceph_get_string(src->pool_ns);
2110	else
2111		dest->pool_ns = NULL;
2112}
2113EXPORT_SYMBOL(ceph_oloc_copy);
2114
2115void ceph_oloc_destroy(struct ceph_object_locator *oloc)
2116{
2117	ceph_put_string(oloc->pool_ns);
2118}
2119EXPORT_SYMBOL(ceph_oloc_destroy);
2120
2121void ceph_oid_copy(struct ceph_object_id *dest,
2122		   const struct ceph_object_id *src)
2123{
2124	ceph_oid_destroy(dest);
2125
2126	if (src->name != src->inline_name) {
2127		/* very rare, see ceph_object_id definition */
2128		dest->name = kmalloc(src->name_len + 1,
2129				     GFP_NOIO | __GFP_NOFAIL);
2130	} else {
2131		dest->name = dest->inline_name;
2132	}
2133	memcpy(dest->name, src->name, src->name_len + 1);
2134	dest->name_len = src->name_len;
2135}
2136EXPORT_SYMBOL(ceph_oid_copy);
2137
2138static __printf(2, 0)
2139int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
2140{
2141	int len;
2142
2143	WARN_ON(!ceph_oid_empty(oid));
2144
2145	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
2146	if (len >= sizeof(oid->inline_name))
2147		return len;
2148
2149	oid->name_len = len;
2150	return 0;
2151}
2152
2153/*
2154 * If oid doesn't fit into inline buffer, BUG.
2155 */
2156void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
2157{
2158	va_list ap;
2159
2160	va_start(ap, fmt);
2161	BUG_ON(oid_printf_vargs(oid, fmt, ap));
2162	va_end(ap);
2163}
2164EXPORT_SYMBOL(ceph_oid_printf);
2165
2166static __printf(3, 0)
2167int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
2168		      const char *fmt, va_list ap)
2169{
2170	va_list aq;
2171	int len;
2172
2173	va_copy(aq, ap);
2174	len = oid_printf_vargs(oid, fmt, aq);
2175	va_end(aq);
2176
2177	if (len) {
2178		char *external_name;
2179
2180		external_name = kmalloc(len + 1, gfp);
2181		if (!external_name)
2182			return -ENOMEM;
2183
2184		oid->name = external_name;
2185		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
2186		oid->name_len = len;
2187	}
2188
2189	return 0;
2190}
2191
2192/*
2193 * If oid doesn't fit into inline buffer, allocate.
2194 */
2195int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
2196		     const char *fmt, ...)
2197{
2198	va_list ap;
2199	int ret;
2200
2201	va_start(ap, fmt);
2202	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
2203	va_end(ap);
2204
2205	return ret;
2206}
2207EXPORT_SYMBOL(ceph_oid_aprintf);
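/*
 * Example (illustrative only): a hypothetical caller building an object
 * name with ceph_oid_aprintf().  The format string and values below are
 * assumptions for illustration, not taken from an actual caller.
 *
 *	struct ceph_object_id oid;
 *	int ret;
 *
 *	ceph_oid_init(&oid);
 *	ret = ceph_oid_aprintf(&oid, GFP_NOIO, "backup.%d", 42);
 *	if (ret)
 *		return ret;	(-ENOMEM if the external name allocation failed)
 *	...
 *	ceph_oid_destroy(&oid);
 *
 * Short names stay in the inline buffer; only names longer than
 * inline_name fall back to the kmalloc() path above.
 */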
2208
2209void ceph_oid_destroy(struct ceph_object_id *oid)
2210{
2211	if (oid->name != oid->inline_name)
2212		kfree(oid->name);
2213}
2214EXPORT_SYMBOL(ceph_oid_destroy);
2215
2216/*
2217 * osds only
2218 */
2219static bool __osds_equal(const struct ceph_osds *lhs,
2220			 const struct ceph_osds *rhs)
2221{
2222	if (lhs->size == rhs->size &&
2223	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
2224		return true;
2225
2226	return false;
2227}
2228
2229/*
2230 * osds + primary
2231 */
2232static bool osds_equal(const struct ceph_osds *lhs,
2233		       const struct ceph_osds *rhs)
2234{
2235	if (__osds_equal(lhs, rhs) &&
2236	    lhs->primary == rhs->primary)
2237		return true;
2238
2239	return false;
2240}
2241
2242static bool osds_valid(const struct ceph_osds *set)
2243{
2244	/* non-empty set */
2245	if (set->size > 0 && set->primary >= 0)
2246		return true;
2247
2248	/* empty can_shift_osds set */
2249	if (!set->size && set->primary == -1)
2250		return true;
2251
2252	/* empty !can_shift_osds set - all NONE */
2253	if (set->size > 0 && set->primary == -1) {
2254		int i;
2255
2256		for (i = 0; i < set->size; i++) {
2257			if (set->osds[i] != CRUSH_ITEM_NONE)
2258				break;
2259		}
2260		if (i == set->size)
2261			return true;
2262	}
2263
2264	return false;
2265}
2266
2267void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
2268{
2269	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
2270	dest->size = src->size;
2271	dest->primary = src->primary;
2272}
2273
2274bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
2275		      u32 new_pg_num)
2276{
2277	int old_bits = calc_bits_of(old_pg_num);
2278	int old_mask = (1 << old_bits) - 1;
2279	int n;
2280
2281	WARN_ON(pgid->seed >= old_pg_num);
2282	if (new_pg_num <= old_pg_num)
2283		return false;
2284
2285	for (n = 1; ; n++) {
2286		int next_bit = n << (old_bits - 1);
2287		u32 s = next_bit | pgid->seed;
2288
2289		if (s < old_pg_num || s == pgid->seed)
2290			continue;
2291		if (s >= new_pg_num)
2292			break;
2293
2294		s = ceph_stable_mod(s, old_pg_num, old_mask);
2295		if (s == pgid->seed)
2296			return true;
2297	}
2298
2299	return false;
2300}
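/*
 * Worked example (illustrative only): for a pool growing from
 * old_pg_num = 4 to new_pg_num = 8, old_bits = 3 and old_mask = 7.
 * With pgid->seed = 1, the first candidate child is s = 4 | 1 = 5;
 * it is below new_pg_num, and ceph_stable_mod(5, 4, 7) folds it back
 * to 1, matching the seed, so the function returns true: PG 1 is
 * split into children 1 and 5.
 */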
2301
2302bool ceph_is_new_interval(const struct ceph_osds *old_acting,
2303			  const struct ceph_osds *new_acting,
2304			  const struct ceph_osds *old_up,
2305			  const struct ceph_osds *new_up,
2306			  int old_size,
2307			  int new_size,
2308			  int old_min_size,
2309			  int new_min_size,
2310			  u32 old_pg_num,
2311			  u32 new_pg_num,
2312			  bool old_sort_bitwise,
2313			  bool new_sort_bitwise,
2314			  bool old_recovery_deletes,
2315			  bool new_recovery_deletes,
2316			  const struct ceph_pg *pgid)
2317{
2318	return !osds_equal(old_acting, new_acting) ||
2319	       !osds_equal(old_up, new_up) ||
2320	       old_size != new_size ||
2321	       old_min_size != new_min_size ||
2322	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
2323	       old_sort_bitwise != new_sort_bitwise ||
2324	       old_recovery_deletes != new_recovery_deletes;
2325}
2326
2327static int calc_pg_rank(int osd, const struct ceph_osds *acting)
2328{
2329	int i;
2330
2331	for (i = 0; i < acting->size; i++) {
2332		if (acting->osds[i] == osd)
2333			return i;
2334	}
2335
2336	return -1;
2337}
2338
2339static bool primary_changed(const struct ceph_osds *old_acting,
2340			    const struct ceph_osds *new_acting)
2341{
2342	if (!old_acting->size && !new_acting->size)
2343		return false; /* both still empty */
2344
2345	if (!old_acting->size ^ !new_acting->size)
2346		return true; /* was empty, now not, or vice versa */
2347
2348	if (old_acting->primary != new_acting->primary)
2349		return true; /* primary changed */
2350
2351	if (calc_pg_rank(old_acting->primary, old_acting) !=
2352	    calc_pg_rank(new_acting->primary, new_acting))
2353		return true;
2354
2355	return false; /* same primary (tho replicas may have changed) */
2356}
2357
2358bool ceph_osds_changed(const struct ceph_osds *old_acting,
2359		       const struct ceph_osds *new_acting,
2360		       bool any_change)
2361{
2362	if (primary_changed(old_acting, new_acting))
2363		return true;
2364
2365	if (any_change && !__osds_equal(old_acting, new_acting))
2366		return true;
2367
2368	return false;
2369}
2370
2371/*
2372 * Map an object into a PG.
2373 *
2374 * Should only be called with target_oid and target_oloc (as opposed to
2375 * base_oid and base_oloc), since tiering isn't taken into account.
2376 */
2377void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
2378				 const struct ceph_object_id *oid,
2379				 const struct ceph_object_locator *oloc,
2380				 struct ceph_pg *raw_pgid)
2381{
2382	WARN_ON(pi->id != oloc->pool);
2383
2384	if (!oloc->pool_ns) {
2385		raw_pgid->pool = oloc->pool;
2386		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
2387					     oid->name_len);
2388		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
2389		     raw_pgid->pool, raw_pgid->seed);
2390	} else {
2391		char stack_buf[256];
2392		char *buf = stack_buf;
2393		int nsl = oloc->pool_ns->len;
2394		size_t total = nsl + 1 + oid->name_len;
2395
2396		if (total > sizeof(stack_buf))
2397			buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
2398		memcpy(buf, oloc->pool_ns->str, nsl);
2399		buf[nsl] = '\037';
2400		memcpy(buf + nsl + 1, oid->name, oid->name_len);
2401		raw_pgid->pool = oloc->pool;
2402		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
2403		if (buf != stack_buf)
2404			kfree(buf);
2405		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
2406		     oid->name, nsl, oloc->pool_ns->str,
2407		     raw_pgid->pool, raw_pgid->seed);
2408	}
2409}
2410
2411int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
2412			      const struct ceph_object_id *oid,
2413			      const struct ceph_object_locator *oloc,
2414			      struct ceph_pg *raw_pgid)
2415{
2416	struct ceph_pg_pool_info *pi;
2417
2418	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
2419	if (!pi)
2420		return -ENOENT;
2421
2422	__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
2423	return 0;
2424}
2425EXPORT_SYMBOL(ceph_object_locator_to_pg);
2426
2427/*
2428 * Map a raw PG (full precision ps) into an actual PG.
2429 */
2430static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
2431			 const struct ceph_pg *raw_pgid,
2432			 struct ceph_pg *pgid)
2433{
2434	pgid->pool = raw_pgid->pool;
2435	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
2436				     pi->pg_num_mask);
2437}
2438
2439/*
2440 * Map a raw PG (full precision ps) into a placement ps (placement
2441 * seed).  Include pool id in that value so that different pools don't
2442 * use the same seeds.
2443 */
2444static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
2445			 const struct ceph_pg *raw_pgid)
2446{
2447	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
2448		/* hash pool id and seed so that pool PGs do not overlap */
2449		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
2450				      ceph_stable_mod(raw_pgid->seed,
2451						      pi->pgp_num,
2452						      pi->pgp_num_mask),
2453				      raw_pgid->pool);
2454	} else {
2455		/*
2456		 * legacy behavior: add ps and pool together.  this is
2457		 * not a great approach because the PGs from each pool
2458		 * will overlap on top of each other: 0.5 == 1.4 ==
2459		 * 2.3 == ...
2460		 */
2461		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
2462				       pi->pgp_num_mask) +
2463		       (unsigned)raw_pgid->pool;
2464	}
2465}
2466
2467/*
2468 * Magic value used for a "default" fallback choose_args, used if the
2469 * crush_choose_arg_map passed to do_crush() does not exist.  If this
2470 * also doesn't exist, fall back to canonical weights.
2471 */
2472#define CEPH_DEFAULT_CHOOSE_ARGS	-1
2473
2474static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
2475		    int *result, int result_max,
2476		    const __u32 *weight, int weight_max,
2477		    s64 choose_args_index)
2478{
2479	struct crush_choose_arg_map *arg_map;
2480	struct crush_work *work;
2481	int r;
2482
2483	BUG_ON(result_max > CEPH_PG_MAX_SIZE);
2484
2485	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
2486					choose_args_index);
2487	if (!arg_map)
2488		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
2489						CEPH_DEFAULT_CHOOSE_ARGS);
2490
2491	work = get_workspace(&map->crush_wsm, map->crush);
2492	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
2493			  weight, weight_max, work,
2494			  arg_map ? arg_map->args : NULL);
2495	put_workspace(&map->crush_wsm, work);
2496	return r;
2497}
2498
2499static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
2500				    struct ceph_pg_pool_info *pi,
2501				    struct ceph_osds *set)
2502{
2503	int i;
2504
2505	if (ceph_can_shift_osds(pi)) {
2506		int removed = 0;
2507
2508		/* shift left */
2509		for (i = 0; i < set->size; i++) {
2510			if (!ceph_osd_exists(osdmap, set->osds[i])) {
2511				removed++;
2512				continue;
2513			}
2514			if (removed)
2515				set->osds[i - removed] = set->osds[i];
2516		}
2517		set->size -= removed;
2518	} else {
2519		/* set dne devices to NONE */
2520		for (i = 0; i < set->size; i++) {
2521			if (!ceph_osd_exists(osdmap, set->osds[i]))
2522				set->osds[i] = CRUSH_ITEM_NONE;
2523		}
2524	}
2525}
2526
2527/*
2528 * Calculate raw set (CRUSH output) for given PG and filter out
2529 * nonexistent OSDs.  ->primary is undefined for a raw set.
2530 *
2531 * Placement seed (CRUSH input) is returned through @ppps.
2532 */
2533static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
2534			   struct ceph_pg_pool_info *pi,
2535			   const struct ceph_pg *raw_pgid,
2536			   struct ceph_osds *raw,
2537			   u32 *ppps)
2538{
2539	u32 pps = raw_pg_to_pps(pi, raw_pgid);
2540	int ruleno;
2541	int len;
2542
2543	ceph_osds_init(raw);
2544	if (ppps)
2545		*ppps = pps;
2546
2547	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
2548				 pi->size);
2549	if (ruleno < 0) {
2550		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
2551		       pi->id, pi->crush_ruleset, pi->type, pi->size);
2552		return;
2553	}
2554
2555	if (pi->size > ARRAY_SIZE(raw->osds)) {
2556		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
2557		       pi->id, pi->crush_ruleset, pi->type, pi->size,
2558		       ARRAY_SIZE(raw->osds));
2559		return;
2560	}
2561
2562	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
2563		       osdmap->osd_weight, osdmap->max_osd, pi->id);
2564	if (len < 0) {
2565		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
2566		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
2567		       pi->size);
2568		return;
2569	}
2570
2571	raw->size = len;
2572	remove_nonexistent_osds(osdmap, pi, raw);
2573}
2574
2575/* apply pg_upmap[_items] mappings */
2576static void apply_upmap(struct ceph_osdmap *osdmap,
2577			const struct ceph_pg *pgid,
2578			struct ceph_osds *raw)
2579{
2580	struct ceph_pg_mapping *pg;
2581	int i, j;
2582
2583	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
2584	if (pg) {
2585		/* make sure targets aren't marked out */
2586		for (i = 0; i < pg->pg_upmap.len; i++) {
2587			int osd = pg->pg_upmap.osds[i];
2588
2589			if (osd != CRUSH_ITEM_NONE &&
2590			    osd < osdmap->max_osd &&
2591			    osdmap->osd_weight[osd] == 0) {
2592				/* reject/ignore explicit mapping */
2593				return;
2594			}
2595		}
2596		for (i = 0; i < pg->pg_upmap.len; i++)
2597			raw->osds[i] = pg->pg_upmap.osds[i];
2598		raw->size = pg->pg_upmap.len;
2599		/* check and apply pg_upmap_items, if any */
2600	}
2601
2602	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
2603	if (pg) {
2604		/*
2605		 * Note: this approach does not allow a bidirectional swap,
2606		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
2607		 */
2608		for (i = 0; i < pg->pg_upmap_items.len; i++) {
2609			int from = pg->pg_upmap_items.from_to[i][0];
2610			int to = pg->pg_upmap_items.from_to[i][1];
2611			int pos = -1;
2612			bool exists = false;
2613
2614			/* make sure replacement doesn't already appear */
2615			for (j = 0; j < raw->size; j++) {
2616				int osd = raw->osds[j];
2617
2618				if (osd == to) {
2619					exists = true;
2620					break;
2621				}
2622				/* ignore mapping if target is marked out */
2623				if (osd == from && pos < 0 &&
2624				    !(to != CRUSH_ITEM_NONE &&
2625				      to < osdmap->max_osd &&
2626				      osdmap->osd_weight[to] == 0)) {
2627					pos = j;
2628				}
2629			}
2630			if (!exists && pos >= 0)
2631				raw->osds[pos] = to;
2632		}
2633	}
2634}
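/*
 * Example (illustrative only, with assumed OSD ids): a pg_upmap_items
 * entry [[3, 6]] applied to a raw set [1, 3, 5] replaces osd.3 with
 * osd.6, yielding [1, 6, 5], provided osd.6 is not already in the set
 * and is not marked out (weight 0).
 */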
2635
2636/*
2637 * Given raw set, calculate up set and up primary.  By definition of an
2638 * up set, the result won't contain nonexistent or down OSDs.
2639 *
2640 * This is done in-place - on return @set is the up set.  If it's
2641 * empty, ->primary will remain undefined.
2642 */
2643static void raw_to_up_osds(struct ceph_osdmap *osdmap,
2644			   struct ceph_pg_pool_info *pi,
2645			   struct ceph_osds *set)
2646{
2647	int i;
2648
2649	/* ->primary is undefined for a raw set */
2650	BUG_ON(set->primary != -1);
2651
2652	if (ceph_can_shift_osds(pi)) {
2653		int removed = 0;
2654
2655		/* shift left */
2656		for (i = 0; i < set->size; i++) {
2657			if (ceph_osd_is_down(osdmap, set->osds[i])) {
2658				removed++;
2659				continue;
2660			}
2661			if (removed)
2662				set->osds[i - removed] = set->osds[i];
2663		}
2664		set->size -= removed;
2665		if (set->size > 0)
2666			set->primary = set->osds[0];
2667	} else {
2668		/* set down/dne devices to NONE */
2669		for (i = set->size - 1; i >= 0; i--) {
2670			if (ceph_osd_is_down(osdmap, set->osds[i]))
2671				set->osds[i] = CRUSH_ITEM_NONE;
2672			else
2673				set->primary = set->osds[i];
2674		}
2675	}
2676}
2677
2678static void apply_primary_affinity(struct ceph_osdmap *osdmap,
2679				   struct ceph_pg_pool_info *pi,
2680				   u32 pps,
2681				   struct ceph_osds *up)
2682{
2683	int i;
2684	int pos = -1;
2685
2686	/*
2687	 * Do we have any non-default primary_affinity values for these
2688	 * osds?
2689	 */
2690	if (!osdmap->osd_primary_affinity)
2691		return;
2692
2693	for (i = 0; i < up->size; i++) {
2694		int osd = up->osds[i];
2695
2696		if (osd != CRUSH_ITEM_NONE &&
2697		    osdmap->osd_primary_affinity[osd] !=
2698					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
2699			break;
2700		}
2701	}
2702	if (i == up->size)
2703		return;
2704
2705	/*
2706	 * Pick the primary.  Feed both the seed (for the pg) and the
2707	 * osd into the hash/rng so that a proportional fraction of an
2708	 * osd's pgs get rejected as primary.
2709	 */
2710	for (i = 0; i < up->size; i++) {
2711		int osd = up->osds[i];
2712		u32 aff;
2713
2714		if (osd == CRUSH_ITEM_NONE)
2715			continue;
2716
2717		aff = osdmap->osd_primary_affinity[osd];
2718		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
2719		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
2720				    pps, osd) >> 16) >= aff) {
2721			/*
2722			 * We chose not to use this primary.  Note it
2723			 * anyway as a fallback in case we don't pick
2724			 * anyone else, but keep looking.
2725			 */
2726			if (pos < 0)
2727				pos = i;
2728		} else {
2729			pos = i;
2730			break;
2731		}
2732	}
2733	if (pos < 0)
2734		return;
2735
2736	up->primary = up->osds[pos];
2737
2738	if (ceph_can_shift_osds(pi) && pos > 0) {
2739		/* move the new primary to the front */
2740		for (i = pos; i > 0; i--)
2741			up->osds[i] = up->osds[i - 1];
2742		up->osds[0] = up->primary;
2743	}
2744}
2745
2746/*
2747 * Get pg_temp and primary_temp mappings for given PG.
2748 *
2749 * Note that a PG may have none, only pg_temp, only primary_temp or
2750 * both pg_temp and primary_temp mappings.  This means @temp isn't
2751 * always a valid OSD set on return: in the "only primary_temp" case,
2752 * @temp will have its ->primary >= 0 but ->size == 0.
2753 */
2754static void get_temp_osds(struct ceph_osdmap *osdmap,
2755			  struct ceph_pg_pool_info *pi,
2756			  const struct ceph_pg *pgid,
2757			  struct ceph_osds *temp)
2758{
2759	struct ceph_pg_mapping *pg;
2760	int i;
2761
2762	ceph_osds_init(temp);
2763
2764	/* pg_temp? */
2765	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
2766	if (pg) {
2767		for (i = 0; i < pg->pg_temp.len; i++) {
2768			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
2769				if (ceph_can_shift_osds(pi))
2770					continue;
2771
2772				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
2773			} else {
2774				temp->osds[temp->size++] = pg->pg_temp.osds[i];
2775			}
2776		}
2777
2778		/* apply pg_temp's primary */
2779		for (i = 0; i < temp->size; i++) {
2780			if (temp->osds[i] != CRUSH_ITEM_NONE) {
2781				temp->primary = temp->osds[i];
2782				break;
2783			}
2784		}
2785	}
2786
2787	/* primary_temp? */
2788	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
2789	if (pg)
2790		temp->primary = pg->primary_temp.osd;
2791}
2792
2793/*
2794 * Map a PG to its acting set as well as its up set.
2795 *
2796 * Acting set is used for data mapping purposes, while up set can be
2797 * recorded for detecting interval changes and deciding whether to
2798 * resend a request.
2799 */
2800void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
2801			       struct ceph_pg_pool_info *pi,
2802			       const struct ceph_pg *raw_pgid,
2803			       struct ceph_osds *up,
2804			       struct ceph_osds *acting)
2805{
2806	struct ceph_pg pgid;
2807	u32 pps;
2808
2809	WARN_ON(pi->id != raw_pgid->pool);
2810	raw_pg_to_pg(pi, raw_pgid, &pgid);
2811
2812	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
2813	apply_upmap(osdmap, &pgid, up);
2814	raw_to_up_osds(osdmap, pi, up);
2815	apply_primary_affinity(osdmap, pi, pps, up);
2816	get_temp_osds(osdmap, pi, &pgid, acting);
2817	if (!acting->size) {
2818		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
2819		acting->size = up->size;
2820		if (acting->primary == -1)
2821			acting->primary = up->primary;
2822	}
2823	WARN_ON(!osds_valid(up) || !osds_valid(acting));
2824}
2825
2826bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
2827			      struct ceph_pg_pool_info *pi,
2828			      const struct ceph_pg *raw_pgid,
2829			      struct ceph_spg *spgid)
2830{
2831	struct ceph_pg pgid;
2832	struct ceph_osds up, acting;
2833	int i;
2834
2835	WARN_ON(pi->id != raw_pgid->pool);
2836	raw_pg_to_pg(pi, raw_pgid, &pgid);
2837
2838	if (ceph_can_shift_osds(pi)) {
2839		spgid->pgid = pgid; /* struct */
2840		spgid->shard = CEPH_SPG_NOSHARD;
2841		return true;
2842	}
2843
2844	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
2845	for (i = 0; i < acting.size; i++) {
2846		if (acting.osds[i] == acting.primary) {
2847			spgid->pgid = pgid; /* struct */
2848			spgid->shard = i;
2849			return true;
2850		}
2851	}
2852
2853	return false;
2854}
2855
2856/*
2857 * Return acting primary for given PG, or -1 if none.
2858 */
2859int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
2860			      const struct ceph_pg *raw_pgid)
2861{
2862	struct ceph_pg_pool_info *pi;
2863	struct ceph_osds up, acting;
2864
2865	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
2866	if (!pi)
2867		return -1;
2868
2869	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
2870	return acting.primary;
2871}
2872EXPORT_SYMBOL(ceph_pg_to_acting_primary);
2873
2874static struct crush_loc_node *alloc_crush_loc(size_t type_name_len,
2875					      size_t name_len)
2876{
2877	struct crush_loc_node *loc;
2878
2879	loc = kmalloc(sizeof(*loc) + type_name_len + name_len + 2, GFP_NOIO);
2880	if (!loc)
2881		return NULL;
2882
2883	RB_CLEAR_NODE(&loc->cl_node);
2884	return loc;
2885}
2886
2887static void free_crush_loc(struct crush_loc_node *loc)
2888{
2889	WARN_ON(!RB_EMPTY_NODE(&loc->cl_node));
2890
2891	kfree(loc);
2892}
2893
2894static int crush_loc_compare(const struct crush_loc *loc1,
2895			     const struct crush_loc *loc2)
2896{
2897	return strcmp(loc1->cl_type_name, loc2->cl_type_name) ?:
2898	       strcmp(loc1->cl_name, loc2->cl_name);
2899}
2900
2901DEFINE_RB_FUNCS2(crush_loc, struct crush_loc_node, cl_loc, crush_loc_compare,
2902		 RB_BYPTR, const struct crush_loc *, cl_node)
2903
2904/*
2905 * Parses a set of <bucket type name>':'<bucket name> pairs separated
2906 * by '|', e.g. "rack:foo1|rack:foo2|datacenter:bar".
2907 *
2908 * Note that @crush_location is modified by strsep().
2909 */
2910int ceph_parse_crush_location(char *crush_location, struct rb_root *locs)
2911{
2912	struct crush_loc_node *loc;
2913	const char *type_name, *name, *colon;
2914	size_t type_name_len, name_len;
2915
2916	dout("%s '%s'\n", __func__, crush_location);
2917	while ((type_name = strsep(&crush_location, "|"))) {
2918		colon = strchr(type_name, ':');
2919		if (!colon)
2920			return -EINVAL;
2921
2922		type_name_len = colon - type_name;
2923		if (type_name_len == 0)
2924			return -EINVAL;
2925
2926		name = colon + 1;
2927		name_len = strlen(name);
2928		if (name_len == 0)
2929			return -EINVAL;
2930
2931		loc = alloc_crush_loc(type_name_len, name_len);
2932		if (!loc)
2933			return -ENOMEM;
2934
2935		loc->cl_loc.cl_type_name = loc->cl_data;
2936		memcpy(loc->cl_loc.cl_type_name, type_name, type_name_len);
2937		loc->cl_loc.cl_type_name[type_name_len] = '\0';
2938
2939		loc->cl_loc.cl_name = loc->cl_data + type_name_len + 1;
2940		memcpy(loc->cl_loc.cl_name, name, name_len);
2941		loc->cl_loc.cl_name[name_len] = '\0';
2942
2943		if (!__insert_crush_loc(locs, loc)) {
2944			free_crush_loc(loc);
2945			return -EEXIST;
2946		}
2947
2948		dout("%s type_name '%s' name '%s'\n", __func__,
2949		     loc->cl_loc.cl_type_name, loc->cl_loc.cl_name);
2950	}
2951
2952	return 0;
2953}
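/*
 * Example (illustrative only; the location string and variable names are
 * assumed): parsing a crush_location option string into an rb-tree of
 * crush_loc_node entries and releasing it when done.
 *
 *	struct rb_root locs = RB_ROOT;
 *	char str[] = "host:node1|rack:rack1";
 *	int ret;
 *
 *	ret = ceph_parse_crush_location(str, &locs);	(str is modified)
 *	if (ret)
 *		return ret;
 *	...	e.g. feed &locs to ceph_get_crush_locality()
 *	ceph_clear_crush_locs(&locs);
 */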
2954
2955int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2)
2956{
2957	struct rb_node *n1 = rb_first(locs1);
2958	struct rb_node *n2 = rb_first(locs2);
2959	int ret;
2960
2961	for ( ; n1 && n2; n1 = rb_next(n1), n2 = rb_next(n2)) {
2962		struct crush_loc_node *loc1 =
2963		    rb_entry(n1, struct crush_loc_node, cl_node);
2964		struct crush_loc_node *loc2 =
2965		    rb_entry(n2, struct crush_loc_node, cl_node);
2966
2967		ret = crush_loc_compare(&loc1->cl_loc, &loc2->cl_loc);
2968		if (ret)
2969			return ret;
2970	}
2971
2972	if (!n1 && n2)
2973		return -1;
2974	if (n1 && !n2)
2975		return 1;
2976	return 0;
2977}
2978
2979void ceph_clear_crush_locs(struct rb_root *locs)
2980{
2981	while (!RB_EMPTY_ROOT(locs)) {
2982		struct crush_loc_node *loc =
2983		    rb_entry(rb_first(locs), struct crush_loc_node, cl_node);
2984
2985		erase_crush_loc(locs, loc);
2986		free_crush_loc(loc);
2987	}
2988}
2989
2990/*
2991 * [a-zA-Z0-9-_.]+
2992 */
2993static bool is_valid_crush_name(const char *name)
2994{
2995	do {
2996		if (!('a' <= *name && *name <= 'z') &&
2997		    !('A' <= *name && *name <= 'Z') &&
2998		    !('0' <= *name && *name <= '9') &&
2999		    *name != '-' && *name != '_' && *name != '.')
3000			return false;
3001	} while (*++name != '\0');
3002
3003	return true;
3004}
3005
3006/*
3007 * Gets the parent of an item.  Returns its id (<0 because the
3008 * parent is always a bucket), type id (>0 for the same reason,
3009 * via @parent_type_id) and location (via @parent_loc).  If no
3010 * parent, returns 0.
3011 *
3012 * Does a linear search, as there are no parent pointers of any
3013 * kind.  Note that the result is ambiguous for items that occur
3014 * multiple times in the map.
3015 */
3016static int get_immediate_parent(struct crush_map *c, int id,
3017				u16 *parent_type_id,
3018				struct crush_loc *parent_loc)
3019{
3020	struct crush_bucket *b;
3021	struct crush_name_node *type_cn, *cn;
3022	int i, j;
3023
3024	for (i = 0; i < c->max_buckets; i++) {
3025		b = c->buckets[i];
3026		if (!b)
3027			continue;
3028
3029		/* ignore per-class shadow hierarchy */
3030		cn = lookup_crush_name(&c->names, b->id);
3031		if (!cn || !is_valid_crush_name(cn->cn_name))
3032			continue;
3033
3034		for (j = 0; j < b->size; j++) {
3035			if (b->items[j] != id)
3036				continue;
3037
3038			*parent_type_id = b->type;
3039			type_cn = lookup_crush_name(&c->type_names, b->type);
3040			parent_loc->cl_type_name = type_cn->cn_name;
3041			parent_loc->cl_name = cn->cn_name;
3042			return b->id;
3043		}
3044	}
3045
3046	return 0;  /* no parent */
3047}
3048
3049/*
3050 * Calculates the locality/distance from an item to a client
3051 * location expressed in terms of CRUSH hierarchy as a set of
3052 * (bucket type name, bucket name) pairs.  Specifically, looks
3053 * for the lowest-valued bucket type for which the location of
3054 * @id matches one of the locations in @locs, so for standard
3055 * bucket types (host = 1, rack = 3, datacenter = 8, zone = 9)
3056 * a matching host is closer than a matching rack and a matching
3057 * data center is closer than a matching zone.
3058 *
3059 * Specifying multiple locations (a "multipath" location) such
3060 * as "rack=foo1 rack=foo2 datacenter=bar" is allowed -- @locs
3061 * is a multimap.  The locality will be:
3062 *
3063 * - 3 for OSDs in racks foo1 and foo2
3064 * - 8 for OSDs in data center bar
3065 * - -1 for all other OSDs
3066 *
3067 * The lowest possible bucket type is 1, so the best locality
3068 * for an OSD is 1 (i.e. a matching host).  Locality 0 would be
3069 * the OSD itself.
3070 */
3071int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
3072			    struct rb_root *locs)
3073{
3074	struct crush_loc loc;
3075	u16 type_id;
3076
3077	/*
3078	 * Instead of repeated get_immediate_parent() calls,
3079	 * the location of @id could be obtained with a single
3080	 * depth-first traversal.
3081	 */
3082	for (;;) {
3083		id = get_immediate_parent(osdmap->crush, id, &type_id, &loc);
3084		if (id >= 0)
3085			return -1;  /* not local */
3086
3087		if (lookup_crush_loc(locs, &loc))
3088			return type_id;
3089	}
3090}
 118		b->straws[j] = ceph_decode_32(p);
 119	}
 120	return 0;
 121bad:
 122	return -EINVAL;
 123}
 124
 125static int crush_decode_straw2_bucket(void **p, void *end,
 126				      struct crush_bucket_straw2 *b)
 127{
 128	int j;
 129	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
 130	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
 131	if (b->item_weights == NULL)
 132		return -ENOMEM;
 133	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
 134	for (j = 0; j < b->h.size; j++)
 135		b->item_weights[j] = ceph_decode_32(p);
 136	return 0;
 137bad:
 138	return -EINVAL;
 139}
 140
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 141static struct crush_choose_arg_map *alloc_choose_arg_map(void)
 142{
 143	struct crush_choose_arg_map *arg_map;
 144
 145	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
 146	if (!arg_map)
 147		return NULL;
 148
 149	RB_CLEAR_NODE(&arg_map->node);
 150	return arg_map;
 151}
 152
 153static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
 154{
 155	if (arg_map) {
 156		int i, j;
 157
 158		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
 159
 160		for (i = 0; i < arg_map->size; i++) {
 161			struct crush_choose_arg *arg = &arg_map->args[i];
 162
 163			for (j = 0; j < arg->weight_set_size; j++)
 164				kfree(arg->weight_set[j].weights);
 165			kfree(arg->weight_set);
 166			kfree(arg->ids);
 167		}
 168		kfree(arg_map->args);
 169		kfree(arg_map);
 170	}
 171}
 172
 173DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
 174		node);
 175
 176void clear_choose_args(struct crush_map *c)
 177{
 178	while (!RB_EMPTY_ROOT(&c->choose_args)) {
 179		struct crush_choose_arg_map *arg_map =
 180		    rb_entry(rb_first(&c->choose_args),
 181			     struct crush_choose_arg_map, node);
 182
 183		erase_choose_arg_map(&c->choose_args, arg_map);
 184		free_choose_arg_map(arg_map);
 185	}
 186}
 187
 188static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
 189{
 190	u32 *a = NULL;
 191	u32 len;
 192	int ret;
 193
 194	ceph_decode_32_safe(p, end, len, e_inval);
 195	if (len) {
 196		u32 i;
 197
 198		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
 199		if (!a) {
 200			ret = -ENOMEM;
 201			goto fail;
 202		}
 203
 204		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
 205		for (i = 0; i < len; i++)
 206			a[i] = ceph_decode_32(p);
 207	}
 208
 209	*plen = len;
 210	return a;
 211
 212e_inval:
 213	ret = -EINVAL;
 214fail:
 215	kfree(a);
 216	return ERR_PTR(ret);
 217}
 218
 219/*
 220 * Assumes @arg is zero-initialized.
 221 */
 222static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
 223{
 224	int ret;
 225
 226	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
 227	if (arg->weight_set_size) {
 228		u32 i;
 229
 230		arg->weight_set = kmalloc_array(arg->weight_set_size,
 231						sizeof(*arg->weight_set),
 232						GFP_NOIO);
 233		if (!arg->weight_set)
 234			return -ENOMEM;
 235
 236		for (i = 0; i < arg->weight_set_size; i++) {
 237			struct crush_weight_set *w = &arg->weight_set[i];
 238
 239			w->weights = decode_array_32_alloc(p, end, &w->size);
 240			if (IS_ERR(w->weights)) {
 241				ret = PTR_ERR(w->weights);
 242				w->weights = NULL;
 243				return ret;
 244			}
 245		}
 246	}
 247
 248	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
 249	if (IS_ERR(arg->ids)) {
 250		ret = PTR_ERR(arg->ids);
 251		arg->ids = NULL;
 252		return ret;
 253	}
 254
 255	return 0;
 256
 257e_inval:
 258	return -EINVAL;
 259}
 260
 261static int decode_choose_args(void **p, void *end, struct crush_map *c)
 262{
 263	struct crush_choose_arg_map *arg_map = NULL;
 264	u32 num_choose_arg_maps, num_buckets;
 265	int ret;
 266
 267	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
 268	while (num_choose_arg_maps--) {
 269		arg_map = alloc_choose_arg_map();
 270		if (!arg_map) {
 271			ret = -ENOMEM;
 272			goto fail;
 273		}
 274
 275		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
 276				    e_inval);
 277		arg_map->size = c->max_buckets;
 278		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
 279					GFP_NOIO);
 280		if (!arg_map->args) {
 281			ret = -ENOMEM;
 282			goto fail;
 283		}
 284
 285		ceph_decode_32_safe(p, end, num_buckets, e_inval);
 286		while (num_buckets--) {
 287			struct crush_choose_arg *arg;
 288			u32 bucket_index;
 289
 290			ceph_decode_32_safe(p, end, bucket_index, e_inval);
 291			if (bucket_index >= arg_map->size)
 292				goto e_inval;
 293
 294			arg = &arg_map->args[bucket_index];
 295			ret = decode_choose_arg(p, end, arg);
 296			if (ret)
 297				goto fail;
 298
 299			if (arg->ids_size &&
 300			    arg->ids_size != c->buckets[bucket_index]->size)
 301				goto e_inval;
 302		}
 303
 304		insert_choose_arg_map(&c->choose_args, arg_map);
 305	}
 306
 307	return 0;
 308
 309e_inval:
 310	ret = -EINVAL;
 311fail:
 312	free_choose_arg_map(arg_map);
 313	return ret;
 314}
 315
 316static void crush_finalize(struct crush_map *c)
 317{
 318	__s32 b;
 319
 320	/* Space for the array of pointers to per-bucket workspace */
 321	c->working_size = sizeof(struct crush_work) +
 322	    c->max_buckets * sizeof(struct crush_work_bucket *);
 323
 324	for (b = 0; b < c->max_buckets; b++) {
 325		if (!c->buckets[b])
 326			continue;
 327
 328		switch (c->buckets[b]->alg) {
 329		default:
 330			/*
 331			 * The base case, permutation variables and
 332			 * the pointer to the permutation array.
 333			 */
 334			c->working_size += sizeof(struct crush_work_bucket);
 335			break;
 336		}
 337		/* Every bucket has a permutation array. */
 338		c->working_size += c->buckets[b]->size * sizeof(__u32);
 339	}
 340}
 341
 342static struct crush_map *crush_decode(void *pbyval, void *end)
 343{
 344	struct crush_map *c;
 345	int err;
 346	int i, j;
 347	void **p = &pbyval;
 348	void *start = pbyval;
 349	u32 magic;
 350
 351	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
 352
 353	c = kzalloc(sizeof(*c), GFP_NOFS);
 354	if (c == NULL)
 355		return ERR_PTR(-ENOMEM);
 356
 
 
 357	c->choose_args = RB_ROOT;
 358
 359        /* set tunables to default values */
 360        c->choose_local_tries = 2;
 361        c->choose_local_fallback_tries = 5;
 362        c->choose_total_tries = 19;
 363	c->chooseleaf_descend_once = 0;
 364
 365	ceph_decode_need(p, end, 4*sizeof(u32), bad);
 366	magic = ceph_decode_32(p);
 367	if (magic != CRUSH_MAGIC) {
 368		pr_err("crush_decode magic %x != current %x\n",
 369		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
 370		goto bad;
 371	}
 372	c->max_buckets = ceph_decode_32(p);
 373	c->max_rules = ceph_decode_32(p);
 374	c->max_devices = ceph_decode_32(p);
 375
 376	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
 377	if (c->buckets == NULL)
 378		goto badmem;
 379	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
 380	if (c->rules == NULL)
 381		goto badmem;
 382
 383	/* buckets */
 384	for (i = 0; i < c->max_buckets; i++) {
 385		int size = 0;
 386		u32 alg;
 387		struct crush_bucket *b;
 388
 389		ceph_decode_32_safe(p, end, alg, bad);
 390		if (alg == 0) {
 391			c->buckets[i] = NULL;
 392			continue;
 393		}
 394		dout("crush_decode bucket %d off %x %p to %p\n",
 395		     i, (int)(*p-start), *p, end);
 396
 397		switch (alg) {
 398		case CRUSH_BUCKET_UNIFORM:
 399			size = sizeof(struct crush_bucket_uniform);
 400			break;
 401		case CRUSH_BUCKET_LIST:
 402			size = sizeof(struct crush_bucket_list);
 403			break;
 404		case CRUSH_BUCKET_TREE:
 405			size = sizeof(struct crush_bucket_tree);
 406			break;
 407		case CRUSH_BUCKET_STRAW:
 408			size = sizeof(struct crush_bucket_straw);
 409			break;
 410		case CRUSH_BUCKET_STRAW2:
 411			size = sizeof(struct crush_bucket_straw2);
 412			break;
 413		default:
 414			goto bad;
 415		}
 416		BUG_ON(size == 0);
 417		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
 418		if (b == NULL)
 419			goto badmem;
 420
 421		ceph_decode_need(p, end, 4*sizeof(u32), bad);
 422		b->id = ceph_decode_32(p);
 423		b->type = ceph_decode_16(p);
 424		b->alg = ceph_decode_8(p);
 425		b->hash = ceph_decode_8(p);
 426		b->weight = ceph_decode_32(p);
 427		b->size = ceph_decode_32(p);
 428
 429		dout("crush_decode bucket size %d off %x %p to %p\n",
 430		     b->size, (int)(*p-start), *p, end);
 431
 432		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
 433		if (b->items == NULL)
 434			goto badmem;
 435
 436		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
 437		for (j = 0; j < b->size; j++)
 438			b->items[j] = ceph_decode_32(p);
 439
 440		switch (b->alg) {
 441		case CRUSH_BUCKET_UNIFORM:
 442			err = crush_decode_uniform_bucket(p, end,
 443				  (struct crush_bucket_uniform *)b);
 444			if (err < 0)
 445				goto fail;
 446			break;
 447		case CRUSH_BUCKET_LIST:
 448			err = crush_decode_list_bucket(p, end,
 449			       (struct crush_bucket_list *)b);
 450			if (err < 0)
 451				goto fail;
 452			break;
 453		case CRUSH_BUCKET_TREE:
 454			err = crush_decode_tree_bucket(p, end,
 455				(struct crush_bucket_tree *)b);
 456			if (err < 0)
 457				goto fail;
 458			break;
 459		case CRUSH_BUCKET_STRAW:
 460			err = crush_decode_straw_bucket(p, end,
 461				(struct crush_bucket_straw *)b);
 462			if (err < 0)
 463				goto fail;
 464			break;
 465		case CRUSH_BUCKET_STRAW2:
 466			err = crush_decode_straw2_bucket(p, end,
 467				(struct crush_bucket_straw2 *)b);
 468			if (err < 0)
 469				goto fail;
 470			break;
 471		}
 472	}
 473
 474	/* rules */
 475	dout("rule vec is %p\n", c->rules);
 476	for (i = 0; i < c->max_rules; i++) {
 477		u32 yes;
 478		struct crush_rule *r;
 479
 480		ceph_decode_32_safe(p, end, yes, bad);
 481		if (!yes) {
 482			dout("crush_decode NO rule %d off %x %p to %p\n",
 483			     i, (int)(*p-start), *p, end);
 484			c->rules[i] = NULL;
 485			continue;
 486		}
 487
 488		dout("crush_decode rule %d off %x %p to %p\n",
 489		     i, (int)(*p-start), *p, end);
 490
 491		/* len */
 492		ceph_decode_32_safe(p, end, yes, bad);
 493#if BITS_PER_LONG == 32
 494		if (yes > (ULONG_MAX - sizeof(*r))
 495			  / sizeof(struct crush_rule_step))
 496			goto bad;
 497#endif
 498		r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
 499		c->rules[i] = r;
 500		if (r == NULL)
 501			goto badmem;
 502		dout(" rule %d is at %p\n", i, r);
 503		r->len = yes;
 504		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
 505		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
 506		for (j = 0; j < r->len; j++) {
 507			r->steps[j].op = ceph_decode_32(p);
 508			r->steps[j].arg1 = ceph_decode_32(p);
 509			r->steps[j].arg2 = ceph_decode_32(p);
 510		}
 511	}
 512
 513	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
 514	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
 
 
 
 
 
 
 515	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */
 516
 517        /* tunables */
 518        ceph_decode_need(p, end, 3*sizeof(u32), done);
 519        c->choose_local_tries = ceph_decode_32(p);
 520        c->choose_local_fallback_tries =  ceph_decode_32(p);
 521        c->choose_total_tries = ceph_decode_32(p);
 522        dout("crush decode tunable choose_local_tries = %d\n",
 523             c->choose_local_tries);
 524        dout("crush decode tunable choose_local_fallback_tries = %d\n",
 525             c->choose_local_fallback_tries);
 526        dout("crush decode tunable choose_total_tries = %d\n",
 527             c->choose_total_tries);
 528
 529	ceph_decode_need(p, end, sizeof(u32), done);
 530	c->chooseleaf_descend_once = ceph_decode_32(p);
 531	dout("crush decode tunable chooseleaf_descend_once = %d\n",
 532	     c->chooseleaf_descend_once);
 533
 534	ceph_decode_need(p, end, sizeof(u8), done);
 535	c->chooseleaf_vary_r = ceph_decode_8(p);
 536	dout("crush decode tunable chooseleaf_vary_r = %d\n",
 537	     c->chooseleaf_vary_r);
 538
 539	/* skip straw_calc_version, allowed_bucket_algs */
 540	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
 541	*p += sizeof(u8) + sizeof(u32);
 542
 543	ceph_decode_need(p, end, sizeof(u8), done);
 544	c->chooseleaf_stable = ceph_decode_8(p);
 545	dout("crush decode tunable chooseleaf_stable = %d\n",
 546	     c->chooseleaf_stable);
 547
 548	if (*p != end) {
 549		/* class_map */
 550		ceph_decode_skip_map(p, end, 32, 32, bad);
 551		/* class_name */
 552		ceph_decode_skip_map(p, end, 32, string, bad);
 553		/* class_bucket */
 554		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
 555	}
 556
 557	if (*p != end) {
 558		err = decode_choose_args(p, end, c);
 559		if (err)
 560			goto fail;
 561	}
 562
 563done:
 564	crush_finalize(c);
 565	dout("crush_decode success\n");
 566	return c;
 567
 568badmem:
 569	err = -ENOMEM;
 570fail:
 571	dout("crush_decode fail %d\n", err);
 572	crush_destroy(c);
 573	return ERR_PTR(err);
 574
 575bad:
 576	err = -EINVAL;
 577	goto fail;
 578}
 579
 580int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
 581{
 582	if (lhs->pool < rhs->pool)
 583		return -1;
 584	if (lhs->pool > rhs->pool)
 585		return 1;
 586	if (lhs->seed < rhs->seed)
 587		return -1;
 588	if (lhs->seed > rhs->seed)
 589		return 1;
 590
 591	return 0;
 592}
 593
 594int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
 595{
 596	int ret;
 597
 598	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
 599	if (ret)
 600		return ret;
 601
 602	if (lhs->shard < rhs->shard)
 603		return -1;
 604	if (lhs->shard > rhs->shard)
 605		return 1;
 606
 607	return 0;
 608}
 609
 610static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
 611{
 612	struct ceph_pg_mapping *pg;
 613
 614	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
 615	if (!pg)
 616		return NULL;
 617
 618	RB_CLEAR_NODE(&pg->node);
 619	return pg;
 620}
 621
 622static void free_pg_mapping(struct ceph_pg_mapping *pg)
 623{
 624	WARN_ON(!RB_EMPTY_NODE(&pg->node));
 625
 626	kfree(pg);
 627}
 628
 629/*
 630 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 631 * to a set of osds) and primary_temp (explicit primary setting)
 632 */
 633DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
 634		 RB_BYPTR, const struct ceph_pg *, node)
 635
 636/*
 637 * rbtree of pg pool info
 638 */
 639static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
 640{
 641	struct rb_node **p = &root->rb_node;
 642	struct rb_node *parent = NULL;
 643	struct ceph_pg_pool_info *pi = NULL;
 644
 645	while (*p) {
 646		parent = *p;
 647		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
 648		if (new->id < pi->id)
 649			p = &(*p)->rb_left;
 650		else if (new->id > pi->id)
 651			p = &(*p)->rb_right;
 652		else
 653			return -EEXIST;
 654	}
 655
 656	rb_link_node(&new->node, parent, p);
 657	rb_insert_color(&new->node, root);
 658	return 0;
 659}
 660
 661static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
 662{
 663	struct ceph_pg_pool_info *pi;
 664	struct rb_node *n = root->rb_node;
 665
 666	while (n) {
 667		pi = rb_entry(n, struct ceph_pg_pool_info, node);
 668		if (id < pi->id)
 669			n = n->rb_left;
 670		else if (id > pi->id)
 671			n = n->rb_right;
 672		else
 673			return pi;
 674	}
 675	return NULL;
 676}
 677
 678struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
 679{
 680	return __lookup_pg_pool(&map->pg_pools, id);
 681}
 682
 683const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
 684{
 685	struct ceph_pg_pool_info *pi;
 686
 687	if (id == CEPH_NOPOOL)
 688		return NULL;
 689
 690	if (WARN_ON_ONCE(id > (u64) INT_MAX))
 691		return NULL;
 692
 693	pi = __lookup_pg_pool(&map->pg_pools, (int) id);
 694
 695	return pi ? pi->name : NULL;
 696}
 697EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
 698
 699int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 700{
 701	struct rb_node *rbp;
 702
 703	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
 704		struct ceph_pg_pool_info *pi =
 705			rb_entry(rbp, struct ceph_pg_pool_info, node);
 706		if (pi->name && strcmp(pi->name, name) == 0)
 707			return pi->id;
 708	}
 709	return -ENOENT;
 710}
 711EXPORT_SYMBOL(ceph_pg_poolid_by_name);
 712
 
 
 
 
 
 
 
 
 
 713static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 714{
 715	rb_erase(&pi->node, root);
 716	kfree(pi->name);
 717	kfree(pi);
 718}
 719
 720static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
 721{
 722	u8 ev, cv;
 723	unsigned len, num;
 724	void *pool_end;
 725
 726	ceph_decode_need(p, end, 2 + 4, bad);
 727	ev = ceph_decode_8(p);  /* encoding version */
 728	cv = ceph_decode_8(p); /* compat version */
 729	if (ev < 5) {
 730		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
 731		return -EINVAL;
 732	}
 733	if (cv > 9) {
 734		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
 735		return -EINVAL;
 736	}
 737	len = ceph_decode_32(p);
 738	ceph_decode_need(p, end, len, bad);
 739	pool_end = *p + len;
 740
 741	pi->type = ceph_decode_8(p);
 742	pi->size = ceph_decode_8(p);
 743	pi->crush_ruleset = ceph_decode_8(p);
 744	pi->object_hash = ceph_decode_8(p);
 745
 746	pi->pg_num = ceph_decode_32(p);
 747	pi->pgp_num = ceph_decode_32(p);
 748
 749	*p += 4 + 4;  /* skip lpg* */
 750	*p += 4;      /* skip last_change */
 751	*p += 8 + 4;  /* skip snap_seq, snap_epoch */
 752
 753	/* skip snaps */
 754	num = ceph_decode_32(p);
 755	while (num--) {
 756		*p += 8;  /* snapid key */
 757		*p += 1 + 1; /* versions */
 758		len = ceph_decode_32(p);
 759		*p += len;
 760	}
 761
 762	/* skip removed_snaps */
 763	num = ceph_decode_32(p);
 764	*p += num * (8 + 8);
 765
 766	*p += 8;  /* skip auid */
 767	pi->flags = ceph_decode_64(p);
 768	*p += 4;  /* skip crash_replay_interval */
 769
 770	if (ev >= 7)
 771		pi->min_size = ceph_decode_8(p);
 772	else
 773		pi->min_size = pi->size - pi->size / 2;
 774
 775	if (ev >= 8)
 776		*p += 8 + 8;  /* skip quota_max_* */
 777
 778	if (ev >= 9) {
 779		/* skip tiers */
 780		num = ceph_decode_32(p);
 781		*p += num * 8;
 782
 783		*p += 8;  /* skip tier_of */
 784		*p += 1;  /* skip cache_mode */
 785
 786		pi->read_tier = ceph_decode_64(p);
 787		pi->write_tier = ceph_decode_64(p);
 788	} else {
 789		pi->read_tier = -1;
 790		pi->write_tier = -1;
 791	}
 792
 793	if (ev >= 10) {
 794		/* skip properties */
 795		num = ceph_decode_32(p);
 796		while (num--) {
 797			len = ceph_decode_32(p);
 798			*p += len; /* key */
 799			len = ceph_decode_32(p);
 800			*p += len; /* val */
 801		}
 802	}
 803
 804	if (ev >= 11) {
 805		/* skip hit_set_params */
 806		*p += 1 + 1; /* versions */
 807		len = ceph_decode_32(p);
 808		*p += len;
 809
 810		*p += 4; /* skip hit_set_period */
 811		*p += 4; /* skip hit_set_count */
 812	}
 813
 814	if (ev >= 12)
 815		*p += 4; /* skip stripe_width */
 816
 817	if (ev >= 13) {
 818		*p += 8; /* skip target_max_bytes */
 819		*p += 8; /* skip target_max_objects */
 820		*p += 4; /* skip cache_target_dirty_ratio_micro */
 821		*p += 4; /* skip cache_target_full_ratio_micro */
 822		*p += 4; /* skip cache_min_flush_age */
 823		*p += 4; /* skip cache_min_evict_age */
 824	}
 825
 826	if (ev >=  14) {
 827		/* skip erasure_code_profile */
 828		len = ceph_decode_32(p);
 829		*p += len;
 830	}
 831
 832	/*
 833	 * last_force_op_resend_preluminous, will be overridden if the
 834	 * map was encoded with RESEND_ON_SPLIT
 835	 */
 836	if (ev >= 15)
 837		pi->last_force_request_resend = ceph_decode_32(p);
 838	else
 839		pi->last_force_request_resend = 0;
 840
 841	if (ev >= 16)
 842		*p += 4; /* skip min_read_recency_for_promote */
 843
 844	if (ev >= 17)
 845		*p += 8; /* skip expected_num_objects */
 846
 847	if (ev >= 19)
 848		*p += 4; /* skip cache_target_dirty_high_ratio_micro */
 849
 850	if (ev >= 20)
 851		*p += 4; /* skip min_write_recency_for_promote */
 852
 853	if (ev >= 21)
 854		*p += 1; /* skip use_gmt_hitset */
 855
 856	if (ev >= 22)
 857		*p += 1; /* skip fast_read */
 858
 859	if (ev >= 23) {
 860		*p += 4; /* skip hit_set_grade_decay_rate */
 861		*p += 4; /* skip hit_set_search_last_n */
 862	}
 863
 864	if (ev >= 24) {
 865		/* skip opts */
 866		*p += 1 + 1; /* versions */
 867		len = ceph_decode_32(p);
 868		*p += len;
 869	}
 870
 871	if (ev >= 25)
 872		pi->last_force_request_resend = ceph_decode_32(p);
 873
 874	/* ignore the rest */
 875
 876	*p = pool_end;
 877	calc_pg_masks(pi);
 878	return 0;
 879
 880bad:
 881	return -EINVAL;
 882}
 883
 884static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
 885{
 886	struct ceph_pg_pool_info *pi;
 887	u32 num, len;
 888	u64 pool;
 889
 890	ceph_decode_32_safe(p, end, num, bad);
 891	dout(" %d pool names\n", num);
 892	while (num--) {
 893		ceph_decode_64_safe(p, end, pool, bad);
 894		ceph_decode_32_safe(p, end, len, bad);
 895		dout("  pool %llu len %d\n", pool, len);
 896		ceph_decode_need(p, end, len, bad);
 897		pi = __lookup_pg_pool(&map->pg_pools, pool);
 898		if (pi) {
 899			char *name = kstrndup(*p, len, GFP_NOFS);
 900
 901			if (!name)
 902				return -ENOMEM;
 903			kfree(pi->name);
 904			pi->name = name;
 905			dout("  name is %s\n", pi->name);
 906		}
 907		*p += len;
 908	}
 909	return 0;
 910
 911bad:
 912	return -EINVAL;
 913}
 914
 915/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 916 * osd map
 917 */
 918struct ceph_osdmap *ceph_osdmap_alloc(void)
 919{
 920	struct ceph_osdmap *map;
 921
 922	map = kzalloc(sizeof(*map), GFP_NOIO);
 923	if (!map)
 924		return NULL;
 925
 926	map->pg_pools = RB_ROOT;
 927	map->pool_max = -1;
 928	map->pg_temp = RB_ROOT;
 929	map->primary_temp = RB_ROOT;
 930	map->pg_upmap = RB_ROOT;
 931	map->pg_upmap_items = RB_ROOT;
 932	mutex_init(&map->crush_workspace_mutex);
 
 933
 934	return map;
 935}
 936
 937void ceph_osdmap_destroy(struct ceph_osdmap *map)
 938{
 939	dout("osdmap_destroy %p\n", map);
 
 940	if (map->crush)
 941		crush_destroy(map->crush);
 
 
 942	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
 943		struct ceph_pg_mapping *pg =
 944			rb_entry(rb_first(&map->pg_temp),
 945				 struct ceph_pg_mapping, node);
 946		erase_pg_mapping(&map->pg_temp, pg);
 947		free_pg_mapping(pg);
 948	}
 949	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
 950		struct ceph_pg_mapping *pg =
 951			rb_entry(rb_first(&map->primary_temp),
 952				 struct ceph_pg_mapping, node);
 953		erase_pg_mapping(&map->primary_temp, pg);
 954		free_pg_mapping(pg);
 955	}
 956	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
 957		struct ceph_pg_mapping *pg =
 958			rb_entry(rb_first(&map->pg_upmap),
 959				 struct ceph_pg_mapping, node);
 960		rb_erase(&pg->node, &map->pg_upmap);
 961		kfree(pg);
 962	}
 963	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
 964		struct ceph_pg_mapping *pg =
 965			rb_entry(rb_first(&map->pg_upmap_items),
 966				 struct ceph_pg_mapping, node);
 967		rb_erase(&pg->node, &map->pg_upmap_items);
 968		kfree(pg);
 969	}
 970	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
 971		struct ceph_pg_pool_info *pi =
 972			rb_entry(rb_first(&map->pg_pools),
 973				 struct ceph_pg_pool_info, node);
 974		__remove_pg_pool(&map->pg_pools, pi);
 975	}
 976	kvfree(map->osd_state);
 977	kvfree(map->osd_weight);
 978	kvfree(map->osd_addr);
 979	kvfree(map->osd_primary_affinity);
 980	kvfree(map->crush_workspace);
 981	kfree(map);
 982}
 983
 984/*
 985 * Adjust max_osd value, (re)allocate arrays.
 986 *
 987 * The new elements are properly initialized.
 988 */
 989static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
 990{
 991	u32 *state;
 992	u32 *weight;
 993	struct ceph_entity_addr *addr;
 994	u32 to_copy;
 995	int i;
 996
 997	dout("%s old %u new %u\n", __func__, map->max_osd, max);
 998	if (max == map->max_osd)
 999		return 0;
1000
1001	state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
1002	weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
1003	addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
1004	if (!state || !weight || !addr) {
1005		kvfree(state);
1006		kvfree(weight);
1007		kvfree(addr);
1008		return -ENOMEM;
1009	}
1010
1011	to_copy = min(map->max_osd, max);
1012	if (map->osd_state) {
1013		memcpy(state, map->osd_state, to_copy * sizeof(*state));
1014		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
1015		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
1016		kvfree(map->osd_state);
1017		kvfree(map->osd_weight);
1018		kvfree(map->osd_addr);
1019	}
1020
1021	map->osd_state = state;
1022	map->osd_weight = weight;
1023	map->osd_addr = addr;
1024	for (i = map->max_osd; i < max; i++) {
1025		map->osd_state[i] = 0;
1026		map->osd_weight[i] = CEPH_OSD_OUT;
1027		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
1028	}
1029
1030	if (map->osd_primary_affinity) {
1031		u32 *affinity;
1032
1033		affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),
1034					 GFP_NOFS);
1035		if (!affinity)
1036			return -ENOMEM;
1037
1038		memcpy(affinity, map->osd_primary_affinity,
1039		       to_copy * sizeof(*affinity));
1040		kvfree(map->osd_primary_affinity);
1041
1042		map->osd_primary_affinity = affinity;
1043		for (i = map->max_osd; i < max; i++)
1044			map->osd_primary_affinity[i] =
1045			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1046	}
1047
1048	map->max_osd = max;
1049
1050	return 0;
1051}
1052
1053static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
1054{
1055	void *workspace;
1056	size_t work_size;
1057
1058	if (IS_ERR(crush))
1059		return PTR_ERR(crush);
1060
1061	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
1062	dout("%s work_size %zu bytes\n", __func__, work_size);
1063	workspace = ceph_kvmalloc(work_size, GFP_NOIO);
1064	if (!workspace) {
1065		crush_destroy(crush);
1066		return -ENOMEM;
1067	}
1068	crush_init_workspace(crush, workspace);
1069
1070	if (map->crush)
1071		crush_destroy(map->crush);
1072	kvfree(map->crush_workspace);
1073	map->crush = crush;
1074	map->crush_workspace = workspace;
1075	return 0;
1076}
1077
1078#define OSDMAP_WRAPPER_COMPAT_VER	7
1079#define OSDMAP_CLIENT_DATA_COMPAT_VER	1
1080
1081/*
1082 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
1083 * to struct_v of the client_data section for new (v7 and above)
1084 * osdmaps.
1085 */
1086static int get_osdmap_client_data_v(void **p, void *end,
1087				    const char *prefix, u8 *v)
1088{
1089	u8 struct_v;
1090
1091	ceph_decode_8_safe(p, end, struct_v, e_inval);
1092	if (struct_v >= 7) {
1093		u8 struct_compat;
1094
1095		ceph_decode_8_safe(p, end, struct_compat, e_inval);
1096		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
1097			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
1098				struct_v, struct_compat,
1099				OSDMAP_WRAPPER_COMPAT_VER, prefix);
1100			return -EINVAL;
1101		}
1102		*p += 4; /* ignore wrapper struct_len */
1103
1104		ceph_decode_8_safe(p, end, struct_v, e_inval);
1105		ceph_decode_8_safe(p, end, struct_compat, e_inval);
1106		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
1107			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
1108				struct_v, struct_compat,
1109				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
1110			return -EINVAL;
1111		}
1112		*p += 4; /* ignore client data struct_len */
1113	} else {
1114		u16 version;
1115
1116		*p -= 1;
1117		ceph_decode_16_safe(p, end, version, e_inval);
1118		if (version < 6) {
1119			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
1120				version, prefix);
1121			return -EINVAL;
1122		}
1123
 1124		/* old osdmap encoding */
1125		struct_v = 0;
1126	}
1127
1128	*v = struct_v;
1129	return 0;
1130
1131e_inval:
1132	return -EINVAL;
1133}
1134
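/*
 * Decode a pools section.  For a full map, or for a pool not yet
 * present when applying an incremental, a new ceph_pg_pool_info is
 * allocated and inserted before its contents are decoded in place.
 */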
1135static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
1136			  bool incremental)
1137{
1138	u32 n;
1139
1140	ceph_decode_32_safe(p, end, n, e_inval);
1141	while (n--) {
1142		struct ceph_pg_pool_info *pi;
1143		u64 pool;
1144		int ret;
1145
1146		ceph_decode_64_safe(p, end, pool, e_inval);
1147
1148		pi = __lookup_pg_pool(&map->pg_pools, pool);
1149		if (!incremental || !pi) {
1150			pi = kzalloc(sizeof(*pi), GFP_NOFS);
1151			if (!pi)
1152				return -ENOMEM;
1153
1154			pi->id = pool;
1155
1156			ret = __insert_pg_pool(&map->pg_pools, pi);
1157			if (ret) {
1158				kfree(pi);
1159				return ret;
1160			}
1161		}
1162
1163		ret = decode_pool(p, end, pi);
1164		if (ret)
1165			return ret;
1166	}
1167
1168	return 0;
1169
1170e_inval:
1171	return -EINVAL;
1172}
1173
1174static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
1175{
1176	return __decode_pools(p, end, map, false);
1177}
1178
1179static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
1180{
1181	return __decode_pools(p, end, map, true);
1182}
1183
1184typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);
1185
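/*
 * Common helper for the pg mapping sections (pg_temp, primary_temp,
 * pg_upmap, pg_upmap_items).  Any existing mapping for a decoded pgid
 * is erased first; @fn then produces the replacement entry, or NULL if
 * the entry is simply being removed (incremental updates only).
 */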
1186static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
1187			     decode_mapping_fn_t fn, bool incremental)
1188{
1189	u32 n;
1190
1191	WARN_ON(!incremental && !fn);
1192
1193	ceph_decode_32_safe(p, end, n, e_inval);
1194	while (n--) {
1195		struct ceph_pg_mapping *pg;
1196		struct ceph_pg pgid;
1197		int ret;
1198
1199		ret = ceph_decode_pgid(p, end, &pgid);
1200		if (ret)
1201			return ret;
1202
1203		pg = lookup_pg_mapping(mapping_root, &pgid);
1204		if (pg) {
1205			WARN_ON(!incremental);
1206			erase_pg_mapping(mapping_root, pg);
1207			free_pg_mapping(pg);
1208		}
1209
1210		if (fn) {
1211			pg = fn(p, end, incremental);
1212			if (IS_ERR(pg))
1213				return PTR_ERR(pg);
1214
1215			if (pg) {
1216				pg->pgid = pgid; /* struct */
1217				insert_pg_mapping(mapping_root, pg);
1218			}
1219		}
1220	}
1221
1222	return 0;
1223
1224e_inval:
1225	return -EINVAL;
1226}
1227
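/*
 * Decode a single pg_temp entry: a list of OSD ids.  In an incremental
 * map an empty list means "remove this mapping" and is returned as NULL.
 */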
1228static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
1229						bool incremental)
1230{
1231	struct ceph_pg_mapping *pg;
1232	u32 len, i;
1233
1234	ceph_decode_32_safe(p, end, len, e_inval);
1235	if (len == 0 && incremental)
1236		return NULL;	/* new_pg_temp: [] to remove */
1237	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
1238		return ERR_PTR(-EINVAL);
1239
1240	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
1241	pg = alloc_pg_mapping(len * sizeof(u32));
1242	if (!pg)
1243		return ERR_PTR(-ENOMEM);
1244
1245	pg->pg_temp.len = len;
1246	for (i = 0; i < len; i++)
1247		pg->pg_temp.osds[i] = ceph_decode_32(p);
1248
1249	return pg;
1250
1251e_inval:
1252	return ERR_PTR(-EINVAL);
1253}
1254
1255static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
1256{
1257	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
1258				 false);
1259}
1260
1261static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
1262{
1263	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
1264				 true);
1265}
1266
1267static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
1268						     bool incremental)
1269{
1270	struct ceph_pg_mapping *pg;
1271	u32 osd;
1272
1273	ceph_decode_32_safe(p, end, osd, e_inval);
1274	if (osd == (u32)-1 && incremental)
1275		return NULL;	/* new_primary_temp: -1 to remove */
1276
1277	pg = alloc_pg_mapping(0);
1278	if (!pg)
1279		return ERR_PTR(-ENOMEM);
1280
1281	pg->primary_temp.osd = osd;
1282	return pg;
1283
1284e_inval:
1285	return ERR_PTR(-EINVAL);
1286}
1287
1288static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
1289{
1290	return decode_pg_mapping(p, end, &map->primary_temp,
1291				 __decode_primary_temp, false);
1292}
1293
1294static int decode_new_primary_temp(void **p, void *end,
1295				   struct ceph_osdmap *map)
1296{
1297	return decode_pg_mapping(p, end, &map->primary_temp,
1298				 __decode_primary_temp, true);
1299}
1300
1301u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
1302{
1303	BUG_ON(osd >= map->max_osd);
1304
1305	if (!map->osd_primary_affinity)
1306		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1307
1308	return map->osd_primary_affinity[osd];
1309}
1310
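/*
 * Primary affinity defaults to CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; the
 * per-OSD array is allocated lazily, on the first non-default setting.
 */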
1311static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
1312{
1313	BUG_ON(osd >= map->max_osd);
1314
1315	if (!map->osd_primary_affinity) {
1316		int i;
1317
1318		map->osd_primary_affinity = ceph_kvmalloc(
1319		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
1320		    GFP_NOFS);
1321		if (!map->osd_primary_affinity)
1322			return -ENOMEM;
1323
1324		for (i = 0; i < map->max_osd; i++)
1325			map->osd_primary_affinity[i] =
1326			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1327	}
1328
1329	map->osd_primary_affinity[osd] = aff;
1330
1331	return 0;
1332}
1333
1334static int decode_primary_affinity(void **p, void *end,
1335				   struct ceph_osdmap *map)
1336{
1337	u32 len, i;
1338
1339	ceph_decode_32_safe(p, end, len, e_inval);
1340	if (len == 0) {
1341		kvfree(map->osd_primary_affinity);
1342		map->osd_primary_affinity = NULL;
1343		return 0;
1344	}
1345	if (len != map->max_osd)
1346		goto e_inval;
1347
1348	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
1349
1350	for (i = 0; i < map->max_osd; i++) {
1351		int ret;
1352
1353		ret = set_primary_affinity(map, i, ceph_decode_32(p));
1354		if (ret)
1355			return ret;
1356	}
1357
1358	return 0;
1359
1360e_inval:
1361	return -EINVAL;
1362}
1363
1364static int decode_new_primary_affinity(void **p, void *end,
1365				       struct ceph_osdmap *map)
1366{
1367	u32 n;
1368
1369	ceph_decode_32_safe(p, end, n, e_inval);
1370	while (n--) {
1371		u32 osd, aff;
1372		int ret;
1373
1374		ceph_decode_32_safe(p, end, osd, e_inval);
1375		ceph_decode_32_safe(p, end, aff, e_inval);
1376
1377		ret = set_primary_affinity(map, osd, aff);
1378		if (ret)
1379			return ret;
1380
1381		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
1382	}
1383
1384	return 0;
1385
1386e_inval:
1387	return -EINVAL;
1388}
1389
1390static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
1391						 bool __unused)
1392{
1393	return __decode_pg_temp(p, end, false);
1394}
1395
1396static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1397{
1398	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
1399				 false);
1400}
1401
1402static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1403{
1404	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
1405				 true);
1406}
1407
1408static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
1409{
1410	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
1411}
1412
1413static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
1414						       bool __unused)
1415{
1416	struct ceph_pg_mapping *pg;
1417	u32 len, i;
1418
1419	ceph_decode_32_safe(p, end, len, e_inval);
1420	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
1421		return ERR_PTR(-EINVAL);
1422
1423	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
1424	pg = alloc_pg_mapping(2 * len * sizeof(u32));
1425	if (!pg)
1426		return ERR_PTR(-ENOMEM);
1427
1428	pg->pg_upmap_items.len = len;
1429	for (i = 0; i < len; i++) {
1430		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
1431		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
1432	}
1433
1434	return pg;
1435
1436e_inval:
1437	return ERR_PTR(-EINVAL);
1438}
1439
1440static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
1441{
1442	return decode_pg_mapping(p, end, &map->pg_upmap_items,
1443				 __decode_pg_upmap_items, false);
1444}
1445
1446static int decode_new_pg_upmap_items(void **p, void *end,
1447				     struct ceph_osdmap *map)
1448{
1449	return decode_pg_mapping(p, end, &map->pg_upmap_items,
1450				 __decode_pg_upmap_items, true);
1451}
1452
1453static int decode_old_pg_upmap_items(void **p, void *end,
1454				     struct ceph_osdmap *map)
1455{
1456	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
1457}
1458
1459/*
1460 * decode a full map.
1461 */
1462static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
1463{
1464	u8 struct_v;
1465	u32 epoch = 0;
1466	void *start = *p;
1467	u32 max;
1468	u32 len, i;
1469	int err;
1470
1471	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1472
1473	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
1474	if (err)
1475		goto bad;
1476
1477	/* fsid, epoch, created, modified */
1478	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
1479			 sizeof(map->created) + sizeof(map->modified), e_inval);
1480	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
1481	epoch = map->epoch = ceph_decode_32(p);
1482	ceph_decode_copy(p, &map->created, sizeof(map->created));
1483	ceph_decode_copy(p, &map->modified, sizeof(map->modified));
1484
1485	/* pools */
1486	err = decode_pools(p, end, map);
1487	if (err)
1488		goto bad;
1489
1490	/* pool_name */
1491	err = decode_pool_names(p, end, map);
1492	if (err)
1493		goto bad;
1494
1495	ceph_decode_32_safe(p, end, map->pool_max, e_inval);
1496
1497	ceph_decode_32_safe(p, end, map->flags, e_inval);
1498
1499	/* max_osd */
1500	ceph_decode_32_safe(p, end, max, e_inval);
1501
1502	/* (re)alloc osd arrays */
1503	err = osdmap_set_max_osd(map, max);
1504	if (err)
1505		goto bad;
1506
1507	/* osd_state, osd_weight, osd_addrs->client_addr */
1508	ceph_decode_need(p, end, 3*sizeof(u32) +
1509			 map->max_osd*(struct_v >= 5 ? sizeof(u32) :
1510						       sizeof(u8)) +
1511				       sizeof(*map->osd_weight), e_inval);
1512	if (ceph_decode_32(p) != map->max_osd)
1513		goto e_inval;
1514
1515	if (struct_v >= 5) {
1516		for (i = 0; i < map->max_osd; i++)
1517			map->osd_state[i] = ceph_decode_32(p);
1518	} else {
1519		for (i = 0; i < map->max_osd; i++)
1520			map->osd_state[i] = ceph_decode_8(p);
1521	}
1522
1523	if (ceph_decode_32(p) != map->max_osd)
1524		goto e_inval;
1525
1526	for (i = 0; i < map->max_osd; i++)
1527		map->osd_weight[i] = ceph_decode_32(p);
1528
1529	if (ceph_decode_32(p) != map->max_osd)
1530		goto e_inval;
1531
1532	for (i = 0; i < map->max_osd; i++) {
1533		err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]);
1534		if (err)
1535			goto bad;
1536	}
1537
1538	/* pg_temp */
1539	err = decode_pg_temp(p, end, map);
1540	if (err)
1541		goto bad;
1542
1543	/* primary_temp */
1544	if (struct_v >= 1) {
1545		err = decode_primary_temp(p, end, map);
1546		if (err)
1547			goto bad;
1548	}
1549
1550	/* primary_affinity */
1551	if (struct_v >= 2) {
1552		err = decode_primary_affinity(p, end, map);
1553		if (err)
1554			goto bad;
1555	} else {
1556		WARN_ON(map->osd_primary_affinity);
1557	}
1558
1559	/* crush */
1560	ceph_decode_32_safe(p, end, len, e_inval);
1561	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
1562	if (err)
1563		goto bad;
1564
1565	*p += len;
1566	if (struct_v >= 3) {
1567		/* erasure_code_profiles */
1568		ceph_decode_skip_map_of_map(p, end, string, string, string,
1569					    e_inval);
1570	}
1571
1572	if (struct_v >= 4) {
1573		err = decode_pg_upmap(p, end, map);
1574		if (err)
1575			goto bad;
1576
1577		err = decode_pg_upmap_items(p, end, map);
1578		if (err)
1579			goto bad;
1580	} else {
1581		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
1582		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
1583	}
1584
1585	/* ignore the rest */
1586	*p = end;
1587
1588	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1589	return 0;
1590
1591e_inval:
1592	err = -EINVAL;
1593bad:
1594	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1595	       err, epoch, (int)(*p - start), *p, start, end);
1596	print_hex_dump(KERN_DEBUG, "osdmap: ",
1597		       DUMP_PREFIX_OFFSET, 16, 1,
1598		       start, end - start, true);
1599	return err;
1600}
1601
1602/*
1603 * Allocate and decode a full map.
1604 */
1605struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
1606{
1607	struct ceph_osdmap *map;
1608	int ret;
1609
1610	map = ceph_osdmap_alloc();
1611	if (!map)
1612		return ERR_PTR(-ENOMEM);
1613
1614	ret = osdmap_decode(p, end, map);
1615	if (ret) {
1616		ceph_osdmap_destroy(map);
1617		return ERR_PTR(ret);
1618	}
1619
1620	return map;
1621}
1622
1623/*
1624 * Encoding order is (new_up_client, new_state, new_weight).  Need to
1625 * apply in the (new_weight, new_state, new_up_client) order, because
1626 * an incremental map may look like e.g.
1627 *
1628 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
1629 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
1630 */
1631static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
1632				      struct ceph_osdmap *map)
1633{
1634	void *new_up_client;
1635	void *new_state;
1636	void *new_weight_end;
1637	u32 len;
1638	int i;
1639
1640	new_up_client = *p;
1641	ceph_decode_32_safe(p, end, len, e_inval);
1642	for (i = 0; i < len; ++i) {
1643		struct ceph_entity_addr addr;
1644
1645		ceph_decode_skip_32(p, end, e_inval);
1646		if (ceph_decode_entity_addr(p, end, &addr))
1647			goto e_inval;
1648	}
1649
1650	new_state = *p;
1651	ceph_decode_32_safe(p, end, len, e_inval);
1652	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
1653	ceph_decode_need(p, end, len, e_inval);
1654	*p += len;
1655
1656	/* new_weight */
1657	ceph_decode_32_safe(p, end, len, e_inval);
1658	while (len--) {
1659		s32 osd;
1660		u32 w;
1661
1662		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
1663		osd = ceph_decode_32(p);
1664		w = ceph_decode_32(p);
1665		BUG_ON(osd >= map->max_osd);
1666		pr_info("osd%d weight 0x%x %s\n", osd, w,
1667		     w == CEPH_OSD_IN ? "(in)" :
1668		     (w == CEPH_OSD_OUT ? "(out)" : ""));
1669		map->osd_weight[osd] = w;
1670
1671		/*
 1672		 * If we are marking in, set the EXISTS bit, and clear the
 1673		 * AUTOOUT and NEW bits.
1674		 */
1675		if (w) {
1676			map->osd_state[osd] |= CEPH_OSD_EXISTS;
1677			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
1678						 CEPH_OSD_NEW);
1679		}
1680	}
1681	new_weight_end = *p;
1682
1683	/* new_state (up/down) */
1684	*p = new_state;
1685	len = ceph_decode_32(p);
1686	while (len--) {
1687		s32 osd;
1688		u32 xorstate;
1689		int ret;
1690
1691		osd = ceph_decode_32(p);
1692		if (struct_v >= 5)
1693			xorstate = ceph_decode_32(p);
1694		else
1695			xorstate = ceph_decode_8(p);
1696		if (xorstate == 0)
1697			xorstate = CEPH_OSD_UP;
1698		BUG_ON(osd >= map->max_osd);
1699		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
1700		    (xorstate & CEPH_OSD_UP))
1701			pr_info("osd%d down\n", osd);
1702		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
1703		    (xorstate & CEPH_OSD_EXISTS)) {
1704			pr_info("osd%d does not exist\n", osd);
1705			ret = set_primary_affinity(map, osd,
1706						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
1707			if (ret)
1708				return ret;
1709			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
1710			map->osd_state[osd] = 0;
1711		} else {
1712			map->osd_state[osd] ^= xorstate;
1713		}
1714	}
1715
1716	/* new_up_client */
1717	*p = new_up_client;
1718	len = ceph_decode_32(p);
1719	while (len--) {
1720		s32 osd;
1721		struct ceph_entity_addr addr;
1722
1723		osd = ceph_decode_32(p);
1724		BUG_ON(osd >= map->max_osd);
1725		if (ceph_decode_entity_addr(p, end, &addr))
1726			goto e_inval;
1727		pr_info("osd%d up\n", osd);
1728		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
1729		map->osd_addr[osd] = addr;
1730	}
1731
1732	*p = new_weight_end;
1733	return 0;
1734
1735e_inval:
1736	return -EINVAL;
1737}
1738
1739/*
1740 * decode and apply an incremental map update.
1741 */
1742struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
1743					     struct ceph_osdmap *map)
1744{
1745	struct ceph_fsid fsid;
1746	u32 epoch = 0;
1747	struct ceph_timespec modified;
1748	s32 len;
1749	u64 pool;
1750	__s64 new_pool_max;
1751	__s32 new_flags, max;
1752	void *start = *p;
1753	int err;
1754	u8 struct_v;
1755
1756	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1757
1758	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
1759	if (err)
1760		goto bad;
1761
1762	/* fsid, epoch, modified, new_pool_max, new_flags */
1763	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
1764			 sizeof(u64) + sizeof(u32), e_inval);
1765	ceph_decode_copy(p, &fsid, sizeof(fsid));
1766	epoch = ceph_decode_32(p);
1767	BUG_ON(epoch != map->epoch+1);
1768	ceph_decode_copy(p, &modified, sizeof(modified));
1769	new_pool_max = ceph_decode_64(p);
1770	new_flags = ceph_decode_32(p);
1771
1772	/* full map? */
1773	ceph_decode_32_safe(p, end, len, e_inval);
1774	if (len > 0) {
1775		dout("apply_incremental full map len %d, %p to %p\n",
1776		     len, *p, end);
1777		return ceph_osdmap_decode(p, min(*p+len, end));
1778	}
1779
1780	/* new crush? */
1781	ceph_decode_32_safe(p, end, len, e_inval);
1782	if (len > 0) {
1783		err = osdmap_set_crush(map,
1784				       crush_decode(*p, min(*p + len, end)));
1785		if (err)
1786			goto bad;
1787		*p += len;
1788	}
1789
1790	/* new flags? */
1791	if (new_flags >= 0)
1792		map->flags = new_flags;
1793	if (new_pool_max >= 0)
1794		map->pool_max = new_pool_max;
1795
1796	/* new max? */
1797	ceph_decode_32_safe(p, end, max, e_inval);
1798	if (max >= 0) {
1799		err = osdmap_set_max_osd(map, max);
1800		if (err)
1801			goto bad;
1802	}
1803
1804	map->epoch++;
1805	map->modified = modified;
1806
1807	/* new_pools */
1808	err = decode_new_pools(p, end, map);
1809	if (err)
1810		goto bad;
1811
1812	/* new_pool_names */
1813	err = decode_pool_names(p, end, map);
1814	if (err)
1815		goto bad;
1816
1817	/* old_pool */
1818	ceph_decode_32_safe(p, end, len, e_inval);
1819	while (len--) {
1820		struct ceph_pg_pool_info *pi;
1821
1822		ceph_decode_64_safe(p, end, pool, e_inval);
1823		pi = __lookup_pg_pool(&map->pg_pools, pool);
1824		if (pi)
1825			__remove_pg_pool(&map->pg_pools, pi);
1826	}
1827
1828	/* new_up_client, new_state, new_weight */
1829	err = decode_new_up_state_weight(p, end, struct_v, map);
1830	if (err)
1831		goto bad;
1832
1833	/* new_pg_temp */
1834	err = decode_new_pg_temp(p, end, map);
1835	if (err)
1836		goto bad;
1837
1838	/* new_primary_temp */
1839	if (struct_v >= 1) {
1840		err = decode_new_primary_temp(p, end, map);
1841		if (err)
1842			goto bad;
1843	}
1844
1845	/* new_primary_affinity */
1846	if (struct_v >= 2) {
1847		err = decode_new_primary_affinity(p, end, map);
1848		if (err)
1849			goto bad;
1850	}
1851
1852	if (struct_v >= 3) {
1853		/* new_erasure_code_profiles */
1854		ceph_decode_skip_map_of_map(p, end, string, string, string,
1855					    e_inval);
1856		/* old_erasure_code_profiles */
1857		ceph_decode_skip_set(p, end, string, e_inval);
1858	}
1859
1860	if (struct_v >= 4) {
1861		err = decode_new_pg_upmap(p, end, map);
1862		if (err)
1863			goto bad;
1864
1865		err = decode_old_pg_upmap(p, end, map);
1866		if (err)
1867			goto bad;
1868
1869		err = decode_new_pg_upmap_items(p, end, map);
1870		if (err)
1871			goto bad;
1872
1873		err = decode_old_pg_upmap_items(p, end, map);
1874		if (err)
1875			goto bad;
1876	}
1877
1878	/* ignore the rest */
1879	*p = end;
1880
1881	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1882	return map;
1883
1884e_inval:
1885	err = -EINVAL;
1886bad:
1887	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1888	       err, epoch, (int)(*p - start), *p, start, end);
1889	print_hex_dump(KERN_DEBUG, "osdmap: ",
1890		       DUMP_PREFIX_OFFSET, 16, 1,
1891		       start, end - start, true);
1892	return ERR_PTR(err);
1893}
1894
1895void ceph_oloc_copy(struct ceph_object_locator *dest,
1896		    const struct ceph_object_locator *src)
1897{
1898	ceph_oloc_destroy(dest);
1899
1900	dest->pool = src->pool;
1901	if (src->pool_ns)
1902		dest->pool_ns = ceph_get_string(src->pool_ns);
1903	else
1904		dest->pool_ns = NULL;
1905}
1906EXPORT_SYMBOL(ceph_oloc_copy);
1907
1908void ceph_oloc_destroy(struct ceph_object_locator *oloc)
1909{
1910	ceph_put_string(oloc->pool_ns);
1911}
1912EXPORT_SYMBOL(ceph_oloc_destroy);
1913
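/*
 * Deep-copy an object id.  The name stays in the inline buffer if the
 * source name is inline; otherwise an external buffer is allocated
 * (GFP_NOIO | __GFP_NOFAIL).
 */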
1914void ceph_oid_copy(struct ceph_object_id *dest,
1915		   const struct ceph_object_id *src)
1916{
1917	ceph_oid_destroy(dest);
1918
1919	if (src->name != src->inline_name) {
1920		/* very rare, see ceph_object_id definition */
1921		dest->name = kmalloc(src->name_len + 1,
1922				     GFP_NOIO | __GFP_NOFAIL);
1923	} else {
1924		dest->name = dest->inline_name;
1925	}
1926	memcpy(dest->name, src->name, src->name_len + 1);
1927	dest->name_len = src->name_len;
1928}
1929EXPORT_SYMBOL(ceph_oid_copy);
1930
1931static __printf(2, 0)
1932int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
1933{
1934	int len;
1935
1936	WARN_ON(!ceph_oid_empty(oid));
1937
1938	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
1939	if (len >= sizeof(oid->inline_name))
1940		return len;
1941
1942	oid->name_len = len;
1943	return 0;
1944}
1945
1946/*
1947 * If oid doesn't fit into inline buffer, BUG.
1948 */
1949void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
1950{
1951	va_list ap;
1952
1953	va_start(ap, fmt);
1954	BUG_ON(oid_printf_vargs(oid, fmt, ap));
1955	va_end(ap);
1956}
1957EXPORT_SYMBOL(ceph_oid_printf);
1958
1959static __printf(3, 0)
1960int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
1961		      const char *fmt, va_list ap)
1962{
1963	va_list aq;
1964	int len;
1965
1966	va_copy(aq, ap);
1967	len = oid_printf_vargs(oid, fmt, aq);
1968	va_end(aq);
1969
1970	if (len) {
1971		char *external_name;
1972
1973		external_name = kmalloc(len + 1, gfp);
1974		if (!external_name)
1975			return -ENOMEM;
1976
1977		oid->name = external_name;
1978		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
1979		oid->name_len = len;
1980	}
1981
1982	return 0;
1983}
1984
1985/*
1986 * If oid doesn't fit into inline buffer, allocate.
1987 */
1988int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
1989		     const char *fmt, ...)
1990{
1991	va_list ap;
1992	int ret;
1993
1994	va_start(ap, fmt);
1995	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
1996	va_end(ap);
1997
1998	return ret;
1999}
2000EXPORT_SYMBOL(ceph_oid_aprintf);
2001
2002void ceph_oid_destroy(struct ceph_object_id *oid)
2003{
2004	if (oid->name != oid->inline_name)
2005		kfree(oid->name);
2006}
2007EXPORT_SYMBOL(ceph_oid_destroy);
2008
2009/*
2010 * osds only
2011 */
2012static bool __osds_equal(const struct ceph_osds *lhs,
2013			 const struct ceph_osds *rhs)
2014{
2015	if (lhs->size == rhs->size &&
2016	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
2017		return true;
2018
2019	return false;
2020}
2021
2022/*
2023 * osds + primary
2024 */
2025static bool osds_equal(const struct ceph_osds *lhs,
2026		       const struct ceph_osds *rhs)
2027{
2028	if (__osds_equal(lhs, rhs) &&
2029	    lhs->primary == rhs->primary)
2030		return true;
2031
2032	return false;
2033}
2034
2035static bool osds_valid(const struct ceph_osds *set)
2036{
2037	/* non-empty set */
2038	if (set->size > 0 && set->primary >= 0)
2039		return true;
2040
2041	/* empty can_shift_osds set */
2042	if (!set->size && set->primary == -1)
2043		return true;
2044
2045	/* empty !can_shift_osds set - all NONE */
2046	if (set->size > 0 && set->primary == -1) {
2047		int i;
2048
2049		for (i = 0; i < set->size; i++) {
2050			if (set->osds[i] != CRUSH_ITEM_NONE)
2051				break;
2052		}
2053		if (i == set->size)
2054			return true;
2055	}
2056
2057	return false;
2058}
2059
2060void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
2061{
2062	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
2063	dest->size = src->size;
2064	dest->primary = src->primary;
2065}
2066
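/*
 * Return true if the PG identified by @pgid splits when pg_num grows
 * from @old_pg_num to @new_pg_num, i.e. if any of the newly added PG
 * seeds maps back onto pgid->seed under the old pg_num.
 */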
2067bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
2068		      u32 new_pg_num)
2069{
2070	int old_bits = calc_bits_of(old_pg_num);
2071	int old_mask = (1 << old_bits) - 1;
2072	int n;
2073
2074	WARN_ON(pgid->seed >= old_pg_num);
2075	if (new_pg_num <= old_pg_num)
2076		return false;
2077
2078	for (n = 1; ; n++) {
2079		int next_bit = n << (old_bits - 1);
2080		u32 s = next_bit | pgid->seed;
2081
2082		if (s < old_pg_num || s == pgid->seed)
2083			continue;
2084		if (s >= new_pg_num)
2085			break;
2086
2087		s = ceph_stable_mod(s, old_pg_num, old_mask);
2088		if (s == pgid->seed)
2089			return true;
2090	}
2091
2092	return false;
2093}
2094
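/*
 * Return true if anything affecting the mapping of this PG changed
 * between the two epochs: acting or up set, pool size or min_size,
 * a PG split, or the sort_bitwise / recovery_deletes flags.
 */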
2095bool ceph_is_new_interval(const struct ceph_osds *old_acting,
2096			  const struct ceph_osds *new_acting,
2097			  const struct ceph_osds *old_up,
2098			  const struct ceph_osds *new_up,
2099			  int old_size,
2100			  int new_size,
2101			  int old_min_size,
2102			  int new_min_size,
2103			  u32 old_pg_num,
2104			  u32 new_pg_num,
2105			  bool old_sort_bitwise,
2106			  bool new_sort_bitwise,
2107			  bool old_recovery_deletes,
2108			  bool new_recovery_deletes,
2109			  const struct ceph_pg *pgid)
2110{
2111	return !osds_equal(old_acting, new_acting) ||
2112	       !osds_equal(old_up, new_up) ||
2113	       old_size != new_size ||
2114	       old_min_size != new_min_size ||
2115	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
2116	       old_sort_bitwise != new_sort_bitwise ||
2117	       old_recovery_deletes != new_recovery_deletes;
2118}
2119
2120static int calc_pg_rank(int osd, const struct ceph_osds *acting)
2121{
2122	int i;
2123
2124	for (i = 0; i < acting->size; i++) {
2125		if (acting->osds[i] == osd)
2126			return i;
2127	}
2128
2129	return -1;
2130}
2131
2132static bool primary_changed(const struct ceph_osds *old_acting,
2133			    const struct ceph_osds *new_acting)
2134{
2135	if (!old_acting->size && !new_acting->size)
2136		return false; /* both still empty */
2137
2138	if (!old_acting->size ^ !new_acting->size)
2139		return true; /* was empty, now not, or vice versa */
2140
2141	if (old_acting->primary != new_acting->primary)
2142		return true; /* primary changed */
2143
2144	if (calc_pg_rank(old_acting->primary, old_acting) !=
2145	    calc_pg_rank(new_acting->primary, new_acting))
2146		return true;
2147
 2148	return false; /* same primary (though replicas may have changed) */
2149}
2150
2151bool ceph_osds_changed(const struct ceph_osds *old_acting,
2152		       const struct ceph_osds *new_acting,
2153		       bool any_change)
2154{
2155	if (primary_changed(old_acting, new_acting))
2156		return true;
2157
2158	if (any_change && !__osds_equal(old_acting, new_acting))
2159		return true;
2160
2161	return false;
2162}
2163
2164/*
2165 * Map an object into a PG.
2166 *
2167 * Should only be called with target_oid and target_oloc (as opposed to
2168 * base_oid and base_oloc), since tiering isn't taken into account.
2169 */
2170void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
2171				 const struct ceph_object_id *oid,
2172				 const struct ceph_object_locator *oloc,
2173				 struct ceph_pg *raw_pgid)
2174{
2175	WARN_ON(pi->id != oloc->pool);
2176
2177	if (!oloc->pool_ns) {
2178		raw_pgid->pool = oloc->pool;
2179		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
2180					     oid->name_len);
2181		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
2182		     raw_pgid->pool, raw_pgid->seed);
2183	} else {
2184		char stack_buf[256];
2185		char *buf = stack_buf;
2186		int nsl = oloc->pool_ns->len;
2187		size_t total = nsl + 1 + oid->name_len;
2188
2189		if (total > sizeof(stack_buf))
2190			buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
2191		memcpy(buf, oloc->pool_ns->str, nsl);
2192		buf[nsl] = '\037';
2193		memcpy(buf + nsl + 1, oid->name, oid->name_len);
2194		raw_pgid->pool = oloc->pool;
2195		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
2196		if (buf != stack_buf)
2197			kfree(buf);
2198		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
2199		     oid->name, nsl, oloc->pool_ns->str,
2200		     raw_pgid->pool, raw_pgid->seed);
2201	}
2202}
2203
2204int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
2205			      const struct ceph_object_id *oid,
2206			      const struct ceph_object_locator *oloc,
2207			      struct ceph_pg *raw_pgid)
2208{
2209	struct ceph_pg_pool_info *pi;
2210
2211	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
2212	if (!pi)
2213		return -ENOENT;
2214
2215	__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
2216	return 0;
2217}
2218EXPORT_SYMBOL(ceph_object_locator_to_pg);
2219
2220/*
2221 * Map a raw PG (full precision ps) into an actual PG.
2222 */
2223static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
2224			 const struct ceph_pg *raw_pgid,
2225			 struct ceph_pg *pgid)
2226{
2227	pgid->pool = raw_pgid->pool;
2228	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
2229				     pi->pg_num_mask);
2230}
2231
2232/*
2233 * Map a raw PG (full precision ps) into a placement ps (placement
2234 * seed).  Include pool id in that value so that different pools don't
2235 * use the same seeds.
2236 */
2237static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
2238			 const struct ceph_pg *raw_pgid)
2239{
2240	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
2241		/* hash pool id and seed so that pool PGs do not overlap */
2242		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
2243				      ceph_stable_mod(raw_pgid->seed,
2244						      pi->pgp_num,
2245						      pi->pgp_num_mask),
2246				      raw_pgid->pool);
2247	} else {
2248		/*
2249		 * legacy behavior: add ps and pool together.  this is
2250		 * not a great approach because the PGs from each pool
2251		 * will overlap on top of each other: 0.5 == 1.4 ==
2252		 * 2.3 == ...
2253		 */
2254		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
2255				       pi->pgp_num_mask) +
2256		       (unsigned)raw_pgid->pool;
2257	}
2258}
2259
2260/*
2261 * Magic value used for a "default" fallback choose_args, used if the
2262 * crush_choose_arg_map passed to do_crush() does not exist.  If this
2263 * also doesn't exist, fall back to canonical weights.
2264 */
2265#define CEPH_DEFAULT_CHOOSE_ARGS	-1
2266
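/*
 * Run a CRUSH rule.  choose_args for @choose_args_index are used if
 * present, falling back to the default set and then to canonical
 * weights; the shared workspace is serialized by crush_workspace_mutex.
 */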
2267static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
2268		    int *result, int result_max,
2269		    const __u32 *weight, int weight_max,
2270		    s64 choose_args_index)
2271{
2272	struct crush_choose_arg_map *arg_map;
2273	int r;
2274
2275	BUG_ON(result_max > CEPH_PG_MAX_SIZE);
2276
2277	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
2278					choose_args_index);
2279	if (!arg_map)
2280		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
2281						CEPH_DEFAULT_CHOOSE_ARGS);
2282
2283	mutex_lock(&map->crush_workspace_mutex);
2284	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
2285			  weight, weight_max, map->crush_workspace,
2286			  arg_map ? arg_map->args : NULL);
2287	mutex_unlock(&map->crush_workspace_mutex);
2288
2289	return r;
2290}
2291
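/*
 * Drop nonexistent OSDs from @set: shift the remaining entries left
 * for pools that can shift OSDs, otherwise replace the missing entries
 * with CRUSH_ITEM_NONE to preserve positions (e.g. erasure-coded pools).
 */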
2292static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
2293				    struct ceph_pg_pool_info *pi,
2294				    struct ceph_osds *set)
2295{
2296	int i;
2297
2298	if (ceph_can_shift_osds(pi)) {
2299		int removed = 0;
2300
2301		/* shift left */
2302		for (i = 0; i < set->size; i++) {
2303			if (!ceph_osd_exists(osdmap, set->osds[i])) {
2304				removed++;
2305				continue;
2306			}
2307			if (removed)
2308				set->osds[i - removed] = set->osds[i];
2309		}
2310		set->size -= removed;
2311	} else {
2312		/* set dne devices to NONE */
2313		for (i = 0; i < set->size; i++) {
2314			if (!ceph_osd_exists(osdmap, set->osds[i]))
2315				set->osds[i] = CRUSH_ITEM_NONE;
2316		}
2317	}
2318}
2319
2320/*
2321 * Calculate raw set (CRUSH output) for given PG and filter out
2322 * nonexistent OSDs.  ->primary is undefined for a raw set.
2323 *
2324 * Placement seed (CRUSH input) is returned through @ppps.
2325 */
2326static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
2327			   struct ceph_pg_pool_info *pi,
2328			   const struct ceph_pg *raw_pgid,
2329			   struct ceph_osds *raw,
2330			   u32 *ppps)
2331{
2332	u32 pps = raw_pg_to_pps(pi, raw_pgid);
2333	int ruleno;
2334	int len;
2335
2336	ceph_osds_init(raw);
2337	if (ppps)
2338		*ppps = pps;
2339
2340	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
2341				 pi->size);
2342	if (ruleno < 0) {
2343		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
2344		       pi->id, pi->crush_ruleset, pi->type, pi->size);
2345		return;
2346	}
2347
2348	if (pi->size > ARRAY_SIZE(raw->osds)) {
2349		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
2350		       pi->id, pi->crush_ruleset, pi->type, pi->size,
2351		       ARRAY_SIZE(raw->osds));
2352		return;
2353	}
2354
2355	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
2356		       osdmap->osd_weight, osdmap->max_osd, pi->id);
2357	if (len < 0) {
2358		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
2359		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
2360		       pi->size);
2361		return;
2362	}
2363
2364	raw->size = len;
2365	remove_nonexistent_osds(osdmap, pi, raw);
2366}
2367
2368/* apply pg_upmap[_items] mappings */
2369static void apply_upmap(struct ceph_osdmap *osdmap,
2370			const struct ceph_pg *pgid,
2371			struct ceph_osds *raw)
2372{
2373	struct ceph_pg_mapping *pg;
2374	int i, j;
2375
2376	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
2377	if (pg) {
2378		/* make sure targets aren't marked out */
2379		for (i = 0; i < pg->pg_upmap.len; i++) {
2380			int osd = pg->pg_upmap.osds[i];
2381
2382			if (osd != CRUSH_ITEM_NONE &&
2383			    osd < osdmap->max_osd &&
2384			    osdmap->osd_weight[osd] == 0) {
2385				/* reject/ignore explicit mapping */
2386				return;
2387			}
2388		}
2389		for (i = 0; i < pg->pg_upmap.len; i++)
2390			raw->osds[i] = pg->pg_upmap.osds[i];
2391		raw->size = pg->pg_upmap.len;
2392		/* check and apply pg_upmap_items, if any */
2393	}
2394
2395	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
2396	if (pg) {
2397		/*
2398		 * Note: this approach does not allow a bidirectional swap,
2399		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
2400		 */
2401		for (i = 0; i < pg->pg_upmap_items.len; i++) {
2402			int from = pg->pg_upmap_items.from_to[i][0];
2403			int to = pg->pg_upmap_items.from_to[i][1];
2404			int pos = -1;
2405			bool exists = false;
2406
2407			/* make sure replacement doesn't already appear */
2408			for (j = 0; j < raw->size; j++) {
2409				int osd = raw->osds[j];
2410
2411				if (osd == to) {
2412					exists = true;
2413					break;
2414				}
2415				/* ignore mapping if target is marked out */
2416				if (osd == from && pos < 0 &&
2417				    !(to != CRUSH_ITEM_NONE &&
2418				      to < osdmap->max_osd &&
2419				      osdmap->osd_weight[to] == 0)) {
2420					pos = j;
2421				}
2422			}
2423			if (!exists && pos >= 0)
2424				raw->osds[pos] = to;
2425		}
2426	}
2427}
2428
2429/*
2430 * Given raw set, calculate up set and up primary.  By definition of an
2431 * up set, the result won't contain nonexistent or down OSDs.
2432 *
2433 * This is done in-place - on return @set is the up set.  If it's
2434 * empty, ->primary will remain undefined.
2435 */
2436static void raw_to_up_osds(struct ceph_osdmap *osdmap,
2437			   struct ceph_pg_pool_info *pi,
2438			   struct ceph_osds *set)
2439{
2440	int i;
2441
2442	/* ->primary is undefined for a raw set */
2443	BUG_ON(set->primary != -1);
2444
2445	if (ceph_can_shift_osds(pi)) {
2446		int removed = 0;
2447
2448		/* shift left */
2449		for (i = 0; i < set->size; i++) {
2450			if (ceph_osd_is_down(osdmap, set->osds[i])) {
2451				removed++;
2452				continue;
2453			}
2454			if (removed)
2455				set->osds[i - removed] = set->osds[i];
2456		}
2457		set->size -= removed;
2458		if (set->size > 0)
2459			set->primary = set->osds[0];
2460	} else {
2461		/* set down/dne devices to NONE */
2462		for (i = set->size - 1; i >= 0; i--) {
2463			if (ceph_osd_is_down(osdmap, set->osds[i]))
2464				set->osds[i] = CRUSH_ITEM_NONE;
2465			else
2466				set->primary = set->osds[i];
2467		}
2468	}
2469}
2470
2471static void apply_primary_affinity(struct ceph_osdmap *osdmap,
2472				   struct ceph_pg_pool_info *pi,
2473				   u32 pps,
2474				   struct ceph_osds *up)
2475{
2476	int i;
2477	int pos = -1;
2478
2479	/*
2480	 * Do we have any non-default primary_affinity values for these
2481	 * osds?
2482	 */
2483	if (!osdmap->osd_primary_affinity)
2484		return;
2485
2486	for (i = 0; i < up->size; i++) {
2487		int osd = up->osds[i];
2488
2489		if (osd != CRUSH_ITEM_NONE &&
2490		    osdmap->osd_primary_affinity[osd] !=
2491					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
2492			break;
2493		}
2494	}
2495	if (i == up->size)
2496		return;
2497
2498	/*
2499	 * Pick the primary.  Feed both the seed (for the pg) and the
2500	 * osd into the hash/rng so that a proportional fraction of an
2501	 * osd's pgs get rejected as primary.
2502	 */
2503	for (i = 0; i < up->size; i++) {
2504		int osd = up->osds[i];
2505		u32 aff;
2506
2507		if (osd == CRUSH_ITEM_NONE)
2508			continue;
2509
2510		aff = osdmap->osd_primary_affinity[osd];
2511		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
2512		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
2513				    pps, osd) >> 16) >= aff) {
2514			/*
2515			 * We chose not to use this primary.  Note it
2516			 * anyway as a fallback in case we don't pick
2517			 * anyone else, but keep looking.
2518			 */
2519			if (pos < 0)
2520				pos = i;
2521		} else {
2522			pos = i;
2523			break;
2524		}
2525	}
2526	if (pos < 0)
2527		return;
2528
2529	up->primary = up->osds[pos];
2530
2531	if (ceph_can_shift_osds(pi) && pos > 0) {
2532		/* move the new primary to the front */
2533		for (i = pos; i > 0; i--)
2534			up->osds[i] = up->osds[i - 1];
2535		up->osds[0] = up->primary;
2536	}
2537}
2538
2539/*
2540 * Get pg_temp and primary_temp mappings for given PG.
2541 *
2542 * Note that a PG may have none, only pg_temp, only primary_temp or
2543 * both pg_temp and primary_temp mappings.  This means @temp isn't
2544 * always a valid OSD set on return: in the "only primary_temp" case,
2545 * @temp will have its ->primary >= 0 but ->size == 0.
2546 */
2547static void get_temp_osds(struct ceph_osdmap *osdmap,
2548			  struct ceph_pg_pool_info *pi,
2549			  const struct ceph_pg *pgid,
2550			  struct ceph_osds *temp)
2551{
2552	struct ceph_pg_mapping *pg;
2553	int i;
2554
2555	ceph_osds_init(temp);
2556
2557	/* pg_temp? */
2558	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
2559	if (pg) {
2560		for (i = 0; i < pg->pg_temp.len; i++) {
2561			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
2562				if (ceph_can_shift_osds(pi))
2563					continue;
2564
2565				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
2566			} else {
2567				temp->osds[temp->size++] = pg->pg_temp.osds[i];
2568			}
2569		}
2570
2571		/* apply pg_temp's primary */
2572		for (i = 0; i < temp->size; i++) {
2573			if (temp->osds[i] != CRUSH_ITEM_NONE) {
2574				temp->primary = temp->osds[i];
2575				break;
2576			}
2577		}
2578	}
2579
2580	/* primary_temp? */
2581	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
2582	if (pg)
2583		temp->primary = pg->primary_temp.osd;
2584}
2585
2586/*
2587 * Map a PG to its acting set as well as its up set.
2588 *
2589 * Acting set is used for data mapping purposes, while up set can be
2590 * recorded for detecting interval changes and deciding whether to
2591 * resend a request.
2592 */
2593void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
2594			       struct ceph_pg_pool_info *pi,
2595			       const struct ceph_pg *raw_pgid,
2596			       struct ceph_osds *up,
2597			       struct ceph_osds *acting)
2598{
2599	struct ceph_pg pgid;
2600	u32 pps;
2601
2602	WARN_ON(pi->id != raw_pgid->pool);
2603	raw_pg_to_pg(pi, raw_pgid, &pgid);
2604
2605	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
2606	apply_upmap(osdmap, &pgid, up);
2607	raw_to_up_osds(osdmap, pi, up);
2608	apply_primary_affinity(osdmap, pi, pps, up);
2609	get_temp_osds(osdmap, pi, &pgid, acting);
2610	if (!acting->size) {
2611		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
2612		acting->size = up->size;
2613		if (acting->primary == -1)
2614			acting->primary = up->primary;
2615	}
2616	WARN_ON(!osds_valid(up) || !osds_valid(acting));
2617}
2618
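/*
 * Compute the spg (pgid + shard) for a PG.  Pools that can shift OSDs
 * get CEPH_SPG_NOSHARD; otherwise the shard is the primary's position
 * in the acting set, and false is returned if the primary isn't found.
 */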
2619bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
2620			      struct ceph_pg_pool_info *pi,
2621			      const struct ceph_pg *raw_pgid,
2622			      struct ceph_spg *spgid)
2623{
2624	struct ceph_pg pgid;
2625	struct ceph_osds up, acting;
2626	int i;
2627
2628	WARN_ON(pi->id != raw_pgid->pool);
2629	raw_pg_to_pg(pi, raw_pgid, &pgid);
2630
2631	if (ceph_can_shift_osds(pi)) {
2632		spgid->pgid = pgid; /* struct */
2633		spgid->shard = CEPH_SPG_NOSHARD;
2634		return true;
2635	}
2636
2637	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
2638	for (i = 0; i < acting.size; i++) {
2639		if (acting.osds[i] == acting.primary) {
2640			spgid->pgid = pgid; /* struct */
2641			spgid->shard = i;
2642			return true;
2643		}
2644	}
2645
2646	return false;
2647}
2648
2649/*
2650 * Return acting primary for given PG, or -1 if none.
2651 */
2652int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
2653			      const struct ceph_pg *raw_pgid)
2654{
2655	struct ceph_pg_pool_info *pi;
2656	struct ceph_osds up, acting;
2657
2658	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
2659	if (!pi)
2660		return -1;
2661
2662	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
2663	return acting.primary;
2664}
2665EXPORT_SYMBOL(ceph_pg_to_acting_primary);