   1
   2/*
   3   rbd.c -- Export ceph rados objects as a Linux block device
   4
   5
   6   based on drivers/block/osdblk.c:
   7
   8   Copyright 2009 Red Hat, Inc.
   9
  10   This program is free software; you can redistribute it and/or modify
  11   it under the terms of the GNU General Public License as published by
  12   the Free Software Foundation.
  13
  14   This program is distributed in the hope that it will be useful,
  15   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17   GNU General Public License for more details.
  18
  19   You should have received a copy of the GNU General Public License
  20   along with this program; see the file COPYING.  If not, write to
  21   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  22
  23
  24
  25   For usage instructions, please refer to:
  26
  27                 Documentation/ABI/testing/sysfs-bus-rbd
  28
  29 */
  30
  31#include <linux/ceph/libceph.h>
  32#include <linux/ceph/osd_client.h>
  33#include <linux/ceph/mon_client.h>
  34#include <linux/ceph/decode.h>
  35#include <linux/parser.h>
  36#include <linux/bsearch.h>
  37
  38#include <linux/kernel.h>
  39#include <linux/device.h>
  40#include <linux/module.h>
 
  41#include <linux/fs.h>
  42#include <linux/blkdev.h>
  43#include <linux/slab.h>
  44#include <linux/idr.h>
 
  45
  46#include "rbd_types.h"
  47
  48#define RBD_DEBUG	/* Activate rbd_assert() calls */
  49
  50/*
  51 * The basic unit of block I/O is a sector.  It is interpreted in a
  52 * number of contexts in Linux (blk, bio, genhd), but the default is
  53 * universally 512 bytes.  These symbols are just slightly more
  54 * meaningful than the bare numbers they represent.
  55 */
  56#define	SECTOR_SHIFT	9
  57#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
  58
  59/*
  60 * Increment the given counter and return its updated value.
  61 * If the counter is already 0 it will not be incremented.
   62 * If the counter is already at its maximum value, -EINVAL is
   63 * returned without updating it.
  64 */
  65static int atomic_inc_return_safe(atomic_t *v)
  66{
  67	unsigned int counter;
  68
  69	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
  70	if (counter <= (unsigned int)INT_MAX)
  71		return (int)counter;
  72
  73	atomic_dec(v);
  74
  75	return -EINVAL;
  76}
  77
  78/* Decrement the counter.  Return the resulting value, or -EINVAL */
  79static int atomic_dec_return_safe(atomic_t *v)
  80{
  81	int counter;
  82
  83	counter = atomic_dec_return(v);
  84	if (counter >= 0)
  85		return counter;
  86
  87	atomic_inc(v);
  88
  89	return -EINVAL;
  90}
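/*
 * A minimal usage sketch (the caller shown is hypothetical; parent_ref
 * is the counter these helpers guard later in this file):
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *		use_parent(rbd_dev);
 *
 * A positive return means the count was bumped; once it has dropped to
 * 0 it stays 0 and the call returns 0, and a counter already at its
 * maximum yields -EINVAL instead of wrapping.
 */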
  91
  92#define RBD_DRV_NAME "rbd"
  93
  94#define RBD_MINORS_PER_MAJOR		256
  95#define RBD_SINGLE_MAJOR_PART_SHIFT	4
  96
  97#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
  98#define RBD_MAX_SNAP_NAME_LEN	\
  99			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
 100
 101#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
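/*
 * Rough arithmetic behind the 4KB figure, assuming 8-byte snapshot
 * ids: 510 * 8 = 4080 bytes of ids, which together with the snap
 * context header still fits in one 4096-byte page.
 */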
 102
 103#define RBD_SNAP_HEAD_NAME	"-"
 104
 105#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */
 106
 107/* This allows a single page to hold an image name sent by OSD */
 108#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
 109#define RBD_IMAGE_ID_LEN_MAX	64
 110
 111#define RBD_OBJ_PREFIX_LEN_MAX	64
 112
 113/* Feature bits */
 114
 115#define RBD_FEATURE_LAYERING	(1<<0)
 116#define RBD_FEATURE_STRIPINGV2	(1<<1)
 117#define RBD_FEATURES_ALL \
 118	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
 119
 120/* Features supported by this (client software) implementation. */
 121
 122#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
 123
 124/*
 125 * An RBD device name will be "rbd#", where the "rbd" comes from
 126 * RBD_DRV_NAME above, and # is a unique integer identifier.
 127 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 128 * enough to hold all possible device names.
 129 */
 130#define DEV_NAME_LEN		32
 131#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
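/*
 * ((5 * sizeof (int)) / 2 + 1) over-estimates the number of decimal
 * digits an int can need: for a 4-byte int it works out to 11
 * characters, enough for the 10 digits of 2^32 - 1 plus a sign.
 */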
 132
 133/*
 134 * block device image metadata (in-memory version)
 135 */
 136struct rbd_image_header {
 137	/* These six fields never change for a given rbd image */
 138	char *object_prefix;
 139	__u8 obj_order;
 140	__u8 crypt_type;
 141	__u8 comp_type;
 142	u64 stripe_unit;
 143	u64 stripe_count;
 
 144	u64 features;		/* Might be changeable someday? */
 145
 146	/* The remaining fields need to be updated occasionally */
 147	u64 image_size;
 148	struct ceph_snap_context *snapc;
 149	char *snap_names;	/* format 1 only */
 150	u64 *snap_sizes;	/* format 1 only */
 151};
 152
 153/*
 154 * An rbd image specification.
 155 *
 156 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 157 * identify an image.  Each rbd_dev structure includes a pointer to
 158 * an rbd_spec structure that encapsulates this identity.
 159 *
 160 * Each of the id's in an rbd_spec has an associated name.  For a
 161 * user-mapped image, the names are supplied and the id's associated
 162 * with them are looked up.  For a layered image, a parent image is
 163 * defined by the tuple, and the names are looked up.
 164 *
 165 * An rbd_dev structure contains a parent_spec pointer which is
 166 * non-null if the image it represents is a child in a layered
 167 * image.  This pointer will refer to the rbd_spec structure used
 168 * by the parent rbd_dev for its own identity (i.e., the structure
 169 * is shared between the parent and child).
 170 *
 171 * Since these structures are populated once, during the discovery
 172 * phase of image construction, they are effectively immutable so
 173 * we make no effort to synchronize access to them.
 174 *
 175 * Note that code herein does not assume the image name is known (it
 176 * could be a null pointer).
 177 */
 178struct rbd_spec {
 179	u64		pool_id;
 180	const char	*pool_name;
 
 181
 182	const char	*image_id;
 183	const char	*image_name;
 184
 185	u64		snap_id;
 186	const char	*snap_name;
 187
 188	struct kref	kref;
 189};
 190
 191/*
 192 * an instance of the client.  multiple devices may share an rbd client.
 193 */
 194struct rbd_client {
 195	struct ceph_client	*client;
 196	struct kref		kref;
 197	struct list_head	node;
 198};
 199
 200struct rbd_img_request;
 201typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
 202
 203#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */
 204
 205struct rbd_obj_request;
 206typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
 207
 208enum obj_request_type {
 209	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
 210};
 211
 212enum obj_req_flags {
 213	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
 214	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
 215	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
 216	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
 217};
 218
 219struct rbd_obj_request {
 220	const char		*object_name;
 221	u64			offset;		/* object start byte */
 222	u64			length;		/* bytes from offset */
 223	unsigned long		flags;
 224
 225	/*
 226	 * An object request associated with an image will have its
 227	 * img_data flag set; a standalone object request will not.
 228	 *
 229	 * A standalone object request will have which == BAD_WHICH
 230	 * and a null obj_request pointer.
 231	 *
 232	 * An object request initiated in support of a layered image
 233	 * object (to check for its existence before a write) will
 234	 * have which == BAD_WHICH and a non-null obj_request pointer.
 235	 *
 236	 * Finally, an object request for rbd image data will have
 237	 * which != BAD_WHICH, and will have a non-null img_request
 238	 * pointer.  The value of which will be in the range
 239	 * 0..(img_request->obj_request_count-1).
 240	 */
 241	union {
 242		struct rbd_obj_request	*obj_request;	/* STAT op */
 243		struct {
 244			struct rbd_img_request	*img_request;
 245			u64			img_offset;
 246			/* links for img_request->obj_requests list */
 247			struct list_head	links;
 248		};
 249	};
 250	u32			which;		/* posn image request list */
 251
 252	enum obj_request_type	type;
 253	union {
 254		struct bio	*bio_list;
 255		struct {
 256			struct page	**pages;
 257			u32		page_count;
 258		};
 259	};
 260	struct page		**copyup_pages;
 261	u32			copyup_page_count;
 262
 263	struct ceph_osd_request	*osd_req;
 264
 265	u64			xferred;	/* bytes transferred */
 266	int			result;
 267
 268	rbd_obj_callback_t	callback;
 269	struct completion	completion;
 270
 271	struct kref		kref;
 272};
 273
 274enum img_req_flags {
 275	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
 276	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
 277	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
 278};
 279
 280struct rbd_img_request {
 281	struct rbd_device	*rbd_dev;
 282	u64			offset;	/* starting image byte offset */
 283	u64			length;	/* byte count from offset */
 284	unsigned long		flags;
 
 285	union {
 286		u64			snap_id;	/* for reads */
 287		struct ceph_snap_context *snapc;	/* for writes */
 288	};
 289	union {
 290		struct request		*rq;		/* block request */
 291		struct rbd_obj_request	*obj_request;	/* obj req initiator */
 292	};
 293	struct page		**copyup_pages;
 294	u32			copyup_page_count;
 295	spinlock_t		completion_lock;/* protects next_completion */
 296	u32			next_completion;
 297	rbd_img_callback_t	callback;
 298	u64			xferred;/* aggregate bytes transferred */
 299	int			result;	/* first nonzero obj_request result */
 300
 301	u32			obj_request_count;
 302	struct list_head	obj_requests;	/* rbd_obj_request structs */
 303
 304	struct kref		kref;
 305};
 306
 307#define for_each_obj_request(ireq, oreq) \
 308	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
 309#define for_each_obj_request_from(ireq, oreq) \
 310	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
 311#define for_each_obj_request_safe(ireq, oreq, n) \
 312	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
 313
 314struct rbd_mapping {
 315	u64                     size;
 316	u64                     features;
 317	bool			read_only;
 318};
 319
 320/*
 321 * a single device
 322 */
 323struct rbd_device {
 324	int			dev_id;		/* blkdev unique id */
 325
 326	int			major;		/* blkdev assigned major */
 327	int			minor;
 328	struct gendisk		*disk;		/* blkdev's gendisk and rq */
 329
 330	u32			image_format;	/* Either 1 or 2 */
 331	struct rbd_client	*rbd_client;
 332
 333	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
 334
 335	spinlock_t		lock;		/* queue, flags, open_count */
 336
 337	struct rbd_image_header	header;
 338	unsigned long		flags;		/* possibly lock protected */
 339	struct rbd_spec		*spec;
 340
 341	char			*header_name;
 342
 343	struct ceph_file_layout	layout;
 344
 345	struct ceph_osd_event   *watch_event;
 346	struct rbd_obj_request	*watch_request;
 347
 348	struct rbd_spec		*parent_spec;
 349	u64			parent_overlap;
 350	atomic_t		parent_ref;
 351	struct rbd_device	*parent;
 352
 353	/* protects updating the header */
 354	struct rw_semaphore     header_rwsem;
 355
 356	struct rbd_mapping	mapping;
 357
 358	struct list_head	node;
 359
 360	/* sysfs related */
 361	struct device		dev;
 362	unsigned long		open_count;	/* protected by lock */
 363};
 364
 365/*
 366 * Flag bits for rbd_dev->flags.  If atomicity is required,
 367 * rbd_dev->lock is used to protect access.
 368 *
 369 * Currently, only the "removing" flag (which is coupled with the
 370 * "open_count" field) requires atomic access.
 371 */
 372enum rbd_dev_flags {
 373	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
 374	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
 375};
 376
 377static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */
 378
 379static LIST_HEAD(rbd_dev_list);    /* devices */
 380static DEFINE_SPINLOCK(rbd_dev_list_lock);
 381
 382static LIST_HEAD(rbd_client_list);		/* clients */
 383static DEFINE_SPINLOCK(rbd_client_list_lock);
 384
 385/* Slab caches for frequently-allocated structures */
 386
 387static struct kmem_cache	*rbd_img_request_cache;
 388static struct kmem_cache	*rbd_obj_request_cache;
 389static struct kmem_cache	*rbd_segment_name_cache;
 390
 391static int rbd_major;
 392static DEFINE_IDA(rbd_dev_id_ida);
 393
 394/*
  395 * Default to false for now, as single-major requires version >= 0.75 of
  396 * the userspace rbd utility.
 397 */
 398static bool single_major = false;
 399module_param(single_major, bool, S_IRUGO);
 400MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
 401
 402static int rbd_img_request_submit(struct rbd_img_request *img_request);
 403
 404static void rbd_dev_device_release(struct device *dev);
 405
 406static ssize_t rbd_add(struct bus_type *bus, const char *buf,
 407		       size_t count);
 408static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
 409			  size_t count);
 410static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
 411				    size_t count);
 412static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
 413				       size_t count);
 414static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
 415static void rbd_spec_put(struct rbd_spec *spec);
 416
 417static int rbd_dev_id_to_minor(int dev_id)
 418{
 419	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
 420}
 421
 422static int minor_to_rbd_dev_id(int minor)
 423{
 424	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
 425}
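/*
 * A worked example, assuming the values defined above: with
 * RBD_SINGLE_MAJOR_PART_SHIFT = 4 each device owns 16 minors (the
 * whole disk plus 15 partitions), so dev_id 3 maps to minor 48 and
 * minors 48..63 all map back to dev_id 3.
 */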
 426
 427static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
 428static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
 429static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
 430static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
 431
 432static struct attribute *rbd_bus_attrs[] = {
 433	&bus_attr_add.attr,
 434	&bus_attr_remove.attr,
 435	&bus_attr_add_single_major.attr,
 436	&bus_attr_remove_single_major.attr,
 437	NULL,
 438};
 439
 440static umode_t rbd_bus_is_visible(struct kobject *kobj,
 441				  struct attribute *attr, int index)
 442{
 443	if (!single_major &&
 444	    (attr == &bus_attr_add_single_major.attr ||
 445	     attr == &bus_attr_remove_single_major.attr))
 446		return 0;
 447
 448	return attr->mode;
 449}
 450
 451static const struct attribute_group rbd_bus_group = {
 452	.attrs = rbd_bus_attrs,
 453	.is_visible = rbd_bus_is_visible,
 454};
 455__ATTRIBUTE_GROUPS(rbd_bus);
 456
 457static struct bus_type rbd_bus_type = {
 458	.name		= "rbd",
 459	.bus_groups	= rbd_bus_groups,
 460};
 461
 462static void rbd_root_dev_release(struct device *dev)
 463{
 464}
 465
 466static struct device rbd_root_dev = {
 467	.init_name =    "rbd",
 468	.release =      rbd_root_dev_release,
 469};
 470
 471static __printf(2, 3)
 472void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 473{
 474	struct va_format vaf;
 475	va_list args;
 476
 477	va_start(args, fmt);
 478	vaf.fmt = fmt;
 479	vaf.va = &args;
 480
 481	if (!rbd_dev)
 482		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
 483	else if (rbd_dev->disk)
 484		printk(KERN_WARNING "%s: %s: %pV\n",
 485			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
 486	else if (rbd_dev->spec && rbd_dev->spec->image_name)
 487		printk(KERN_WARNING "%s: image %s: %pV\n",
 488			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
 489	else if (rbd_dev->spec && rbd_dev->spec->image_id)
 490		printk(KERN_WARNING "%s: id %s: %pV\n",
 491			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
 492	else	/* punt */
 493		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
 494			RBD_DRV_NAME, rbd_dev, &vaf);
 495	va_end(args);
 496}
 497
 498#ifdef RBD_DEBUG
 499#define rbd_assert(expr)						\
 500		if (unlikely(!(expr))) {				\
 501			printk(KERN_ERR "\nAssertion failure in %s() "	\
 502						"at line %d:\n\n"	\
 503					"\trbd_assert(%s);\n\n",	\
 504					__func__, __LINE__, #expr);	\
 505			BUG();						\
 506		}
 507#else /* !RBD_DEBUG */
 508#  define rbd_assert(expr)	((void) 0)
 509#endif /* !RBD_DEBUG */
 510
 511static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 512static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 513static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 514
 515static int rbd_dev_refresh(struct rbd_device *rbd_dev);
 516static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
 517static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
 518static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
 519					u64 snap_id);
 520static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
 521				u8 *order, u64 *snap_size);
 522static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 523		u64 *snap_features);
 524static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
 525
 526static int rbd_open(struct block_device *bdev, fmode_t mode)
 527{
 528	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
 529	bool removing = false;
 530
 531	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
 532		return -EROFS;
 533
 534	spin_lock_irq(&rbd_dev->lock);
 535	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
 536		removing = true;
 537	else
 538		rbd_dev->open_count++;
 539	spin_unlock_irq(&rbd_dev->lock);
 540	if (removing)
 541		return -ENOENT;
 542
 543	(void) get_device(&rbd_dev->dev);
 544	set_device_ro(bdev, rbd_dev->mapping.read_only);
 545
 546	return 0;
 547}
 548
 549static void rbd_release(struct gendisk *disk, fmode_t mode)
 550{
 551	struct rbd_device *rbd_dev = disk->private_data;
 552	unsigned long open_count_before;
 553
 554	spin_lock_irq(&rbd_dev->lock);
 555	open_count_before = rbd_dev->open_count--;
 556	spin_unlock_irq(&rbd_dev->lock);
 557	rbd_assert(open_count_before > 0);
 558
 559	put_device(&rbd_dev->dev);
 560}
 561
 562static const struct block_device_operations rbd_bd_ops = {
 563	.owner			= THIS_MODULE,
 564	.open			= rbd_open,
 565	.release		= rbd_release,
 566};
 567
 568/*
 569 * Initialize an rbd client instance.  Success or not, this function
 570 * consumes ceph_opts.  Caller holds client_mutex.
 571 */
 572static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 573{
 574	struct rbd_client *rbdc;
 575	int ret = -ENOMEM;
 576
 577	dout("%s:\n", __func__);
 578	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
 579	if (!rbdc)
 580		goto out_opt;
 581
 582	kref_init(&rbdc->kref);
 583	INIT_LIST_HEAD(&rbdc->node);
 584
 585	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
 586	if (IS_ERR(rbdc->client))
 587		goto out_rbdc;
 588	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
 589
 590	ret = ceph_open_session(rbdc->client);
 591	if (ret < 0)
 592		goto out_client;
 593
 594	spin_lock(&rbd_client_list_lock);
 595	list_add_tail(&rbdc->node, &rbd_client_list);
 596	spin_unlock(&rbd_client_list_lock);
 597
 598	dout("%s: rbdc %p\n", __func__, rbdc);
 599
 600	return rbdc;
 601out_client:
 602	ceph_destroy_client(rbdc->client);
 603out_rbdc:
 604	kfree(rbdc);
 605out_opt:
 606	if (ceph_opts)
 607		ceph_destroy_options(ceph_opts);
 608	dout("%s: error %d\n", __func__, ret);
 609
 610	return ERR_PTR(ret);
 611}
 612
 613static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
 614{
 615	kref_get(&rbdc->kref);
 616
 617	return rbdc;
 618}
 619
 620/*
 621 * Find a ceph client with specific addr and configuration.  If
 622 * found, bump its reference count.
 623 */
 624static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
 625{
 626	struct rbd_client *client_node;
 627	bool found = false;
 628
 629	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
 630		return NULL;
 631
 632	spin_lock(&rbd_client_list_lock);
 633	list_for_each_entry(client_node, &rbd_client_list, node) {
 634		if (!ceph_compare_options(ceph_opts, client_node->client)) {
 635			__rbd_get_client(client_node);
 636
 637			found = true;
 638			break;
 639		}
 640	}
 641	spin_unlock(&rbd_client_list_lock);
 642
 643	return found ? client_node : NULL;
 644}
 645
 646/*
 647 * mount options
 648 */
 649enum {
 650	Opt_last_int,
 651	/* int args above */
 652	Opt_last_string,
 
 653	/* string args above */
 654	Opt_read_only,
 655	Opt_read_write,
 656	/* Boolean args above */
 657	Opt_last_bool,
 
 658};
 659
 660static match_table_t rbd_opts_tokens = {
 661	/* int args above */
 662	/* string args above */
 663	{Opt_read_only, "read_only"},
 664	{Opt_read_only, "ro"},		/* Alternate spelling */
 665	{Opt_read_write, "read_write"},
 666	{Opt_read_write, "rw"},		/* Alternate spelling */
 667	/* Boolean args above */
 668	{-1, NULL}
 669};
 670
 671struct rbd_options {
 672	bool	read_only;
 673};
 674
 675#define RBD_READ_ONLY_DEFAULT	false
 676
 677static int parse_rbd_opts_token(char *c, void *private)
 678{
 679	struct rbd_options *rbd_opts = private;
 680	substring_t argstr[MAX_OPT_ARGS];
 681	int token, intval, ret;
 682
 683	token = match_token(c, rbd_opts_tokens, argstr);
 684	if (token < 0)
 685		return -EINVAL;
 686
 687	if (token < Opt_last_int) {
 688		ret = match_int(&argstr[0], &intval);
 689		if (ret < 0) {
 690			pr_err("bad mount option arg (not int) "
 691			       "at '%s'\n", c);
 692			return ret;
 693		}
 694		dout("got int token %d val %d\n", token, intval);
 695	} else if (token > Opt_last_int && token < Opt_last_string) {
 696		dout("got string token %d val %s\n", token,
 697		     argstr[0].from);
 698	} else if (token > Opt_last_string && token < Opt_last_bool) {
 699		dout("got Boolean token %d\n", token);
 700	} else {
 701		dout("got token %d\n", token);
 702	}
 703
 704	switch (token) {
 705	case Opt_read_only:
 706		rbd_opts->read_only = true;
 707		break;
 708	case Opt_read_write:
 709		rbd_opts->read_only = false;
 710		break;
 711	default:
 712		rbd_assert(false);
 713		break;
 714	}
 715	return 0;
 716}
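/*
 * A sketch of the effect, assuming the option string given at map
 * time is split on commas and each piece handed to this callback:
 * "ro" (or "read_only") sets rbd_opts->read_only, "rw" (or
 * "read_write") clears it, and an unrecognized token fails the map
 * with -EINVAL.
 */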
 717
 718/*
 719 * Get a ceph client with specific addr and configuration, if one does
 720 * not exist create it.  Either way, ceph_opts is consumed by this
 721 * function.
 722 */
 723static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 724{
 725	struct rbd_client *rbdc;
 726
 727	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
 728	rbdc = rbd_client_find(ceph_opts);
 729	if (rbdc)	/* using an existing client */
 730		ceph_destroy_options(ceph_opts);
 731	else
 732		rbdc = rbd_client_create(ceph_opts);
 733	mutex_unlock(&client_mutex);
 734
 735	return rbdc;
 736}
 737
 738/*
 739 * Destroy ceph client
 740 *
 741 * Caller must hold rbd_client_list_lock.
 742 */
 743static void rbd_client_release(struct kref *kref)
 744{
 745	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 746
 747	dout("%s: rbdc %p\n", __func__, rbdc);
 748	spin_lock(&rbd_client_list_lock);
 749	list_del(&rbdc->node);
 750	spin_unlock(&rbd_client_list_lock);
 751
 752	ceph_destroy_client(rbdc->client);
 753	kfree(rbdc);
 754}
 755
 756/*
 757 * Drop reference to ceph client node. If it's not referenced anymore, release
 758 * it.
 759 */
 760static void rbd_put_client(struct rbd_client *rbdc)
 761{
 762	if (rbdc)
 763		kref_put(&rbdc->kref, rbd_client_release);
 764}
 765
 766static bool rbd_image_format_valid(u32 image_format)
 767{
 768	return image_format == 1 || image_format == 2;
 769}
 770
 771static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
 772{
 773	size_t size;
 774	u32 snap_count;
 775
 776	/* The header has to start with the magic rbd header text */
 777	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
 778		return false;
 779
 780	/* The bio layer requires at least sector-sized I/O */
 781
 782	if (ondisk->options.order < SECTOR_SHIFT)
 783		return false;
 784
 785	/* If we use u64 in a few spots we may be able to loosen this */
 786
 787	if (ondisk->options.order > 8 * sizeof (int) - 1)
 788		return false;
 789
 790	/*
 791	 * The size of a snapshot header has to fit in a size_t, and
 792	 * that limits the number of snapshots.
 793	 */
 794	snap_count = le32_to_cpu(ondisk->snap_count);
 795	size = SIZE_MAX - sizeof (struct ceph_snap_context);
 796	if (snap_count > size / sizeof (__le64))
 797		return false;
 798
 799	/*
  800	 * Not only that, but the size of the entire snapshot
 801	 * header must also be representable in a size_t.
 802	 */
 803	size -= snap_count * sizeof (__le64);
 804	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
 805		return false;
 806
 807	return true;
 808}
 809
 810/*
 811 * Fill an rbd image header with information from the given format 1
 812 * on-disk header.
 813 */
 814static int rbd_header_from_disk(struct rbd_device *rbd_dev,
 815				 struct rbd_image_header_ondisk *ondisk)
 816{
 817	struct rbd_image_header *header = &rbd_dev->header;
 818	bool first_time = header->object_prefix == NULL;
 819	struct ceph_snap_context *snapc;
 820	char *object_prefix = NULL;
 821	char *snap_names = NULL;
 822	u64 *snap_sizes = NULL;
 823	u32 snap_count;
 824	size_t size;
 825	int ret = -ENOMEM;
 826	u32 i;
 827
 828	/* Allocate this now to avoid having to handle failure below */
 829
 830	if (first_time) {
 831		size_t len;
 832
 833		len = strnlen(ondisk->object_prefix,
 834				sizeof (ondisk->object_prefix));
 835		object_prefix = kmalloc(len + 1, GFP_KERNEL);
 836		if (!object_prefix)
 837			return -ENOMEM;
 838		memcpy(object_prefix, ondisk->object_prefix, len);
 839		object_prefix[len] = '\0';
 840	}
 841
 842	/* Allocate the snapshot context and fill it in */
 843
 844	snap_count = le32_to_cpu(ondisk->snap_count);
 845	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
 846	if (!snapc)
 847		goto out_err;
 848	snapc->seq = le64_to_cpu(ondisk->snap_seq);
 849	if (snap_count) {
 850		struct rbd_image_snap_ondisk *snaps;
 851		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 852
 853		/* We'll keep a copy of the snapshot names... */
 854
 855		if (snap_names_len > (u64)SIZE_MAX)
 856			goto out_2big;
 857		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
 858		if (!snap_names)
 859			goto out_err;
 860
 861		/* ...as well as the array of their sizes. */
 862
 863		size = snap_count * sizeof (*header->snap_sizes);
 864		snap_sizes = kmalloc(size, GFP_KERNEL);
 865		if (!snap_sizes)
 866			goto out_err;
 867
 868		/*
 869		 * Copy the names, and fill in each snapshot's id
 870		 * and size.
 871		 *
 872		 * Note that rbd_dev_v1_header_info() guarantees the
 873		 * ondisk buffer we're working with has
 874		 * snap_names_len bytes beyond the end of the
  875		 * snapshot id array, so this memcpy() is safe.
 876		 */
 877		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
 878		snaps = ondisk->snaps;
 879		for (i = 0; i < snap_count; i++) {
 880			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
 881			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
 882		}
 883	}
 884
 885	/* We won't fail any more, fill in the header */
 886
 887	if (first_time) {
 888		header->object_prefix = object_prefix;
 889		header->obj_order = ondisk->options.order;
 890		header->crypt_type = ondisk->options.crypt_type;
 891		header->comp_type = ondisk->options.comp_type;
 892		/* The rest aren't used for format 1 images */
 893		header->stripe_unit = 0;
 894		header->stripe_count = 0;
 895		header->features = 0;
 896	} else {
 897		ceph_put_snap_context(header->snapc);
 898		kfree(header->snap_names);
 899		kfree(header->snap_sizes);
 900	}
 901
 902	/* The remaining fields always get updated (when we refresh) */
 903
 904	header->image_size = le64_to_cpu(ondisk->image_size);
 905	header->snapc = snapc;
 906	header->snap_names = snap_names;
 907	header->snap_sizes = snap_sizes;
 908
 909	/* Make sure mapping size is consistent with header info */
 910
 911	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
 912		if (rbd_dev->mapping.size != header->image_size)
 913			rbd_dev->mapping.size = header->image_size;
 914
 915	return 0;
 916out_2big:
 917	ret = -EIO;
 918out_err:
 919	kfree(snap_sizes);
 920	kfree(snap_names);
 921	ceph_put_snap_context(snapc);
 922	kfree(object_prefix);
 923
 924	return ret;
 925}
 926
 927static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
 928{
 929	const char *snap_name;
 930
 931	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
 932
 933	/* Skip over names until we find the one we are looking for */
 934
 935	snap_name = rbd_dev->header.snap_names;
 936	while (which--)
 937		snap_name += strlen(snap_name) + 1;
 938
 939	return kstrdup(snap_name, GFP_KERNEL);
 940}
 941
 942/*
 943 * Snapshot id comparison function for use with qsort()/bsearch().
 944 * Note that result is for snapshots in *descending* order.
 945 */
 946static int snapid_compare_reverse(const void *s1, const void *s2)
 947{
 948	u64 snap_id1 = *(u64 *)s1;
 949	u64 snap_id2 = *(u64 *)s2;
 950
 951	if (snap_id1 < snap_id2)
 952		return 1;
 953	return snap_id1 == snap_id2 ? 0 : -1;
 954}
 955
 956/*
 957 * Search a snapshot context to see if the given snapshot id is
 958 * present.
 959 *
 960 * Returns the position of the snapshot id in the array if it's found,
 961 * or BAD_SNAP_INDEX otherwise.
 962 *
  963 * Note: The snapshot array is kept sorted (by the osd) in
 964 * reverse order, highest snapshot id first.
 965 */
 966static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
 967{
 968	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
 969	u64 *found;
 970
 971	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
 972				sizeof (snap_id), snapid_compare_reverse);
 973
 974	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
 975}
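/*
 * For example, with a snapshot context holding ids {12, 7, 3}
 * (descending, as the osd keeps them), looking up id 7 yields
 * index 1 while looking up id 5 yields BAD_SNAP_INDEX.
 */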
 976
 977static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
 978					u64 snap_id)
 979{
 980	u32 which;
 981	const char *snap_name;
 982
 983	which = rbd_dev_snap_index(rbd_dev, snap_id);
 984	if (which == BAD_SNAP_INDEX)
 985		return ERR_PTR(-ENOENT);
 986
 987	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
 988	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
 989}
 990
 991static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
 992{
 993	if (snap_id == CEPH_NOSNAP)
 994		return RBD_SNAP_HEAD_NAME;
 995
 996	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
 997	if (rbd_dev->image_format == 1)
 998		return rbd_dev_v1_snap_name(rbd_dev, snap_id);
 999
1000	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1001}
1002
1003static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1004				u64 *snap_size)
1005{
1006	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1007	if (snap_id == CEPH_NOSNAP) {
1008		*snap_size = rbd_dev->header.image_size;
1009	} else if (rbd_dev->image_format == 1) {
1010		u32 which;
1011
1012		which = rbd_dev_snap_index(rbd_dev, snap_id);
1013		if (which == BAD_SNAP_INDEX)
1014			return -ENOENT;
1015
1016		*snap_size = rbd_dev->header.snap_sizes[which];
1017	} else {
1018		u64 size = 0;
1019		int ret;
1020
1021		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1022		if (ret)
1023			return ret;
1024
1025		*snap_size = size;
1026	}
1027	return 0;
1028}
1029
1030static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1031			u64 *snap_features)
1032{
1033	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1034	if (snap_id == CEPH_NOSNAP) {
1035		*snap_features = rbd_dev->header.features;
1036	} else if (rbd_dev->image_format == 1) {
1037		*snap_features = 0;	/* No features for format 1 */
1038	} else {
1039		u64 features = 0;
1040		int ret;
1041
1042		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1043		if (ret)
1044			return ret;
1045
1046		*snap_features = features;
1047	}
1048	return 0;
1049}
1050
1051static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1052{
1053	u64 snap_id = rbd_dev->spec->snap_id;
1054	u64 size = 0;
1055	u64 features = 0;
1056	int ret;
1057
1058	ret = rbd_snap_size(rbd_dev, snap_id, &size);
1059	if (ret)
1060		return ret;
1061	ret = rbd_snap_features(rbd_dev, snap_id, &features);
1062	if (ret)
1063		return ret;
1064
1065	rbd_dev->mapping.size = size;
1066	rbd_dev->mapping.features = features;
1067
1068	return 0;
1069}
1070
1071static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1072{
1073	rbd_dev->mapping.size = 0;
1074	rbd_dev->mapping.features = 0;
1075}
1076
1077static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1078{
1079	char *name;
1080	u64 segment;
1081	int ret;
1082	char *name_format;
1083
1084	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1085	if (!name)
1086		return NULL;
1087	segment = offset >> rbd_dev->header.obj_order;
1088	name_format = "%s.%012llx";
1089	if (rbd_dev->image_format == 2)
1090		name_format = "%s.%016llx";
1091	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
1092			rbd_dev->header.object_prefix, segment);
1093	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
1094		pr_err("error formatting segment name for #%llu (%d)\n",
1095			segment, ret);
1096		kfree(name);
1097		name = NULL;
1098	}
1099
1100	return name;
1101}
1102
1103static void rbd_segment_name_free(const char *name)
1104{
1105	/* The explicit cast here is needed to drop the const qualifier */
1106
1107	kmem_cache_free(rbd_segment_name_cache, (void *)name);
1108}
1109
1110static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1111{
1112	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1113
1114	return offset & (segment_size - 1);
1115}
1116
1117static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1118				u64 offset, u64 length)
1119{
1120	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1121
1122	offset &= segment_size - 1;
1123
1124	rbd_assert(length <= U64_MAX - offset);
1125	if (offset + length > segment_size)
1126		length = segment_size - offset;
 
1127
1128	return length;
1129}
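/*
 * A worked example, assuming obj_order is 22 (4 MiB objects): image
 * byte offset 10 MiB falls in segment 2 at segment offset 2 MiB, and
 * a 3 MiB request starting there is clipped to the 2 MiB remaining
 * in that object; the caller issues the rest against segment 3.
 */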
1130
1131/*
1132 * returns the size of an object in the image
1133 */
1134static u64 rbd_obj_bytes(struct rbd_image_header *header)
1135{
1136	return 1 << header->obj_order;
1137}
1138
1139/*
1140 * bio helpers
1141 */
1142
1143static void bio_chain_put(struct bio *chain)
1144{
1145	struct bio *tmp;
 
1146
1147	while (chain) {
1148		tmp = chain;
1149		chain = chain->bi_next;
1150		bio_put(tmp);
1151	}
1152}
1153
1154/*
1155 * zeros a bio chain, starting at specific offset
1156 */
1157static void zero_bio_chain(struct bio *chain, int start_ofs)
1158{
1159	struct bio_vec bv;
1160	struct bvec_iter iter;
1161	unsigned long flags;
1162	void *buf;
1163	int pos = 0;
1164
1165	while (chain) {
1166		bio_for_each_segment(bv, chain, iter) {
1167			if (pos + bv.bv_len > start_ofs) {
1168				int remainder = max(start_ofs - pos, 0);
1169				buf = bvec_kmap_irq(&bv, &flags);
1170				memset(buf + remainder, 0,
1171				       bv.bv_len - remainder);
1172				flush_dcache_page(bv.bv_page);
1173				bvec_kunmap_irq(buf, &flags);
1174			}
1175			pos += bv.bv_len;
1176		}
1177
1178		chain = chain->bi_next;
1179	}
1180}
1181
1182/*
1183 * similar to zero_bio_chain(), zeros data defined by a page array,
1184 * starting at the given byte offset from the start of the array and
1185 * continuing up to the given end offset.  The pages array is
1186 * assumed to be big enough to hold all bytes up to the end.
1187 */
1188static void zero_pages(struct page **pages, u64 offset, u64 end)
1189{
1190	struct page **page = &pages[offset >> PAGE_SHIFT];
1191
1192	rbd_assert(end > offset);
1193	rbd_assert(end - offset <= (u64)SIZE_MAX);
1194	while (offset < end) {
1195		size_t page_offset;
1196		size_t length;
1197		unsigned long flags;
1198		void *kaddr;
1199
1200		page_offset = offset & ~PAGE_MASK;
1201		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
1202		local_irq_save(flags);
1203		kaddr = kmap_atomic(*page);
1204		memset(kaddr + page_offset, 0, length);
1205		flush_dcache_page(*page);
1206		kunmap_atomic(kaddr);
1207		local_irq_restore(flags);
1208
1209		offset += length;
1210		page++;
1211	}
1212}
1213
1214/*
1215 * Clone a portion of a bio, starting at the given byte offset
1216 * and continuing for the number of bytes indicated.
1217 */
1218static struct bio *bio_clone_range(struct bio *bio_src,
1219					unsigned int offset,
1220					unsigned int len,
1221					gfp_t gfpmask)
1222{
1223	struct bio *bio;
1224
1225	bio = bio_clone(bio_src, gfpmask);
1226	if (!bio)
1227		return NULL;	/* ENOMEM */
1228
1229	bio_advance(bio, offset);
1230	bio->bi_iter.bi_size = len;
1231
1232	return bio;
1233}
1234
1235/*
1236 * Clone a portion of a bio chain, starting at the given byte offset
1237 * into the first bio in the source chain and continuing for the
1238 * number of bytes indicated.  The result is another bio chain of
1239 * exactly the given length, or a null pointer on error.
1240 *
1241 * The bio_src and offset parameters are both in-out.  On entry they
1242 * refer to the first source bio and the offset into that bio where
1243 * the start of data to be cloned is located.
1244 *
1245 * On return, bio_src is updated to refer to the bio in the source
 1246 * chain that contains the first un-cloned byte, and *offset will
1247 * contain the offset of that byte within that bio.
1248 */
1249static struct bio *bio_chain_clone_range(struct bio **bio_src,
1250					unsigned int *offset,
1251					unsigned int len,
1252					gfp_t gfpmask)
1253{
1254	struct bio *bi = *bio_src;
1255	unsigned int off = *offset;
1256	struct bio *chain = NULL;
1257	struct bio **end;
1258
1259	/* Build up a chain of clone bios up to the limit */
1260
1261	if (!bi || off >= bi->bi_iter.bi_size || !len)
1262		return NULL;		/* Nothing to clone */
1263
1264	end = &chain;
1265	while (len) {
1266		unsigned int bi_size;
1267		struct bio *bio;
1268
1269		if (!bi) {
1270			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1271			goto out_err;	/* EINVAL; ran out of bio's */
1272		}
1273		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1274		bio = bio_clone_range(bi, off, bi_size, gfpmask);
1275		if (!bio)
1276			goto out_err;	/* ENOMEM */
1277
1278		*end = bio;
1279		end = &bio->bi_next;
1280
1281		off += bi_size;
1282		if (off == bi->bi_iter.bi_size) {
1283			bi = bi->bi_next;
1284			off = 0;
1285		}
1286		len -= bi_size;
1287	}
1288	*bio_src = bi;
1289	*offset = off;
1290
1291	return chain;
1292out_err:
1293	bio_chain_put(chain);
1294
1295	return NULL;
1296}
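/*
 * A minimal usage sketch (the values are made up): cloning 100 KiB
 * that starts 4 KiB into a source chain
 *
 *	struct bio *src = original_chain;
 *	unsigned int off = 4096;
 *	struct bio *clone;
 *
 *	clone = bio_chain_clone_range(&src, &off, 100 * 1024, GFP_NOIO);
 *
 * returns a new chain of exactly that length (or NULL on error) and
 * leaves src/off pointing at the first byte not yet cloned, ready
 * for a subsequent call.
 */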
1297
1298/*
1299 * The default/initial value for all object request flags is 0.  For
1300 * each flag, once its value is set to 1 it is never reset to 0
1301 * again.
1302 */
1303static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1304{
1305	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1306		struct rbd_device *rbd_dev;
1307
1308		rbd_dev = obj_request->img_request->rbd_dev;
1309		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
1310			obj_request);
1311	}
1312}
1313
1314static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1315{
1316	smp_mb();
1317	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1318}
1319
1320static void obj_request_done_set(struct rbd_obj_request *obj_request)
1321{
1322	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1323		struct rbd_device *rbd_dev = NULL;
1324
1325		if (obj_request_img_data_test(obj_request))
1326			rbd_dev = obj_request->img_request->rbd_dev;
1327		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
1328			obj_request);
1329	}
1330}
1331
1332static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1333{
1334	smp_mb();
1335	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1336}
1337
1338/*
1339 * This sets the KNOWN flag after (possibly) setting the EXISTS
1340 * flag.  The latter is set based on the "exists" value provided.
1341 *
1342 * Note that for our purposes once an object exists it never goes
 1343 * away again.  It's possible that the responses from two existence
1344 * checks are separated by the creation of the target object, and
1345 * the first ("doesn't exist") response arrives *after* the second
1346 * ("does exist").  In that case we ignore the second one.
1347 */
1348static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1349				bool exists)
1350{
1351	if (exists)
1352		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1353	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1354	smp_mb();
1355}
1356
1357static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1358{
1359	smp_mb();
1360	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1361}
1362
1363static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1364{
1365	smp_mb();
1366	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1367}
1368
1369static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1370{
1371	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1372		atomic_read(&obj_request->kref.refcount));
1373	kref_get(&obj_request->kref);
1374}
1375
1376static void rbd_obj_request_destroy(struct kref *kref);
1377static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1378{
1379	rbd_assert(obj_request != NULL);
1380	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1381		atomic_read(&obj_request->kref.refcount));
1382	kref_put(&obj_request->kref, rbd_obj_request_destroy);
1383}
1384
1385static bool img_request_child_test(struct rbd_img_request *img_request);
1386static void rbd_parent_request_destroy(struct kref *kref);
1387static void rbd_img_request_destroy(struct kref *kref);
1388static void rbd_img_request_put(struct rbd_img_request *img_request)
1389{
1390	rbd_assert(img_request != NULL);
1391	dout("%s: img %p (was %d)\n", __func__, img_request,
1392		atomic_read(&img_request->kref.refcount));
1393	if (img_request_child_test(img_request))
1394		kref_put(&img_request->kref, rbd_parent_request_destroy);
1395	else
1396		kref_put(&img_request->kref, rbd_img_request_destroy);
1397}
1398
1399static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1400					struct rbd_obj_request *obj_request)
1401{
1402	rbd_assert(obj_request->img_request == NULL);
1403
1404	/* Image request now owns object's original reference */
1405	obj_request->img_request = img_request;
1406	obj_request->which = img_request->obj_request_count;
1407	rbd_assert(!obj_request_img_data_test(obj_request));
1408	obj_request_img_data_set(obj_request);
1409	rbd_assert(obj_request->which != BAD_WHICH);
1410	img_request->obj_request_count++;
1411	list_add_tail(&obj_request->links, &img_request->obj_requests);
1412	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1413		obj_request->which);
1414}
1415
1416static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1417					struct rbd_obj_request *obj_request)
1418{
1419	rbd_assert(obj_request->which != BAD_WHICH);
1420
1421	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1422		obj_request->which);
1423	list_del(&obj_request->links);
1424	rbd_assert(img_request->obj_request_count > 0);
1425	img_request->obj_request_count--;
1426	rbd_assert(obj_request->which == img_request->obj_request_count);
1427	obj_request->which = BAD_WHICH;
1428	rbd_assert(obj_request_img_data_test(obj_request));
1429	rbd_assert(obj_request->img_request == img_request);
1430	obj_request->img_request = NULL;
1431	obj_request->callback = NULL;
1432	rbd_obj_request_put(obj_request);
1433}
1434
1435static bool obj_request_type_valid(enum obj_request_type type)
1436{
1437	switch (type) {
1438	case OBJ_REQUEST_NODATA:
1439	case OBJ_REQUEST_BIO:
1440	case OBJ_REQUEST_PAGES:
1441		return true;
1442	default:
1443		return false;
1444	}
1445}
1446
1447static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1448				struct rbd_obj_request *obj_request)
1449{
1450	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1451
1452	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1453}
1454
1455static void rbd_img_request_complete(struct rbd_img_request *img_request)
1456{
 
1457
1458	dout("%s: img %p\n", __func__, img_request);
1459
1460	/*
1461	 * If no error occurred, compute the aggregate transfer
1462	 * count for the image request.  We could instead use
1463	 * atomic64_cmpxchg() to update it as each object request
1464	 * completes; not clear which way is better off hand.
1465	 */
1466	if (!img_request->result) {
1467		struct rbd_obj_request *obj_request;
1468		u64 xferred = 0;
1469
1470		for_each_obj_request(img_request, obj_request)
1471			xferred += obj_request->xferred;
1472		img_request->xferred = xferred;
1473	}
1474
1475	if (img_request->callback)
1476		img_request->callback(img_request);
1477	else
1478		rbd_img_request_put(img_request);
1479}
1480
1481/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1482
1483static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1484{
1485	dout("%s: obj %p\n", __func__, obj_request);
1486
1487	return wait_for_completion_interruptible(&obj_request->completion);
1488}
1489
1490/*
1491 * The default/initial value for all image request flags is 0.  Each
1492 * is conditionally set to 1 at image request initialization time
 1493 * and currently never changes thereafter.
1494 */
1495static void img_request_write_set(struct rbd_img_request *img_request)
1496{
1497	set_bit(IMG_REQ_WRITE, &img_request->flags);
1498	smp_mb();
1499}
1500
1501static bool img_request_write_test(struct rbd_img_request *img_request)
1502{
1503	smp_mb();
1504	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1505}
1506
1507static void img_request_child_set(struct rbd_img_request *img_request)
1508{
1509	set_bit(IMG_REQ_CHILD, &img_request->flags);
1510	smp_mb();
1511}
1512
1513static void img_request_child_clear(struct rbd_img_request *img_request)
1514{
1515	clear_bit(IMG_REQ_CHILD, &img_request->flags);
1516	smp_mb();
1517}
1518
1519static bool img_request_child_test(struct rbd_img_request *img_request)
1520{
1521	smp_mb();
1522	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1523}
1524
1525static void img_request_layered_set(struct rbd_img_request *img_request)
1526{
1527	set_bit(IMG_REQ_LAYERED, &img_request->flags);
1528	smp_mb();
1529}
1530
1531static void img_request_layered_clear(struct rbd_img_request *img_request)
1532{
1533	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1534	smp_mb();
1535}
1536
1537static bool img_request_layered_test(struct rbd_img_request *img_request)
1538{
1539	smp_mb();
1540	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1541}
1542
1543static void
1544rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1545{
1546	u64 xferred = obj_request->xferred;
1547	u64 length = obj_request->length;
 
1548
1549	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1550		obj_request, obj_request->img_request, obj_request->result,
1551		xferred, length);
1552	/*
1553	 * ENOENT means a hole in the image.  We zero-fill the entire
1554	 * length of the request.  A short read also implies zero-fill
1555	 * to the end of the request.  An error requires the whole
1556	 * length of the request to be reported finished with an error
1557	 * to the block layer.  In each case we update the xferred
1558	 * count to indicate the whole request was satisfied.
1559	 */
1560	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1561	if (obj_request->result == -ENOENT) {
1562		if (obj_request->type == OBJ_REQUEST_BIO)
1563			zero_bio_chain(obj_request->bio_list, 0);
1564		else
1565			zero_pages(obj_request->pages, 0, length);
1566		obj_request->result = 0;
1567	} else if (xferred < length && !obj_request->result) {
1568		if (obj_request->type == OBJ_REQUEST_BIO)
1569			zero_bio_chain(obj_request->bio_list, xferred);
1570		else
1571			zero_pages(obj_request->pages, xferred, length);
1572	}
1573	obj_request->xferred = length;
1574	obj_request_done_set(obj_request);
1575}
1576
1577static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1578{
1579	dout("%s: obj %p cb %p\n", __func__, obj_request,
1580		obj_request->callback);
1581	if (obj_request->callback)
1582		obj_request->callback(obj_request);
1583	else
1584		complete_all(&obj_request->completion);
1585}
1586
1587static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1588{
1589	dout("%s: obj %p\n", __func__, obj_request);
1590	obj_request_done_set(obj_request);
1591}
1592
1593static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1594{
1595	struct rbd_img_request *img_request = NULL;
1596	struct rbd_device *rbd_dev = NULL;
1597	bool layered = false;
1598
1599	if (obj_request_img_data_test(obj_request)) {
1600		img_request = obj_request->img_request;
1601		layered = img_request && img_request_layered_test(img_request);
1602		rbd_dev = img_request->rbd_dev;
1603	}
1604
1605	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1606		obj_request, img_request, obj_request->result,
1607		obj_request->xferred, obj_request->length);
1608	if (layered && obj_request->result == -ENOENT &&
1609			obj_request->img_offset < rbd_dev->parent_overlap)
1610		rbd_img_parent_read(obj_request);
1611	else if (img_request)
1612		rbd_img_obj_request_read_callback(obj_request);
1613	else
1614		obj_request_done_set(obj_request);
1615}
1616
1617static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1618{
1619	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1620		obj_request->result, obj_request->length);
1621	/*
1622	 * There is no such thing as a successful short write.  Set
1623	 * it to our originally-requested length.
1624	 */
1625	obj_request->xferred = obj_request->length;
1626	obj_request_done_set(obj_request);
1627}
1628
1629/*
1630 * For a simple stat call there's nothing to do.  We'll do more if
1631 * this is part of a write sequence for a layered image.
1632 */
1633static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1634{
1635	dout("%s: obj %p\n", __func__, obj_request);
1636	obj_request_done_set(obj_request);
1637}
1638
1639static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1640				struct ceph_msg *msg)
1641{
1642	struct rbd_obj_request *obj_request = osd_req->r_priv;
1643	u16 opcode;
1644
1645	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1646	rbd_assert(osd_req == obj_request->osd_req);
1647	if (obj_request_img_data_test(obj_request)) {
1648		rbd_assert(obj_request->img_request);
1649		rbd_assert(obj_request->which != BAD_WHICH);
1650	} else {
1651		rbd_assert(obj_request->which == BAD_WHICH);
1652	}
1653
1654	if (osd_req->r_result < 0)
1655		obj_request->result = osd_req->r_result;
1656
1657	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
1658
1659	/*
1660	 * We support a 64-bit length, but ultimately it has to be
1661	 * passed to blk_end_request(), which takes an unsigned int.
1662	 */
1663	obj_request->xferred = osd_req->r_reply_op_len[0];
1664	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1665
1666	opcode = osd_req->r_ops[0].op;
1667	switch (opcode) {
1668	case CEPH_OSD_OP_READ:
1669		rbd_osd_read_callback(obj_request);
1670		break;
1671	case CEPH_OSD_OP_SETALLOCHINT:
1672		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
1673		/* fall through */
1674	case CEPH_OSD_OP_WRITE:
1675		rbd_osd_write_callback(obj_request);
1676		break;
1677	case CEPH_OSD_OP_STAT:
1678		rbd_osd_stat_callback(obj_request);
1679		break;
1680	case CEPH_OSD_OP_CALL:
1681	case CEPH_OSD_OP_NOTIFY_ACK:
1682	case CEPH_OSD_OP_WATCH:
1683		rbd_osd_trivial_callback(obj_request);
1684		break;
1685	default:
1686		rbd_warn(NULL, "%s: unsupported op %hu\n",
1687			obj_request->object_name, (unsigned short) opcode);
1688		break;
1689	}
1690
1691	if (obj_request_done_test(obj_request))
1692		rbd_obj_request_complete(obj_request);
1693}
1694
1695static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1696{
1697	struct rbd_img_request *img_request = obj_request->img_request;
1698	struct ceph_osd_request *osd_req = obj_request->osd_req;
1699	u64 snap_id;
1700
1701	rbd_assert(osd_req != NULL);
1702
1703	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1704	ceph_osdc_build_request(osd_req, obj_request->offset,
1705			NULL, snap_id, NULL);
1706}
1707
1708static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1709{
1710	struct rbd_img_request *img_request = obj_request->img_request;
1711	struct ceph_osd_request *osd_req = obj_request->osd_req;
1712	struct ceph_snap_context *snapc;
1713	struct timespec mtime = CURRENT_TIME;
1714
1715	rbd_assert(osd_req != NULL);
1716
1717	snapc = img_request ? img_request->snapc : NULL;
1718	ceph_osdc_build_request(osd_req, obj_request->offset,
1719			snapc, CEPH_NOSNAP, &mtime);
1720}
1721
1722/*
1723 * Create an osd request.  A read request has one osd op (read).
1724 * A write request has either one (watch) or two (hint+write) osd ops.
1725 * (All rbd data writes are prefixed with an allocation hint op, but
1726 * technically osd watch is a write request, hence this distinction.)
1727 */
1728static struct ceph_osd_request *rbd_osd_req_create(
1729					struct rbd_device *rbd_dev,
1730					bool write_request,
1731					unsigned int num_ops,
1732					struct rbd_obj_request *obj_request)
1733{
1734	struct ceph_snap_context *snapc = NULL;
1735	struct ceph_osd_client *osdc;
1736	struct ceph_osd_request *osd_req;
1737
1738	if (obj_request_img_data_test(obj_request)) {
1739		struct rbd_img_request *img_request = obj_request->img_request;
1740
1741		rbd_assert(write_request ==
1742				img_request_write_test(img_request));
1743		if (write_request)
1744			snapc = img_request->snapc;
1745	}
1746
1747	rbd_assert(num_ops == 1 || (write_request && num_ops == 2));
1748
1749	/* Allocate and initialize the request, for the num_ops ops */
1750
1751	osdc = &rbd_dev->rbd_client->client->osdc;
1752	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1753					  GFP_ATOMIC);
1754	if (!osd_req)
1755		return NULL;	/* ENOMEM */
1756
1757	if (write_request)
1758		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1759	else
1760		osd_req->r_flags = CEPH_OSD_FLAG_READ;
1761
1762	osd_req->r_callback = rbd_osd_req_callback;
1763	osd_req->r_priv = obj_request;
1764
1765	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1766	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
1767
1768	return osd_req;
1769}
1770
1771/*
1772 * Create a copyup osd request based on the information in the
1773 * object request supplied.  A copyup request has three osd ops,
1774 * a copyup method call, a hint op, and a write op.
1775 */
1776static struct ceph_osd_request *
1777rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1778{
1779	struct rbd_img_request *img_request;
1780	struct ceph_snap_context *snapc;
1781	struct rbd_device *rbd_dev;
1782	struct ceph_osd_client *osdc;
1783	struct ceph_osd_request *osd_req;
1784
1785	rbd_assert(obj_request_img_data_test(obj_request));
1786	img_request = obj_request->img_request;
1787	rbd_assert(img_request);
1788	rbd_assert(img_request_write_test(img_request));
1789
1790	/* Allocate and initialize the request, for the three ops */
1791
1792	snapc = img_request->snapc;
1793	rbd_dev = img_request->rbd_dev;
1794	osdc = &rbd_dev->rbd_client->client->osdc;
1795	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
1796	if (!osd_req)
1797		return NULL;	/* ENOMEM */
1798
1799	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1800	osd_req->r_callback = rbd_osd_req_callback;
1801	osd_req->r_priv = obj_request;
1802
1803	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1804	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
1805
1806	return osd_req;
1807}
1808
1809
1810static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1811{
1812	ceph_osdc_put_request(osd_req);
1813}
1814
1815/* object_name is assumed to be a non-null pointer and NUL-terminated */
1816
1817static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1818						u64 offset, u64 length,
1819						enum obj_request_type type)
1820{
1821	struct rbd_obj_request *obj_request;
1822	size_t size;
1823	char *name;
1824
1825	rbd_assert(obj_request_type_valid(type));
1826
1827	size = strlen(object_name) + 1;
1828	name = kmalloc(size, GFP_KERNEL);
1829	if (!name)
1830		return NULL;
1831
1832	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1833	if (!obj_request) {
1834		kfree(name);
1835		return NULL;
1836	}
1837
1838	obj_request->object_name = memcpy(name, object_name, size);
1839	obj_request->offset = offset;
1840	obj_request->length = length;
1841	obj_request->flags = 0;
1842	obj_request->which = BAD_WHICH;
1843	obj_request->type = type;
1844	INIT_LIST_HEAD(&obj_request->links);
1845	init_completion(&obj_request->completion);
1846	kref_init(&obj_request->kref);
1847
1848	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1849		offset, length, (int)type, obj_request);
1850
1851	return obj_request;
1852}
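/*
 * Editor's note (not part of the original source): an object request made
 * here is reference counted via its kref.  The usual lifecycle, as driven
 * by the image request code below, is roughly:
 *
 *	obj_request = rbd_obj_request_create(name, off, len, type);
 *	rbd_img_obj_request_add(img_request, obj_request);
 *	obj_request->osd_req = rbd_osd_req_create(...);
 *	rbd_obj_request_submit(osdc, obj_request);
 *	// ...osd completion runs obj_request->callback...
 *	rbd_obj_request_put(obj_request);  // final put reaches
 *					   // rbd_obj_request_destroy()
 */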
1853
1854static void rbd_obj_request_destroy(struct kref *kref)
1855{
1856	struct rbd_obj_request *obj_request;
1857
1858	obj_request = container_of(kref, struct rbd_obj_request, kref);
1859
1860	dout("%s: obj %p\n", __func__, obj_request);
1861
1862	rbd_assert(obj_request->img_request == NULL);
1863	rbd_assert(obj_request->which == BAD_WHICH);
1864
1865	if (obj_request->osd_req)
1866		rbd_osd_req_destroy(obj_request->osd_req);
1867
1868	rbd_assert(obj_request_type_valid(obj_request->type));
1869	switch (obj_request->type) {
1870	case OBJ_REQUEST_NODATA:
1871		break;		/* Nothing to do */
1872	case OBJ_REQUEST_BIO:
1873		if (obj_request->bio_list)
1874			bio_chain_put(obj_request->bio_list);
1875		break;
1876	case OBJ_REQUEST_PAGES:
1877		if (obj_request->pages)
1878			ceph_release_page_vector(obj_request->pages,
1879						obj_request->page_count);
1880		break;
1881	}
1882
1883	kfree(obj_request->object_name);
1884	obj_request->object_name = NULL;
1885	kmem_cache_free(rbd_obj_request_cache, obj_request);
1886}
1887
1888/* It's OK to call this for a device with no parent */
1889
1890static void rbd_spec_put(struct rbd_spec *spec);
1891static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1892{
1893	rbd_dev_remove_parent(rbd_dev);
1894	rbd_spec_put(rbd_dev->parent_spec);
1895	rbd_dev->parent_spec = NULL;
1896	rbd_dev->parent_overlap = 0;
1897}
1898
1899/*
1900 * Parent image reference counting is used to determine when an
1901 * image's parent fields can be safely torn down--after there are no
1902 * more in-flight requests to the parent image.  When the last
1903 * reference is dropped, cleaning them up is safe.
1904 */
1905static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1906{
1907	int counter;
1908
1909	if (!rbd_dev->parent_spec)
1910		return;
1911
1912	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1913	if (counter > 0)
1914		return;
1915
1916	/* Last reference; clean up parent data structures */
1917
1918	if (!counter)
1919		rbd_dev_unparent(rbd_dev);
1920	else
1921		rbd_warn(rbd_dev, "parent reference underflow\n");
1922}
1923
1924/*
1925 * If an image has a non-zero parent overlap, get a reference to its
1926 * parent.
1927 *
1928 * We must get the reference before checking for the overlap to
1929 * coordinate properly with zeroing the parent overlap in
1930 * rbd_dev_v2_parent_info() when an image gets flattened.  We
1931 * drop it again if there is no overlap.
1932 *
1933 * Returns true if the rbd device has a parent with a non-zero
1934 * overlap and a reference for it was successfully taken, or
1935 * false otherwise.
1936 */
1937static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1938{
1939	int counter;
1940
1941	if (!rbd_dev->parent_spec)
1942		return false;
1943
1944	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1945	if (counter > 0 && rbd_dev->parent_overlap)
1946		return true;
1947
1948	/* Image was flattened, but parent is not yet torn down */
1949
1950	if (counter < 0)
1951		rbd_warn(rbd_dev, "parent reference overflow\n");
1952
1953	return false;
1954}
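/*
 * Editor's note (not part of the original source): parent references taken
 * here pair with rbd_dev_parent_put().  In the code below,
 * rbd_img_request_create() calls rbd_dev_parent_get() and marks the image
 * request layered on success; rbd_img_request_destroy() drops that
 * reference again, and the last put (after a flatten has zeroed the
 * overlap) tears the parent down via rbd_dev_unparent().
 */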
1955
1956/*
1957 * Caller is responsible for filling in the list of object requests
1958 * that comprises the image request, and the Linux request pointer
1959 * (if there is one).
1960 */
1961static struct rbd_img_request *rbd_img_request_create(
1962					struct rbd_device *rbd_dev,
1963					u64 offset, u64 length,
1964					bool write_request)
1965{
1966	struct rbd_img_request *img_request;
1967
1968	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
1969	if (!img_request)
1970		return NULL;
1971
1972	if (write_request) {
1973		down_read(&rbd_dev->header_rwsem);
1974		ceph_get_snap_context(rbd_dev->header.snapc);
1975		up_read(&rbd_dev->header_rwsem);
1976	}
1977
1978	img_request->rq = NULL;
1979	img_request->rbd_dev = rbd_dev;
1980	img_request->offset = offset;
1981	img_request->length = length;
1982	img_request->flags = 0;
1983	if (write_request) {
1984		img_request_write_set(img_request);
1985		img_request->snapc = rbd_dev->header.snapc;
1986	} else {
1987		img_request->snap_id = rbd_dev->spec->snap_id;
1988	}
1989	if (rbd_dev_parent_get(rbd_dev))
1990		img_request_layered_set(img_request);
1991	spin_lock_init(&img_request->completion_lock);
1992	img_request->next_completion = 0;
1993	img_request->callback = NULL;
1994	img_request->result = 0;
1995	img_request->obj_request_count = 0;
1996	INIT_LIST_HEAD(&img_request->obj_requests);
1997	kref_init(&img_request->kref);
1998
1999	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2000		write_request ? "write" : "read", offset, length,
2001		img_request);
2002
2003	return img_request;
2004}
2005
2006static void rbd_img_request_destroy(struct kref *kref)
2007{
2008	struct rbd_img_request *img_request;
2009	struct rbd_obj_request *obj_request;
2010	struct rbd_obj_request *next_obj_request;
2011
2012	img_request = container_of(kref, struct rbd_img_request, kref);
2013
2014	dout("%s: img %p\n", __func__, img_request);
2015
2016	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2017		rbd_img_obj_request_del(img_request, obj_request);
2018	rbd_assert(img_request->obj_request_count == 0);
2019
2020	if (img_request_layered_test(img_request)) {
2021		img_request_layered_clear(img_request);
2022		rbd_dev_parent_put(img_request->rbd_dev);
2023	}
2024
2025	if (img_request_write_test(img_request))
2026		ceph_put_snap_context(img_request->snapc);
2027
2028	kmem_cache_free(rbd_img_request_cache, img_request);
2029}
2030
2031static struct rbd_img_request *rbd_parent_request_create(
2032					struct rbd_obj_request *obj_request,
2033					u64 img_offset, u64 length)
2034{
2035	struct rbd_img_request *parent_request;
2036	struct rbd_device *rbd_dev;
2037
2038	rbd_assert(obj_request->img_request);
2039	rbd_dev = obj_request->img_request->rbd_dev;
2040
2041	parent_request = rbd_img_request_create(rbd_dev->parent,
2042						img_offset, length, false);
2043	if (!parent_request)
2044		return NULL;
2045
2046	img_request_child_set(parent_request);
2047	rbd_obj_request_get(obj_request);
2048	parent_request->obj_request = obj_request;
2049
2050	return parent_request;
2051}
2052
2053static void rbd_parent_request_destroy(struct kref *kref)
2054{
2055	struct rbd_img_request *parent_request;
2056	struct rbd_obj_request *orig_request;
2057
2058	parent_request = container_of(kref, struct rbd_img_request, kref);
2059	orig_request = parent_request->obj_request;
2060
2061	parent_request->obj_request = NULL;
2062	rbd_obj_request_put(orig_request);
2063	img_request_child_clear(parent_request);
2064
2065	rbd_img_request_destroy(kref);
2066}
2067
2068static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2069{
2070	struct rbd_img_request *img_request;
2071	unsigned int xferred;
2072	int result;
2073	bool more;
2074
2075	rbd_assert(obj_request_img_data_test(obj_request));
2076	img_request = obj_request->img_request;
2077
2078	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2079	xferred = (unsigned int)obj_request->xferred;
2080	result = obj_request->result;
2081	if (result) {
2082		struct rbd_device *rbd_dev = img_request->rbd_dev;
2083
2084		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
2085			img_request_write_test(img_request) ? "write" : "read",
2086			obj_request->length, obj_request->img_offset,
2087			obj_request->offset);
2088		rbd_warn(rbd_dev, "  result %d xferred %x\n",
2089			result, xferred);
2090		if (!img_request->result)
2091			img_request->result = result;
2092	}
2093
2094	/* Image object requests don't own their page array */
2095
2096	if (obj_request->type == OBJ_REQUEST_PAGES) {
2097		obj_request->pages = NULL;
2098		obj_request->page_count = 0;
2099	}
2100
2101	if (img_request_child_test(img_request)) {
2102		rbd_assert(img_request->obj_request != NULL);
2103		more = obj_request->which < img_request->obj_request_count - 1;
2104	} else {
2105		rbd_assert(img_request->rq != NULL);
2106		more = blk_end_request(img_request->rq, result, xferred);
2107	}
2108
2109	return more;
2110}
2111
2112static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2113{
2114	struct rbd_img_request *img_request;
2115	u32 which = obj_request->which;
2116	bool more = true;
2117
2118	rbd_assert(obj_request_img_data_test(obj_request));
2119	img_request = obj_request->img_request;
2120
2121	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2122	rbd_assert(img_request != NULL);
2123	rbd_assert(img_request->obj_request_count > 0);
2124	rbd_assert(which != BAD_WHICH);
2125	rbd_assert(which < img_request->obj_request_count);
2126
2127	spin_lock_irq(&img_request->completion_lock);
2128	if (which != img_request->next_completion)
2129		goto out;
2130
2131	for_each_obj_request_from(img_request, obj_request) {
2132		rbd_assert(more);
2133		rbd_assert(which < img_request->obj_request_count);
2134
2135		if (!obj_request_done_test(obj_request))
2136			break;
2137		more = rbd_img_obj_end_request(obj_request);
2138		which++;
2139	}
2140
2141	rbd_assert(more ^ (which == img_request->obj_request_count));
2142	img_request->next_completion = which;
2143out:
2144	spin_unlock_irq(&img_request->completion_lock);
2145
2146	if (!more)
2147		rbd_img_request_complete(img_request);
2148}
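/*
 * Editor's note (not part of the original source): object requests may
 * complete out of order, but the loop above only retires them in index
 * order.  An out-of-order completion returns early; when the request at
 * next_completion finally completes, the loop sweeps forward over every
 * request that is already done, ending the matching portion of the block
 * layer request each time.
 */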
2149
2150/*
2151 * Split up an image request into one or more object requests, each
2152 * to a different object.  The "type" parameter indicates whether
2153 * "data_desc" is the pointer to the head of a list of bio
2154 * structures, or the base of a page array.  In either case this
2155 * function assumes data_desc describes memory sufficient to hold
2156 * all data described by the image request.
2157 */
2158static int rbd_img_request_fill(struct rbd_img_request *img_request,
2159					enum obj_request_type type,
2160					void *data_desc)
2161{
2162	struct rbd_device *rbd_dev = img_request->rbd_dev;
2163	struct rbd_obj_request *obj_request = NULL;
2164	struct rbd_obj_request *next_obj_request;
2165	bool write_request = img_request_write_test(img_request);
2166	struct bio *bio_list = NULL;
2167	unsigned int bio_offset = 0;
2168	struct page **pages = NULL;
2169	u64 img_offset;
2170	u64 resid;
2171	u16 opcode;
2172
2173	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2174		(int)type, data_desc);
2175
2176	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2177	img_offset = img_request->offset;
2178	resid = img_request->length;
2179	rbd_assert(resid > 0);
2180
2181	if (type == OBJ_REQUEST_BIO) {
2182		bio_list = data_desc;
2183		rbd_assert(img_offset ==
2184			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2185	} else {
2186		rbd_assert(type == OBJ_REQUEST_PAGES);
2187		pages = data_desc;
2188	}
2189
2190	while (resid) {
2191		struct ceph_osd_request *osd_req;
2192		const char *object_name;
2193		u64 offset;
2194		u64 length;
2195		unsigned int which = 0;
2196
2197		object_name = rbd_segment_name(rbd_dev, img_offset);
2198		if (!object_name)
2199			goto out_unwind;
2200		offset = rbd_segment_offset(rbd_dev, img_offset);
2201		length = rbd_segment_length(rbd_dev, img_offset, resid);
2202		obj_request = rbd_obj_request_create(object_name,
2203						offset, length, type);
2204		/* object request has its own copy of the object name */
2205		rbd_segment_name_free(object_name);
2206		if (!obj_request)
2207			goto out_unwind;
2208
2209		/*
2210		 * set obj_request->img_request before creating the
2211		 * osd_request so that it gets the right snapc
2212		 */
2213		rbd_img_obj_request_add(img_request, obj_request);
2214
2215		if (type == OBJ_REQUEST_BIO) {
2216			unsigned int clone_size;
2217
2218			rbd_assert(length <= (u64)UINT_MAX);
2219			clone_size = (unsigned int)length;
2220			obj_request->bio_list =
2221					bio_chain_clone_range(&bio_list,
2222								&bio_offset,
2223								clone_size,
2224								GFP_ATOMIC);
2225			if (!obj_request->bio_list)
2226				goto out_unwind;
2227		} else {
2228			unsigned int page_count;
2229
2230			obj_request->pages = pages;
2231			page_count = (u32)calc_pages_for(offset, length);
2232			obj_request->page_count = page_count;
2233			if ((offset + length) & ~PAGE_MASK)
2234				page_count--;	/* more on last page */
2235			pages += page_count;
2236		}
2237
2238		osd_req = rbd_osd_req_create(rbd_dev, write_request,
2239					     (write_request ? 2 : 1),
2240					     obj_request);
2241		if (!osd_req)
2242			goto out_unwind;
2243		obj_request->osd_req = osd_req;
2244		obj_request->callback = rbd_img_obj_callback;
2245
2246		if (write_request) {
2247			osd_req_op_alloc_hint_init(osd_req, which,
2248					     rbd_obj_bytes(&rbd_dev->header),
2249					     rbd_obj_bytes(&rbd_dev->header));
2250			which++;
2251		}
2252
2253		osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2254				       0, 0);
2255		if (type == OBJ_REQUEST_BIO)
2256			osd_req_op_extent_osd_data_bio(osd_req, which,
2257					obj_request->bio_list, length);
2258		else
2259			osd_req_op_extent_osd_data_pages(osd_req, which,
2260					obj_request->pages, length,
2261					offset & ~PAGE_MASK, false, false);
2262
2263		if (write_request)
2264			rbd_osd_req_format_write(obj_request);
2265		else
2266			rbd_osd_req_format_read(obj_request);
2267
2268		obj_request->img_offset = img_offset;
2269
2270		img_offset += length;
2271		resid -= length;
2272	}
2273
2274	return 0;
2275
2276out_unwind:
2277	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2278		rbd_img_obj_request_del(img_request, obj_request);
2279
2280	return -ENOMEM;
2281}
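/*
 * Editor's note (not part of the original source): a worked example of the
 * loop above, assuming the common 4 MiB (obj_order = 22) object size.  A
 * 6 MiB write starting 1 MiB into the image becomes two object requests:
 *
 *	object 0: offset 1 MiB, length 3 MiB  (stops at the object boundary)
 *	object 1: offset 0,     length 3 MiB
 *
 * Each iteration gets its own osd request (hint + write for writes, so
 * num_ops == 2) and a clone of the relevant slice of the bio chain or a
 * slice of the page array.
 */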
2282
2283static void
2284rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2285{
2286	struct rbd_img_request *img_request;
2287	struct rbd_device *rbd_dev;
2288	struct page **pages;
2289	u32 page_count;
2290
2291	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2292	rbd_assert(obj_request_img_data_test(obj_request));
2293	img_request = obj_request->img_request;
2294	rbd_assert(img_request);
2295
2296	rbd_dev = img_request->rbd_dev;
2297	rbd_assert(rbd_dev);
2298
2299	pages = obj_request->copyup_pages;
2300	rbd_assert(pages != NULL);
2301	obj_request->copyup_pages = NULL;
2302	page_count = obj_request->copyup_page_count;
2303	rbd_assert(page_count);
2304	obj_request->copyup_page_count = 0;
2305	ceph_release_page_vector(pages, page_count);
2306
2307	/*
2308	 * We want the transfer count to reflect the size of the
2309	 * original write request.  There is no such thing as a
2310	 * successful short write, so if the request was successful
2311	 * we can just set it to the originally-requested length.
2312	 */
2313	if (!obj_request->result)
2314		obj_request->xferred = obj_request->length;
2315
2316	/* Finish up with the normal image object callback */
2317
2318	rbd_img_obj_callback(obj_request);
2319}
2320
2321static void
2322rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2323{
2324	struct rbd_obj_request *orig_request;
2325	struct ceph_osd_request *osd_req;
2326	struct ceph_osd_client *osdc;
2327	struct rbd_device *rbd_dev;
2328	struct page **pages;
2329	u32 page_count;
2330	int img_result;
2331	u64 parent_length;
2332	u64 offset;
2333	u64 length;
2334
2335	rbd_assert(img_request_child_test(img_request));
2336
2337	/* First get what we need from the image request */
2338
2339	pages = img_request->copyup_pages;
2340	rbd_assert(pages != NULL);
2341	img_request->copyup_pages = NULL;
2342	page_count = img_request->copyup_page_count;
2343	rbd_assert(page_count);
2344	img_request->copyup_page_count = 0;
2345
2346	orig_request = img_request->obj_request;
2347	rbd_assert(orig_request != NULL);
2348	rbd_assert(obj_request_type_valid(orig_request->type));
2349	img_result = img_request->result;
2350	parent_length = img_request->length;
2351	rbd_assert(parent_length == img_request->xferred);
2352	rbd_img_request_put(img_request);
2353
2354	rbd_assert(orig_request->img_request);
2355	rbd_dev = orig_request->img_request->rbd_dev;
2356	rbd_assert(rbd_dev);
2357
2358	/*
2359	 * If the overlap has become 0 (most likely because the
2360	 * image has been flattened) we need to free the pages
2361	 * and re-submit the original write request.
2362	 */
2363	if (!rbd_dev->parent_overlap) {
2364		struct ceph_osd_client *osdc;
2365
2366		ceph_release_page_vector(pages, page_count);
2367		osdc = &rbd_dev->rbd_client->client->osdc;
2368		img_result = rbd_obj_request_submit(osdc, orig_request);
2369		if (!img_result)
2370			return;
2371	}
2372
2373	if (img_result)
2374		goto out_err;
2375
2376	/*
2377	 * The original osd request is of no use to us any more.
2378	 * We need a new one that can hold the three ops in a copyup
2379	 * request.  Allocate the new copyup osd request for the
2380	 * original request, and release the old one.
2381	 */
2382	img_result = -ENOMEM;
2383	osd_req = rbd_osd_req_create_copyup(orig_request);
2384	if (!osd_req)
2385		goto out_err;
2386	rbd_osd_req_destroy(orig_request->osd_req);
2387	orig_request->osd_req = osd_req;
2388	orig_request->copyup_pages = pages;
2389	orig_request->copyup_page_count = page_count;
2390
2391	/* Initialize the copyup op */
2392
2393	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2394	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2395						false, false);
2396
2397	/* Then the hint op */
2398
2399	osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2400				   rbd_obj_bytes(&rbd_dev->header));
2401
2402	/* And the original write request op */
2403
2404	offset = orig_request->offset;
2405	length = orig_request->length;
2406	osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2407					offset, length, 0, 0);
2408	if (orig_request->type == OBJ_REQUEST_BIO)
2409		osd_req_op_extent_osd_data_bio(osd_req, 2,
2410					orig_request->bio_list, length);
2411	else
2412		osd_req_op_extent_osd_data_pages(osd_req, 2,
2413					orig_request->pages, length,
2414					offset & ~PAGE_MASK, false, false);
2415
2416	rbd_osd_req_format_write(orig_request);
2417
2418	/* All set, send it off. */
2419
2420	orig_request->callback = rbd_img_obj_copyup_callback;
2421	osdc = &rbd_dev->rbd_client->client->osdc;
2422	img_result = rbd_obj_request_submit(osdc, orig_request);
2423	if (!img_result)
2424		return;
2425out_err:
2426	/* Record the error code and complete the request */
2427
2428	orig_request->result = img_result;
2429	orig_request->xferred = 0;
2430	obj_request_done_set(orig_request);
2431	rbd_obj_request_complete(orig_request);
2432}
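/*
 * Editor's note (not part of the original source): the copyup request
 * assembled above therefore carries three ops, mirroring
 * rbd_osd_req_create_copyup():
 *
 *	op 0: CEPH_OSD_OP_CALL  "rbd.copyup" with the parent data pages
 *	op 1: allocation hint sized to the rbd object size
 *	op 2: CEPH_OSD_OP_WRITE for the originally requested extent
 */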
2433
2434/*
2435 * Read from the parent image the range of data that covers the
2436 * entire target of the given object request.  This is used for
2437 * satisfying a layered image write request when the target of an
2438 * object request from the image request does not exist.
2439 *
2440 * A page array big enough to hold the returned data is allocated
2441 * and supplied to rbd_img_request_fill() as the "data descriptor."
2442 * When the read completes, this page array will be transferred to
2443 * the original object request for the copyup operation.
2444 *
2445 * If an error occurs, record it as the result of the original
2446 * object request and mark it done so it gets completed.
2447 */
2448static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2449{
2450	struct rbd_img_request *img_request = NULL;
2451	struct rbd_img_request *parent_request = NULL;
2452	struct rbd_device *rbd_dev;
2453	u64 img_offset;
2454	u64 length;
2455	struct page **pages = NULL;
2456	u32 page_count;
2457	int result;
2458
2459	rbd_assert(obj_request_img_data_test(obj_request));
2460	rbd_assert(obj_request_type_valid(obj_request->type));
2461
2462	img_request = obj_request->img_request;
2463	rbd_assert(img_request != NULL);
2464	rbd_dev = img_request->rbd_dev;
2465	rbd_assert(rbd_dev->parent != NULL);
2466
2467	/*
2468	 * Determine the byte range covered by the object in the
2469	 * child image to which the original request was to be sent.
2470	 */
2471	img_offset = obj_request->img_offset - obj_request->offset;
2472	length = (u64)1 << rbd_dev->header.obj_order;
2473
2474	/*
2475	 * There is no defined parent data beyond the parent
2476	 * overlap, so limit what we read at that boundary if
2477	 * necessary.
2478	 */
2479	if (img_offset + length > rbd_dev->parent_overlap) {
2480		rbd_assert(img_offset < rbd_dev->parent_overlap);
2481		length = rbd_dev->parent_overlap - img_offset;
2482	}
2483
2484	/*
2485	 * Allocate a page array big enough to receive the data read
2486	 * from the parent.
2487	 */
2488	page_count = (u32)calc_pages_for(0, length);
2489	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2490	if (IS_ERR(pages)) {
2491		result = PTR_ERR(pages);
2492		pages = NULL;
2493		goto out_err;
2494	}
2495
2496	result = -ENOMEM;
2497	parent_request = rbd_parent_request_create(obj_request,
2498						img_offset, length);
2499	if (!parent_request)
2500		goto out_err;
2501
2502	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2503	if (result)
2504		goto out_err;
2505	parent_request->copyup_pages = pages;
2506	parent_request->copyup_page_count = page_count;
2507
2508	parent_request->callback = rbd_img_obj_parent_read_full_callback;
2509	result = rbd_img_request_submit(parent_request);
2510	if (!result)
2511		return 0;
2512
2513	parent_request->copyup_pages = NULL;
2514	parent_request->copyup_page_count = 0;
2515	parent_request->obj_request = NULL;
2516	rbd_obj_request_put(obj_request);
2517out_err:
2518	if (pages)
2519		ceph_release_page_vector(pages, page_count);
2520	if (parent_request)
2521		rbd_img_request_put(parent_request);
2522	obj_request->result = result;
2523	obj_request->xferred = 0;
2524	obj_request_done_set(obj_request);
2525
2526	return result;
2527}
2528
2529static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2530{
2531	struct rbd_obj_request *orig_request;
2532	struct rbd_device *rbd_dev;
2533	int result;
2534
2535	rbd_assert(!obj_request_img_data_test(obj_request));
2536
2537	/*
2538	 * All we need from the object request is the original
2539	 * request and the result of the STAT op.  Grab those, then
2540	 * we're done with the request.
2541	 */
2542	orig_request = obj_request->obj_request;
2543	obj_request->obj_request = NULL;
2544	rbd_obj_request_put(orig_request);
2545	rbd_assert(orig_request);
2546	rbd_assert(orig_request->img_request);
2547
2548	result = obj_request->result;
2549	obj_request->result = 0;
2550
2551	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2552		obj_request, orig_request, result,
2553		obj_request->xferred, obj_request->length);
2554	rbd_obj_request_put(obj_request);
2555
2556	/*
2557	 * If the overlap has become 0 (most likely because the
2558	 * image has been flattened) we need to free the pages
2559	 * and re-submit the original write request.
2560	 */
2561	rbd_dev = orig_request->img_request->rbd_dev;
2562	if (!rbd_dev->parent_overlap) {
2563		struct ceph_osd_client *osdc;
2564
2565		osdc = &rbd_dev->rbd_client->client->osdc;
2566		result = rbd_obj_request_submit(osdc, orig_request);
2567		if (!result)
2568			return;
2569	}
2570
2571	/*
2572	 * Our only purpose here is to determine whether the object
2573	 * exists, and we don't want to treat the non-existence as
2574	 * an error.  If something else comes back, transfer the
2575	 * error to the original request and complete it now.
2576	 */
2577	if (!result) {
2578		obj_request_existence_set(orig_request, true);
2579	} else if (result == -ENOENT) {
2580		obj_request_existence_set(orig_request, false);
2581	} else if (result) {
2582		orig_request->result = result;
2583		goto out;
2584	}
2585
2586	/*
2587	 * Resubmit the original request now that we have recorded
2588	 * whether the target object exists.
2589	 */
2590	orig_request->result = rbd_img_obj_request_submit(orig_request);
2591out:
2592	if (orig_request->result)
2593		rbd_obj_request_complete(orig_request);
2594}
2595
2596static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2597{
2598	struct rbd_obj_request *stat_request;
2599	struct rbd_device *rbd_dev;
2600	struct ceph_osd_client *osdc;
2601	struct page **pages = NULL;
2602	u32 page_count;
2603	size_t size;
2604	int ret;
2605
2606	/*
2607	 * The response data for a STAT call consists of:
2608	 *     le64 length;
2609	 *     struct {
2610	 *         le32 tv_sec;
2611	 *         le32 tv_nsec;
2612	 *     } mtime;
2613	 */
2614	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2615	page_count = (u32)calc_pages_for(0, size);
2616	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2617	if (IS_ERR(pages))
2618		return PTR_ERR(pages);
2619
2620	ret = -ENOMEM;
2621	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2622							OBJ_REQUEST_PAGES);
2623	if (!stat_request)
2624		goto out;
2625
2626	rbd_obj_request_get(obj_request);
2627	stat_request->obj_request = obj_request;
2628	stat_request->pages = pages;
2629	stat_request->page_count = page_count;
2630
2631	rbd_assert(obj_request->img_request);
2632	rbd_dev = obj_request->img_request->rbd_dev;
2633	stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2634						   stat_request);
2635	if (!stat_request->osd_req)
2636		goto out;
2637	stat_request->callback = rbd_img_obj_exists_callback;
2638
2639	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2640	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2641					false, false);
2642	rbd_osd_req_format_read(stat_request);
2643
2644	osdc = &rbd_dev->rbd_client->client->osdc;
2645	ret = rbd_obj_request_submit(osdc, stat_request);
2646out:
2647	if (ret)
2648		rbd_obj_request_put(obj_request);
2649
2650	return ret;
2651}
2652
2653static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2654{
2655	struct rbd_img_request *img_request;
2656	struct rbd_device *rbd_dev;
2657	bool known;
2658
2659	rbd_assert(obj_request_img_data_test(obj_request));
2660
2661	img_request = obj_request->img_request;
2662	rbd_assert(img_request);
2663	rbd_dev = img_request->rbd_dev;
2664
2665	/*
2666	 * Only writes to layered images need special handling.
2667	 * Reads and non-layered writes are simple object requests.
2668	 * Layered writes that start beyond the end of the overlap
2669	 * with the parent have no parent data, so they too are
2670	 * simple object requests.  Finally, if the target object is
2671	 * known to already exist, its parent data has already been
2672	 * copied, so a write to the object can also be handled as a
2673	 * simple object request.
2674	 */
2675	if (!img_request_write_test(img_request) ||
2676		!img_request_layered_test(img_request) ||
2677		rbd_dev->parent_overlap <= obj_request->img_offset ||
2678		((known = obj_request_known_test(obj_request)) &&
2679			obj_request_exists_test(obj_request))) {
2680
2681		struct rbd_device *rbd_dev;
2682		struct ceph_osd_client *osdc;
2683
2684		rbd_dev = obj_request->img_request->rbd_dev;
2685		osdc = &rbd_dev->rbd_client->client->osdc;
2686
2687		return rbd_obj_request_submit(osdc, obj_request);
2688	}
2689
2690	/*
2691	 * It's a layered write.  The target object might exist but
2692	 * we may not know that yet.  If we know it doesn't exist,
2693	 * start by reading the data for the full target object from
2694	 * the parent so we can use it for a copyup to the target.
2695	 */
2696	if (known)
2697		return rbd_img_obj_parent_read_full(obj_request);
2698
2699	/* We don't know whether the target exists.  Go find out. */
2700
2701	return rbd_img_obj_exists_submit(obj_request);
2702}
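/*
 * Editor's note (not part of the original source): a compressed view of
 * the layered-write path driven from rbd_img_obj_request_submit():
 *
 *	write to a layered image, target existence unknown
 *	  -> rbd_img_obj_exists_submit()           STAT the target object
 *	  -> rbd_img_obj_exists_callback()         record exists/not, resubmit
 *	  -> rbd_img_obj_parent_read_full()        read covering range from parent
 *	  -> ..._parent_read_full_callback()       turn it into a copyup write
 *	  -> rbd_img_obj_copyup_callback()         complete as a normal write
 */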
2703
2704static int rbd_img_request_submit(struct rbd_img_request *img_request)
2705{
2706	struct rbd_obj_request *obj_request;
2707	struct rbd_obj_request *next_obj_request;
2708
2709	dout("%s: img %p\n", __func__, img_request);
2710	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2711		int ret;
2712
2713		ret = rbd_img_obj_request_submit(obj_request);
2714		if (ret)
2715			return ret;
2716	}
2717
2718	return 0;
2719}
2720
2721static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2722{
2723	struct rbd_obj_request *obj_request;
2724	struct rbd_device *rbd_dev;
2725	u64 obj_end;
2726	u64 img_xferred;
2727	int img_result;
2728
2729	rbd_assert(img_request_child_test(img_request));
2730
2731	/* First get what we need from the image request and release it */
2732
2733	obj_request = img_request->obj_request;
2734	img_xferred = img_request->xferred;
2735	img_result = img_request->result;
2736	rbd_img_request_put(img_request);
2737
2738	/*
2739	 * If the overlap has become 0 (most likely because the
2740	 * image has been flattened) we need to re-submit the
2741	 * original request.
2742	 */
2743	rbd_assert(obj_request);
2744	rbd_assert(obj_request->img_request);
2745	rbd_dev = obj_request->img_request->rbd_dev;
2746	if (!rbd_dev->parent_overlap) {
2747		struct ceph_osd_client *osdc;
2748
2749		osdc = &rbd_dev->rbd_client->client->osdc;
2750		img_result = rbd_obj_request_submit(osdc, obj_request);
2751		if (!img_result)
2752			return;
2753	}
2754
2755	obj_request->result = img_result;
2756	if (obj_request->result)
2757		goto out;
2758
2759	/*
2760	 * We need to zero anything beyond the parent overlap
2761	 * boundary.  Since rbd_img_obj_request_read_callback()
2762	 * will zero anything beyond the end of a short read, an
2763	 * easy way to do this is to pretend the data from the
2764	 * parent came up short--ending at the overlap boundary.
2765	 */
2766	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2767	obj_end = obj_request->img_offset + obj_request->length;
2768	if (obj_end > rbd_dev->parent_overlap) {
2769		u64 xferred = 0;
2770
2771		if (obj_request->img_offset < rbd_dev->parent_overlap)
2772			xferred = rbd_dev->parent_overlap -
2773					obj_request->img_offset;
2774
2775		obj_request->xferred = min(img_xferred, xferred);
2776	} else {
2777		obj_request->xferred = img_xferred;
2778	}
2779out:
2780	rbd_img_obj_request_read_callback(obj_request);
2781	rbd_obj_request_complete(obj_request);
2782}
2783
2784static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2785{
2786	struct rbd_img_request *img_request;
2787	int result;
2788
2789	rbd_assert(obj_request_img_data_test(obj_request));
2790	rbd_assert(obj_request->img_request != NULL);
2791	rbd_assert(obj_request->result == (s32) -ENOENT);
2792	rbd_assert(obj_request_type_valid(obj_request->type));
2793
2794	/* rbd_read_finish(obj_request, obj_request->length); */
2795	img_request = rbd_parent_request_create(obj_request,
2796						obj_request->img_offset,
2797						obj_request->length);
2798	result = -ENOMEM;
2799	if (!img_request)
2800		goto out_err;
2801
2802	if (obj_request->type == OBJ_REQUEST_BIO)
2803		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2804						obj_request->bio_list);
2805	else
2806		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2807						obj_request->pages);
2808	if (result)
2809		goto out_err;
2810
2811	img_request->callback = rbd_img_parent_read_callback;
2812	result = rbd_img_request_submit(img_request);
2813	if (result)
2814		goto out_err;
2815
2816	return;
2817out_err:
2818	if (img_request)
2819		rbd_img_request_put(img_request);
2820	obj_request->result = result;
2821	obj_request->xferred = 0;
2822	obj_request_done_set(obj_request);
2823}
2824
2825static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2826{
2827	struct rbd_obj_request *obj_request;
2828	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2829	int ret;
2830
2831	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2832							OBJ_REQUEST_NODATA);
2833	if (!obj_request)
2834		return -ENOMEM;
2835
2836	ret = -ENOMEM;
2837	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2838						  obj_request);
2839	if (!obj_request->osd_req)
2840		goto out;
2841
2842	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2843					notify_id, 0, 0);
2844	rbd_osd_req_format_read(obj_request);
2845
2846	ret = rbd_obj_request_submit(osdc, obj_request);
2847	if (ret)
2848		goto out;
2849	ret = rbd_obj_request_wait(obj_request);
2850out:
2851	rbd_obj_request_put(obj_request);
2852
2853	return ret;
2854}
2855
2856static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2857{
2858	struct rbd_device *rbd_dev = (struct rbd_device *)data;
2859	int ret;
2860
2861	if (!rbd_dev)
2862		return;
2863
2864	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2865		rbd_dev->header_name, (unsigned long long)notify_id,
2866		(unsigned int)opcode);
2867	ret = rbd_dev_refresh(rbd_dev);
2868	if (ret)
2869		rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2870
2871	rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2872}
2873
2874/*
2875 * Request sync osd watch/unwatch.  The value of "start" determines
2876 * whether a watch request is being initiated or torn down.
2877 */
2878static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2879{
2880	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2881	struct rbd_obj_request *obj_request;
2882	int ret;
2883
2884	rbd_assert(start ^ !!rbd_dev->watch_event);
2885	rbd_assert(start ^ !!rbd_dev->watch_request);
2886
2887	if (start) {
2888		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2889						&rbd_dev->watch_event);
2890		if (ret < 0)
2891			return ret;
2892		rbd_assert(rbd_dev->watch_event != NULL);
2893	}
2894
2895	ret = -ENOMEM;
2896	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2897							OBJ_REQUEST_NODATA);
2898	if (!obj_request)
2899		goto out_cancel;
2900
2901	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2902						  obj_request);
2903	if (!obj_request->osd_req)
2904		goto out_cancel;
2905
2906	if (start)
2907		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2908	else
2909		ceph_osdc_unregister_linger_request(osdc,
2910					rbd_dev->watch_request->osd_req);
2911
2912	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2913				rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2914	rbd_osd_req_format_write(obj_request);
2915
2916	ret = rbd_obj_request_submit(osdc, obj_request);
2917	if (ret)
2918		goto out_cancel;
2919	ret = rbd_obj_request_wait(obj_request);
2920	if (ret)
2921		goto out_cancel;
2922	ret = obj_request->result;
2923	if (ret)
2924		goto out_cancel;
2925
2926	/*
2927	 * A watch request is set to linger, so the underlying osd
2928	 * request won't go away until we unregister it.  We retain
2929	 * a pointer to the object request during that time (in
2930	 * rbd_dev->watch_request), so we'll keep a reference to
2931	 * it.  We'll drop that reference (below) after we've
2932	 * unregistered it.
2933	 */
2934	if (start) {
2935		rbd_dev->watch_request = obj_request;
2936
2937		return 0;
2938	}
2939
2940	/* We have successfully torn down the watch request */
2941
2942	rbd_obj_request_put(rbd_dev->watch_request);
2943	rbd_dev->watch_request = NULL;
2944out_cancel:
2945	/* Cancel the event if we're tearing down, or on error */
2946	ceph_osdc_cancel_event(rbd_dev->watch_event);
2947	rbd_dev->watch_event = NULL;
2948	if (obj_request)
2949		rbd_obj_request_put(obj_request);
2950
2951	return ret;
2952}
2953
2954static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2955{
2956	return __rbd_dev_header_watch_sync(rbd_dev, true);
2957}
2958
2959static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
2960{
2961	int ret;
2962
2963	ret = __rbd_dev_header_watch_sync(rbd_dev, false);
2964	if (ret) {
2965		rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
2966			 ret);
2967	}
2968}
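/*
 * Editor's note (not part of the original source): typical use of the
 * watch helpers above.  A mapping sets up a lingering watch on the header
 * object and tears it down again when unmapped:
 *
 *	ret = rbd_dev_header_watch_sync(rbd_dev);	// start watching
 *	...
 *	rbd_dev_header_unwatch_sync(rbd_dev);		// stop watching
 *
 * While the watch is active, a notify on the header object arrives in
 * rbd_watch_cb(), which refreshes the header and acknowledges the notify
 * via rbd_obj_notify_ack_sync().
 */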
2969
2970/*
2971 * Synchronous osd object method call.  Returns the number of bytes
2972 * returned in the inbound buffer, or a negative error code.
2973 */
2974static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2975			     const char *object_name,
2976			     const char *class_name,
2977			     const char *method_name,
2978			     const void *outbound,
2979			     size_t outbound_size,
2980			     void *inbound,
2981			     size_t inbound_size)
2982{
2983	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2984	struct rbd_obj_request *obj_request;
2985	struct page **pages;
2986	u32 page_count;
2987	int ret;
2988
2989	/*
2990	 * Method calls are ultimately read operations.  The result
2991	 * should be placed into the inbound buffer provided.  They
2992	 * also supply outbound data--parameters for the object
2993	 * method.  Currently if this is present it will be a
2994	 * snapshot id.
2995	 */
2996	page_count = (u32)calc_pages_for(0, inbound_size);
2997	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2998	if (IS_ERR(pages))
2999		return PTR_ERR(pages);
3000
3001	ret = -ENOMEM;
3002	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3003							OBJ_REQUEST_PAGES);
3004	if (!obj_request)
3005		goto out;
3006
3007	obj_request->pages = pages;
3008	obj_request->page_count = page_count;
3009
3010	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3011						  obj_request);
3012	if (!obj_request->osd_req)
3013		goto out;
3014
3015	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3016					class_name, method_name);
3017	if (outbound_size) {
3018		struct ceph_pagelist *pagelist;
3019
3020		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3021		if (!pagelist)
3022			goto out;
3023
3024		ceph_pagelist_init(pagelist);
3025		ceph_pagelist_append(pagelist, outbound, outbound_size);
3026		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3027						pagelist);
3028	}
3029	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3030					obj_request->pages, inbound_size,
3031					0, false, false);
3032	rbd_osd_req_format_read(obj_request);
3033
3034	ret = rbd_obj_request_submit(osdc, obj_request);
3035	if (ret)
3036		goto out;
3037	ret = rbd_obj_request_wait(obj_request);
3038	if (ret)
3039		goto out;
3040
3041	ret = obj_request->result;
3042	if (ret < 0)
3043		goto out;
3044
3045	rbd_assert(obj_request->xferred < (u64)INT_MAX);
3046	ret = (int)obj_request->xferred;
3047	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3048out:
3049	if (obj_request)
3050		rbd_obj_request_put(obj_request);
3051	else
3052		ceph_release_page_vector(pages, page_count);
3053
3054	return ret;
3055}
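/*
 * Editor's note (not part of the original source): an illustrative call of
 * the helper above.  The names used here (a "get_size" method on the "rbd"
 * class, taking a snapshot id) follow the v2 image probing done elsewhere
 * in this driver; treat the exact method and buffer layout as an example
 * only:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	char size_buf[16];
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof(snapid),
 *				  size_buf, sizeof(size_buf));
 */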
3056
3057static void rbd_request_fn(struct request_queue *q)
3058		__releases(q->queue_lock) __acquires(q->queue_lock)
3059{
3060	struct rbd_device *rbd_dev = q->queuedata;
3061	bool read_only = rbd_dev->mapping.read_only;
3062	struct request *rq;
3063	int result;
3064
3065	while ((rq = blk_fetch_request(q))) {
3066		bool write_request = rq_data_dir(rq) == WRITE;
3067		struct rbd_img_request *img_request;
3068		u64 offset;
3069		u64 length;
3070
3071		/* Ignore any non-FS requests that filter through. */
3072
3073		if (rq->cmd_type != REQ_TYPE_FS) {
3074			dout("%s: non-fs request type %d\n", __func__,
3075				(int) rq->cmd_type);
3076			__blk_end_request_all(rq, 0);
3077			continue;
3078		}
3079
3080		/* Ignore/skip any zero-length requests */
3081
3082		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3083		length = (u64) blk_rq_bytes(rq);
3084
3085		if (!length) {
3086			dout("%s: zero-length request\n", __func__);
3087			__blk_end_request_all(rq, 0);
3088			continue;
3089		}
3090
3091		spin_unlock_irq(q->queue_lock);
3092
3093		/* Disallow writes to a read-only device */
3094
3095		if (write_request) {
3096			result = -EROFS;
3097			if (read_only)
3098				goto end_request;
3099			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3100		}
3101
3102		/*
3103		 * Quit early if the mapped snapshot no longer
3104		 * exists.  It's still possible the snapshot will
3105		 * have disappeared by the time our request arrives
3106		 * at the osd, but there's no sense in sending it if
3107		 * we already know.
3108		 */
3109		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3110			dout("request for non-existent snapshot");
3111			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3112			result = -ENXIO;
3113			goto end_request;
3114		}
3115
3116		result = -EINVAL;
3117		if (offset && length > U64_MAX - offset + 1) {
3118			rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3119				offset, length);
3120			goto end_request;	/* Shouldn't happen */
3121		}
3122
3123		result = -EIO;
3124		if (offset + length > rbd_dev->mapping.size) {
3125			rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3126				offset, length, rbd_dev->mapping.size);
3127			goto end_request;
3128		}
3129
3130		result = -ENOMEM;
3131		img_request = rbd_img_request_create(rbd_dev, offset, length,
3132							write_request);
3133		if (!img_request)
3134			goto end_request;
3135
3136		img_request->rq = rq;
3137
3138		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3139						rq->bio);
3140		if (!result)
3141			result = rbd_img_request_submit(img_request);
3142		if (result)
3143			rbd_img_request_put(img_request);
3144end_request:
3145		spin_lock_irq(q->queue_lock);
3146		if (result < 0) {
3147			rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3148				write_request ? "write" : "read",
3149				length, offset, result);
3150
3151			__blk_end_request_all(rq, result);
3152		}
3153	}
3154}
3155
3156/*
3157 * a queue callback. Makes sure that we don't create a bio that spans across
3158 * multiple osd objects. One exception would be a single-page bio,
3159 * which we handle later at bio_chain_clone_range()
3160 */
3161static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3162			  struct bio_vec *bvec)
3163{
3164	struct rbd_device *rbd_dev = q->queuedata;
3165	sector_t sector_offset;
3166	sector_t sectors_per_obj;
3167	sector_t obj_sector_offset;
3168	int ret;
3169
3170	/*
3171	 * Find how far into its rbd object the partition-relative
3172	 * bio start sector is to offset relative to the enclosing
3173	 * device.
3174	 */
3175	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3176	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3177	obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3178
3179	/*
3180	 * Compute the number of bytes from that offset to the end
3181	 * of the object.  Account for what's already used by the bio.
3182	 */
3183	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3184	if (ret > bmd->bi_size)
3185		ret -= bmd->bi_size;
3186	else
3187		ret = 0;
3188
3189	/*
3190	 * Don't send back more than was asked for.  And if the bio
3191	 * was empty, let the whole thing through because:  "Note
3192	 * that a block device *must* allow a single page to be
3193	 * added to an empty bio."
3194	 */
3195	rbd_assert(bvec->bv_len <= PAGE_SIZE);
3196	if (ret > (int) bvec->bv_len || !bmd->bi_size)
3197		ret = (int) bvec->bv_len;
3198
3199	return ret;
3200}
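/*
 * Editor's note (not part of the original source): a worked example of the
 * arithmetic above, assuming the common 4 MiB (obj_order = 22) object
 * size: sectors_per_obj = 1 << (22 - 9) = 8192.  A bio whose first sector
 * lands at offset 8190 within an object has only 2 sectors (1024 bytes)
 * left before the object boundary, so at most 1024 bytes (minus whatever
 * the bio already holds) may be added, apart from the empty-bio exception
 * noted in the comment above.
 */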
3201
3202static void rbd_free_disk(struct rbd_device *rbd_dev)
3203{
3204	struct gendisk *disk = rbd_dev->disk;
3205
3206	if (!disk)
3207		return;
3208
3209	rbd_dev->disk = NULL;
3210	if (disk->flags & GENHD_FL_UP) {
3211		del_gendisk(disk);
3212		if (disk->queue)
3213			blk_cleanup_queue(disk->queue);
3214	}
3215	put_disk(disk);
3216}
3217
3218static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3219				const char *object_name,
3220				u64 offset, u64 length, void *buf)
3221
3222{
3223	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3224	struct rbd_obj_request *obj_request;
3225	struct page **pages = NULL;
3226	u32 page_count;
3227	size_t size;
3228	int ret;
3229
3230	page_count = (u32) calc_pages_for(offset, length);
3231	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3232	if (IS_ERR(pages))
3233		ret = PTR_ERR(pages);
3234
3235	ret = -ENOMEM;
3236	obj_request = rbd_obj_request_create(object_name, offset, length,
3237							OBJ_REQUEST_PAGES);
3238	if (!obj_request)
3239		goto out;
3240
3241	obj_request->pages = pages;
3242	obj_request->page_count = page_count;
3243
3244	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3245						  obj_request);
3246	if (!obj_request->osd_req)
3247		goto out;
3248
3249	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3250					offset, length, 0, 0);
3251	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3252					obj_request->pages,
3253					obj_request->length,
3254					obj_request->offset & ~PAGE_MASK,
3255					false, false);
3256	rbd_osd_req_format_read(obj_request);
3257
3258	ret = rbd_obj_request_submit(osdc, obj_request);
3259	if (ret)
3260		goto out;
3261	ret = rbd_obj_request_wait(obj_request);
3262	if (ret)
3263		goto out;
3264
3265	ret = obj_request->result;
3266	if (ret < 0)
3267		goto out;
3268
3269	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3270	size = (size_t) obj_request->xferred;
3271	ceph_copy_from_page_vector(pages, buf, 0, size);
3272	rbd_assert(size <= (size_t)INT_MAX);
3273	ret = (int)size;
3274out:
3275	if (obj_request)
3276		rbd_obj_request_put(obj_request);
3277	else
3278		ceph_release_page_vector(pages, page_count);
3279
3280	return ret;
3281}
3282
3283/*
3284 * Read the complete header for the given rbd device.  On successful
3285 * return, the rbd_dev->header field will contain up-to-date
3286 * information about the image.
3287 */
3288static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3289{
3290	struct rbd_image_header_ondisk *ondisk = NULL;
3291	u32 snap_count = 0;
3292	u64 names_size = 0;
3293	u32 want_count;
3294	int ret;
3295
3296	/*
3297	 * The complete header will include an array of its 64-bit
3298	 * snapshot ids, followed by the names of those snapshots as
3299	 * a contiguous block of NUL-terminated strings.  Note that
3300	 * the number of snapshots could change by the time we read
3301	 * it in, in which case we re-read it.
3302	 */
3303	do {
3304		size_t size;
3305
3306		kfree(ondisk);
3307
3308		size = sizeof (*ondisk);
3309		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3310		size += names_size;
3311		ondisk = kmalloc(size, GFP_KERNEL);
3312		if (!ondisk)
3313			return -ENOMEM;
3314
3315		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3316				       0, size, ondisk);
3317		if (ret < 0)
3318			goto out;
3319		if ((size_t)ret < size) {
3320			ret = -ENXIO;
3321			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3322				size, ret);
3323			goto out;
3324		}
3325		if (!rbd_dev_ondisk_valid(ondisk)) {
3326			ret = -ENXIO;
3327			rbd_warn(rbd_dev, "invalid header");
3328			goto out;
3329		}
3330
3331		names_size = le64_to_cpu(ondisk->snap_names_len);
3332		want_count = snap_count;
3333		snap_count = le32_to_cpu(ondisk->snap_count);
3334	} while (snap_count != want_count);
3335
3336	ret = rbd_header_from_disk(rbd_dev, ondisk);
3337out:
3338	kfree(ondisk);
3339
3340	return ret;
3341}
3342
3343/*
3344 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3345 * has disappeared from the (just updated) snapshot context.
3346 */
3347static void rbd_exists_validate(struct rbd_device *rbd_dev)
3348{
3349	u64 snap_id;
3350
3351	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3352		return;
3353
3354	snap_id = rbd_dev->spec->snap_id;
3355	if (snap_id == CEPH_NOSNAP)
3356		return;
3357
3358	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3359		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3360}
3361
3362static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3363{
3364	sector_t size;
3365	bool removing;
3366
3367	/*
3368	 * Don't hold the lock while doing disk operations,
3369	 * or lock ordering will conflict with the bdev mutex via:
3370	 * rbd_add() -> blkdev_get() -> rbd_open()
3371	 */
3372	spin_lock_irq(&rbd_dev->lock);
3373	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3374	spin_unlock_irq(&rbd_dev->lock);
3375	/*
3376	 * If the device is being removed, rbd_dev->disk has
3377	 * been destroyed, so don't try to update its size
3378	 */
3379	if (!removing) {
3380		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3381		dout("setting size to %llu sectors", (unsigned long long)size);
3382		set_capacity(rbd_dev->disk, size);
3383		revalidate_disk(rbd_dev->disk);
3384	}
3385}
3386
3387static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3388{
3389	u64 mapping_size;
3390	int ret;
3391
3392	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3393	down_write(&rbd_dev->header_rwsem);
3394	mapping_size = rbd_dev->mapping.size;
3395	if (rbd_dev->image_format == 1)
3396		ret = rbd_dev_v1_header_info(rbd_dev);
3397	else
3398		ret = rbd_dev_v2_header_info(rbd_dev);
3399
3400	/* If it's a mapped snapshot, validate its EXISTS flag */
3401
3402	rbd_exists_validate(rbd_dev);
3403	up_write(&rbd_dev->header_rwsem);
3404
3405	if (mapping_size != rbd_dev->mapping.size) {
3406		rbd_dev_update_size(rbd_dev);
3407	}
3408
3409	return ret;
3410}
3411
3412static int rbd_init_disk(struct rbd_device *rbd_dev)
3413{
3414	struct gendisk *disk;
3415	struct request_queue *q;
3416	u64 segment_size;
3417
3418	/* create gendisk info */
3419	disk = alloc_disk(single_major ?
3420			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3421			  RBD_MINORS_PER_MAJOR);
3422	if (!disk)
3423		return -ENOMEM;
3424
3425	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3426		 rbd_dev->dev_id);
3427	disk->major = rbd_dev->major;
3428	disk->first_minor = rbd_dev->minor;
3429	if (single_major)
3430		disk->flags |= GENHD_FL_EXT_DEVT;
3431	disk->fops = &rbd_bd_ops;
3432	disk->private_data = rbd_dev;
3433
3434	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3435	if (!q)
3436		goto out_disk;
3437
3438	/* We use the default size, but let's be explicit about it. */
3439	blk_queue_physical_block_size(q, SECTOR_SIZE);
3440
3441	/* set io sizes to object size */
3442	segment_size = rbd_obj_bytes(&rbd_dev->header);
3443	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3444	blk_queue_max_segment_size(q, segment_size);
3445	blk_queue_io_min(q, segment_size);
3446	blk_queue_io_opt(q, segment_size);
3447
3448	blk_queue_merge_bvec(q, rbd_merge_bvec);
3449	disk->queue = q;
3450
3451	q->queuedata = rbd_dev;
3452
3453	rbd_dev->disk = disk;
3454
3455	return 0;
3456out_disk:
3457	put_disk(disk);
3458
3459	return -ENOMEM;
3460}
3461
3462/*
3463  sysfs
3464*/
3465
3466static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3467{
3468	return container_of(dev, struct rbd_device, dev);
3469}
3470
3471static ssize_t rbd_size_show(struct device *dev,
3472			     struct device_attribute *attr, char *buf)
3473{
3474	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3475
3476	return sprintf(buf, "%llu\n",
3477		(unsigned long long)rbd_dev->mapping.size);
3478}
3479
3480/*
3481 * Note this shows the features for whatever's mapped, which is not
3482 * necessarily the base image.
3483 */
3484static ssize_t rbd_features_show(struct device *dev,
3485			     struct device_attribute *attr, char *buf)
3486{
3487	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3488
3489	return sprintf(buf, "0x%016llx\n",
3490			(unsigned long long)rbd_dev->mapping.features);
3491}
3492
3493static ssize_t rbd_major_show(struct device *dev,
3494			      struct device_attribute *attr, char *buf)
3495{
3496	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3497
3498	if (rbd_dev->major)
3499		return sprintf(buf, "%d\n", rbd_dev->major);
3500
3501	return sprintf(buf, "(none)\n");
3502}
3503
3504static ssize_t rbd_minor_show(struct device *dev,
3505			      struct device_attribute *attr, char *buf)
3506{
3507	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3508
3509	return sprintf(buf, "%d\n", rbd_dev->minor);
3510}
3511
3512static ssize_t rbd_client_id_show(struct device *dev,
3513				  struct device_attribute *attr, char *buf)
3514{
3515	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3516
3517	return sprintf(buf, "client%lld\n",
3518			ceph_client_id(rbd_dev->rbd_client->client));
3519}
3520
3521static ssize_t rbd_pool_show(struct device *dev,
3522			     struct device_attribute *attr, char *buf)
3523{
3524	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3525
3526	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3527}
3528
3529static ssize_t rbd_pool_id_show(struct device *dev,
3530			     struct device_attribute *attr, char *buf)
3531{
3532	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3533
3534	return sprintf(buf, "%llu\n",
3535			(unsigned long long) rbd_dev->spec->pool_id);
3536}
3537
3538static ssize_t rbd_name_show(struct device *dev,
3539			     struct device_attribute *attr, char *buf)
3540{
3541	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3542
3543	if (rbd_dev->spec->image_name)
3544		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3545
3546	return sprintf(buf, "(unknown)\n");
3547}
3548
3549static ssize_t rbd_image_id_show(struct device *dev,
3550			     struct device_attribute *attr, char *buf)
3551{
3552	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3553
3554	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3555}
3556
3557/*
3558 * Shows the name of the currently-mapped snapshot (or
3559 * RBD_SNAP_HEAD_NAME for the base image).
3560 */
3561static ssize_t rbd_snap_show(struct device *dev,
3562			     struct device_attribute *attr,
3563			     char *buf)
3564{
3565	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3566
3567	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3568}
3569
3570/*
3571 * For an rbd v2 image, shows the pool, image, and snapshot ids and
3572 * names for the parent image, plus the parent overlap.  If there is
3573 * no parent, simply shows "(no parent image)".
3574 */
3575static ssize_t rbd_parent_show(struct device *dev,
3576			     struct device_attribute *attr,
3577			     char *buf)
3578{
3579	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3580	struct rbd_spec *spec = rbd_dev->parent_spec;
3581	int count;
3582	char *bufp = buf;
3583
3584	if (!spec)
3585		return sprintf(buf, "(no parent image)\n");
3586
3587	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3588			(unsigned long long) spec->pool_id, spec->pool_name);
3589	if (count < 0)
3590		return count;
3591	bufp += count;
3592
3593	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3594			spec->image_name ? spec->image_name : "(unknown)");
3595	if (count < 0)
3596		return count;
3597	bufp += count;
3598
3599	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3600			(unsigned long long) spec->snap_id, spec->snap_name);
3601	if (count < 0)
3602		return count;
3603	bufp += count;
3604
3605	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3606	if (count < 0)
3607		return count;
3608	bufp += count;
3609
3610	return (ssize_t) (bufp - buf);
3611}
3612
3613static ssize_t rbd_image_refresh(struct device *dev,
3614				 struct device_attribute *attr,
3615				 const char *buf,
3616				 size_t size)
3617{
3618	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3619	int ret;
3620
 
 
 
3621	ret = rbd_dev_refresh(rbd_dev);
3622	if (ret)
3623		rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3624
3625	return ret < 0 ? ret : size;
3626}
3627
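/*
 * Each mapped image is a device on the rbd bus, so the attributes
 * below appear under /sys/bus/rbd/devices/<dev_id>/ (described in
 * Documentation/ABI/testing/sysfs-bus-rbd).
 */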
3628static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3629static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3630static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3631static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3632static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3633static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3634static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3635static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3636static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3637static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3638static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3639static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3640
3641static struct attribute *rbd_attrs[] = {
3642	&dev_attr_size.attr,
3643	&dev_attr_features.attr,
3644	&dev_attr_major.attr,
3645	&dev_attr_minor.attr,
 
3646	&dev_attr_client_id.attr,
 
 
3647	&dev_attr_pool.attr,
3648	&dev_attr_pool_id.attr,
 
3649	&dev_attr_name.attr,
3650	&dev_attr_image_id.attr,
3651	&dev_attr_current_snap.attr,
 
3652	&dev_attr_parent.attr,
3653	&dev_attr_refresh.attr,
3654	NULL
3655};
3656
3657static struct attribute_group rbd_attr_group = {
3658	.attrs = rbd_attrs,
3659};
3660
3661static const struct attribute_group *rbd_attr_groups[] = {
3662	&rbd_attr_group,
3663	NULL
3664};
3665
3666static void rbd_sysfs_dev_release(struct device *dev)
3667{
3668}
3669
3670static struct device_type rbd_device_type = {
3671	.name		= "rbd",
3672	.groups		= rbd_attr_groups,
3673	.release	= rbd_sysfs_dev_release,
3674};
3675
3676static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3677{
3678	kref_get(&spec->kref);
3679
3680	return spec;
3681}
3682
3683static void rbd_spec_free(struct kref *kref);
3684static void rbd_spec_put(struct rbd_spec *spec)
3685{
3686	if (spec)
3687		kref_put(&spec->kref, rbd_spec_free);
3688}
3689
3690static struct rbd_spec *rbd_spec_alloc(void)
3691{
3692	struct rbd_spec *spec;
3693
3694	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3695	if (!spec)
3696		return NULL;
 
 
 
3697	kref_init(&spec->kref);
3698
3699	return spec;
3700}
3701
3702static void rbd_spec_free(struct kref *kref)
3703{
3704	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3705
3706	kfree(spec->pool_name);
 
3707	kfree(spec->image_id);
3708	kfree(spec->image_name);
3709	kfree(spec->snap_name);
3710	kfree(spec);
3711}
3712
3713static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3714				struct rbd_spec *spec)
3715{
3716	struct rbd_device *rbd_dev;
3717
3718	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3719	if (!rbd_dev)
3720		return NULL;
3721
3722	spin_lock_init(&rbd_dev->lock);
3723	rbd_dev->flags = 0;
3724	atomic_set(&rbd_dev->parent_ref, 0);
3725	INIT_LIST_HEAD(&rbd_dev->node);
3726	init_rwsem(&rbd_dev->header_rwsem);
3727
3728	rbd_dev->spec = spec;
3729	rbd_dev->rbd_client = rbdc;
3730
3731	/* Initialize the layout used for all rbd requests */
 
3732
3733	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3734	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3735	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3736	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3737
 
3738	return rbd_dev;
3739}
3740
3741static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3742{
3743	rbd_put_client(rbd_dev->rbd_client);
3744	rbd_spec_put(rbd_dev->spec);
3745	kfree(rbd_dev);
3746}
3747
3748/*
3749 * Get the size and object order for an image snapshot, or if
 3750 * snap_id is CEPH_NOSNAP, get this information for the base
3751 * image.
3752 */
3753static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3754				u8 *order, u64 *snap_size)
3755{
3756	__le64 snapid = cpu_to_le64(snap_id);
3757	int ret;
3758	struct {
3759		u8 order;
3760		__le64 size;
3761	} __attribute__ ((packed)) size_buf = { 0 };
3762
3763	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3764				"rbd", "get_size",
3765				&snapid, sizeof (snapid),
3766				&size_buf, sizeof (size_buf));
3767	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3768	if (ret < 0)
3769		return ret;
3770	if (ret < sizeof (size_buf))
3771		return -ERANGE;
3772
3773	if (order) {
3774		*order = size_buf.order;
3775		dout("  order %u", (unsigned int)*order);
3776	}
3777	*snap_size = le64_to_cpu(size_buf.size);
3778
3779	dout("  snap_id 0x%016llx snap_size = %llu\n",
3780		(unsigned long long)snap_id,
3781		(unsigned long long)*snap_size);
3782
3783	return 0;
3784}
3785
3786static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3787{
3788	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3789					&rbd_dev->header.obj_order,
3790					&rbd_dev->header.image_size);
3791}
3792
3793static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3794{
 
3795	void *reply_buf;
 
3796	int ret;
3797	void *p;
3798
3799	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
 
 
3800	if (!reply_buf)
3801		return -ENOMEM;
3802
3803	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3804				"rbd", "get_object_prefix", NULL, 0,
3805				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3806	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3807	if (ret < 0)
3808		goto out;
3809
3810	p = reply_buf;
3811	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3812						p + ret, NULL, GFP_NOIO);
 
 
 
 
3813	ret = 0;
3814
3815	if (IS_ERR(rbd_dev->header.object_prefix)) {
3816		ret = PTR_ERR(rbd_dev->header.object_prefix);
3817		rbd_dev->header.object_prefix = NULL;
3818	} else {
3819		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
3820	}
3821out:
3822	kfree(reply_buf);
3823
3824	return ret;
3825}
3826
3827static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3828		u64 *snap_features)
3829{
3830	__le64 snapid = cpu_to_le64(snap_id);
 
 
 
3831	struct {
3832		__le64 features;
3833		__le64 incompat;
3834	} __attribute__ ((packed)) features_buf = { 0 };
3835	u64 incompat;
3836	int ret;
3837
3838	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3839				"rbd", "get_features",
3840				&snapid, sizeof (snapid),
3841				&features_buf, sizeof (features_buf));
 
 
 
3842	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3843	if (ret < 0)
3844		return ret;
3845	if (ret < sizeof (features_buf))
3846		return -ERANGE;
3847
3848	incompat = le64_to_cpu(features_buf.incompat);
3849	if (incompat & ~RBD_FEATURES_SUPPORTED)
 
 
3850		return -ENXIO;
 
3851
3852	*snap_features = le64_to_cpu(features_buf.features);
3853
3854	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3855		(unsigned long long)snap_id,
3856		(unsigned long long)*snap_features,
3857		(unsigned long long)le64_to_cpu(features_buf.incompat));
3858
3859	return 0;
3860}
3861
3862static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3863{
3864	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3865						&rbd_dev->header.features);
3866}
3867
3868static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3869{
3870	struct rbd_spec *parent_spec;
3871	size_t size;
3872	void *reply_buf = NULL;
3873	__le64 snapid;
3874	void *p;
3875	void *end;
3876	u64 pool_id;
3877	char *image_id;
3878	u64 snap_id;
3879	u64 overlap;
3880	int ret;
3881
3882	parent_spec = rbd_spec_alloc();
3883	if (!parent_spec)
3884		return -ENOMEM;
3885
3886	size = sizeof (__le64) +				/* pool_id */
3887		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
3888		sizeof (__le64) +				/* snap_id */
3889		sizeof (__le64);				/* overlap */
3890	reply_buf = kmalloc(size, GFP_KERNEL);
3891	if (!reply_buf) {
3892		ret = -ENOMEM;
3893		goto out_err;
3894	}
3895
3896	snapid = cpu_to_le64(CEPH_NOSNAP);
3897	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3898				"rbd", "get_parent",
3899				&snapid, sizeof (snapid),
3900				reply_buf, size);
3901	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3902	if (ret < 0)
3903		goto out_err;
3904
3905	p = reply_buf;
3906	end = reply_buf + ret;
3907	ret = -ERANGE;
3908	ceph_decode_64_safe(&p, end, pool_id, out_err);
3909	if (pool_id == CEPH_NOPOOL) {
3910		/*
3911		 * Either the parent never existed, or we have
 3912		 * a record of it but the image got flattened so it no
3913		 * longer has a parent.  When the parent of a
3914		 * layered image disappears we immediately set the
3915		 * overlap to 0.  The effect of this is that all new
3916		 * requests will be treated as if the image had no
3917		 * parent.
3918		 */
3919		if (rbd_dev->parent_overlap) {
3920			rbd_dev->parent_overlap = 0;
3921			smp_mb();
3922			rbd_dev_parent_put(rbd_dev);
3923			pr_info("%s: clone image has been flattened\n",
3924				rbd_dev->disk->disk_name);
3925		}
3926
3927		goto out;	/* No parent?  No problem. */
3928	}
3929
3930	/* The ceph file layout needs to fit pool id in 32 bits */
3931
3932	ret = -EIO;
3933	if (pool_id > (u64)U32_MAX) {
3934		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3935			(unsigned long long)pool_id, U32_MAX);
3936		goto out_err;
3937	}
3938
3939	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3940	if (IS_ERR(image_id)) {
3941		ret = PTR_ERR(image_id);
3942		goto out_err;
3943	}
3944	ceph_decode_64_safe(&p, end, snap_id, out_err);
3945	ceph_decode_64_safe(&p, end, overlap, out_err);
3946
3947	/*
3948	 * The parent won't change (except when the clone is
 3949	 * flattened, which was handled above).  So we only need to
 3950	 * record the parent spec if we have not already done so.
3951	 */
3952	if (!rbd_dev->parent_spec) {
3953		parent_spec->pool_id = pool_id;
3954		parent_spec->image_id = image_id;
3955		parent_spec->snap_id = snap_id;
3956		rbd_dev->parent_spec = parent_spec;
3957		parent_spec = NULL;	/* rbd_dev now owns this */
3958	}
3959
3960	/*
3961	 * We always update the parent overlap.  If it's zero we
3962	 * treat it specially.
3963	 */
3964	rbd_dev->parent_overlap = overlap;
3965	smp_mb();
3966	if (!overlap) {
3967
3968		/* A null parent_spec indicates it's the initial probe */
3969
3970		if (parent_spec) {
3971			/*
3972			 * The overlap has become zero, so the clone
3973			 * must have been resized down to 0 at some
3974			 * point.  Treat this the same as a flatten.
3975			 */
3976			rbd_dev_parent_put(rbd_dev);
3977			pr_info("%s: clone image now standalone\n",
3978				rbd_dev->disk->disk_name);
3979		} else {
3980			/*
3981			 * For the initial probe, if we find the
3982			 * overlap is zero we just pretend there was
3983			 * no parent image.
3984			 */
3985			rbd_warn(rbd_dev, "ignoring parent of "
3986						"clone with overlap 0\n");
3987		}
3988	}
3989out:
3990	ret = 0;
3991out_err:
3992	kfree(reply_buf);
3993	rbd_spec_put(parent_spec);
3994
3995	return ret;
3996}
3997
3998static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
 
3999{
4000	struct {
4001		__le64 stripe_unit;
4002		__le64 stripe_count;
4003	} __attribute__ ((packed)) striping_info_buf = { 0 };
4004	size_t size = sizeof (striping_info_buf);
4005	void *p;
4006	u64 obj_size;
4007	u64 stripe_unit;
4008	u64 stripe_count;
4009	int ret;
4010
4011	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4012				"rbd", "get_stripe_unit_count", NULL, 0,
4013				(char *)&striping_info_buf, size);
4014	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4015	if (ret < 0)
4016		return ret;
4017	if (ret < size)
4018		return -ERANGE;
4019
4020	/*
4021	 * We don't actually support the "fancy striping" feature
4022	 * (STRIPINGV2) yet, but if the striping sizes are the
4023	 * defaults the behavior is the same as before.  So find
4024	 * out, and only fail if the image has non-default values.
4025	 */
4026	ret = -EINVAL;
4027	obj_size = (u64)1 << rbd_dev->header.obj_order;
4028	p = &striping_info_buf;
4029	stripe_unit = ceph_decode_64(&p);
4030	if (stripe_unit != obj_size) {
4031		rbd_warn(rbd_dev, "unsupported stripe unit "
4032				"(got %llu want %llu)",
4033				stripe_unit, obj_size);
4034		return -EINVAL;
4035	}
4036	stripe_count = ceph_decode_64(&p);
4037	if (stripe_count != 1) {
4038		rbd_warn(rbd_dev, "unsupported stripe count "
4039				"(got %llu want 1)", stripe_count);
4040		return -EINVAL;
4041	}
4042	rbd_dev->header.stripe_unit = stripe_unit;
4043	rbd_dev->header.stripe_count = stripe_count;
 
 
4044
4045	return 0;
4046}
4047
4048static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4049{
 
4050	size_t image_id_size;
4051	char *image_id;
4052	void *p;
4053	void *end;
4054	size_t size;
4055	void *reply_buf = NULL;
4056	size_t len = 0;
4057	char *image_name = NULL;
4058	int ret;
4059
4060	rbd_assert(!rbd_dev->spec->image_name);
4061
4062	len = strlen(rbd_dev->spec->image_id);
4063	image_id_size = sizeof (__le32) + len;
4064	image_id = kmalloc(image_id_size, GFP_KERNEL);
4065	if (!image_id)
4066		return NULL;
4067
4068	p = image_id;
4069	end = image_id + image_id_size;
4070	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4071
4072	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4073	reply_buf = kmalloc(size, GFP_KERNEL);
4074	if (!reply_buf)
4075		goto out;
4076
4077	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4078				"rbd", "dir_get_name",
4079				image_id, image_id_size,
4080				reply_buf, size);
4081	if (ret < 0)
4082		goto out;
4083	p = reply_buf;
4084	end = reply_buf + ret;
4085
4086	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4087	if (IS_ERR(image_name))
4088		image_name = NULL;
4089	else
4090		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4091out:
4092	kfree(reply_buf);
4093	kfree(image_id);
4094
4095	return image_name;
4096}
4097
4098static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4099{
4100	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4101	const char *snap_name;
4102	u32 which = 0;
4103
4104	/* Skip over names until we find the one we are looking for */
4105
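	/*
	 * The format 1 header keeps snapshot names as consecutive
	 * NUL-terminated strings, in the same order as the ids in
	 * snapc->snaps[], which is why the two can be walked in step.
	 */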
4106	snap_name = rbd_dev->header.snap_names;
4107	while (which < snapc->num_snaps) {
4108		if (!strcmp(name, snap_name))
4109			return snapc->snaps[which];
4110		snap_name += strlen(snap_name) + 1;
4111		which++;
4112	}
4113	return CEPH_NOSNAP;
4114}
4115
4116static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4117{
4118	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4119	u32 which;
4120	bool found = false;
4121	u64 snap_id;
4122
4123	for (which = 0; !found && which < snapc->num_snaps; which++) {
4124		const char *snap_name;
4125
4126		snap_id = snapc->snaps[which];
4127		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4128		if (IS_ERR(snap_name)) {
4129			/* ignore no-longer existing snapshots */
4130			if (PTR_ERR(snap_name) == -ENOENT)
4131				continue;
4132			else
4133				break;
4134		}
4135		found = !strcmp(name, snap_name);
4136		kfree(snap_name);
4137	}
4138	return found ? snap_id : CEPH_NOSNAP;
4139}
4140
4141/*
4142 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4143 * no snapshot by that name is found, or if an error occurs.
4144 */
4145static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4146{
4147	if (rbd_dev->image_format == 1)
4148		return rbd_v1_snap_id_by_name(rbd_dev, name);
4149
4150	return rbd_v2_snap_id_by_name(rbd_dev, name);
4151}
4152
4153/*
4154 * When an rbd image has a parent image, it is identified by the
4155 * pool, image, and snapshot ids (not names).  This function fills
4156 * in the names for those ids.  (It's OK if we can't figure out the
4157 * name for an image id, but the pool and snapshot ids should always
4158 * exist and have names.)  All names in an rbd spec are dynamically
4159 * allocated.
4160 *
4161 * When an image being mapped (not a parent) is probed, we have the
4162 * pool name and pool id, image name and image id, and the snapshot
4163 * name.  The only thing we're missing is the snapshot id.
4164 */
4165static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4166{
4167	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4168	struct rbd_spec *spec = rbd_dev->spec;
4169	const char *pool_name;
4170	const char *image_name;
4171	const char *snap_name;
4172	int ret;
4173
4174	/*
4175	 * An image being mapped will have the pool name (etc.), but
4176	 * we need to look up the snapshot id.
4177	 */
4178	if (spec->pool_name) {
4179		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4180			u64 snap_id;
4181
4182			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4183			if (snap_id == CEPH_NOSNAP)
4184				return -ENOENT;
4185			spec->snap_id = snap_id;
4186		} else {
4187			spec->snap_id = CEPH_NOSNAP;
4188		}
4189
4190		return 0;
4191	}
4192
4193	/* Get the pool name; we have to make our own copy of this */
4194
4195	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4196	if (!pool_name) {
4197		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4198		return -EIO;
4199	}
4200	pool_name = kstrdup(pool_name, GFP_KERNEL);
4201	if (!pool_name)
4202		return -ENOMEM;
4203
4204	/* Fetch the image name; tolerate failure here */
4205
4206	image_name = rbd_dev_image_name(rbd_dev);
4207	if (!image_name)
4208		rbd_warn(rbd_dev, "unable to get image name");
4209
4210	/* Look up the snapshot name, and make a copy */
4211
4212	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4213	if (IS_ERR(snap_name)) {
4214		ret = PTR_ERR(snap_name);
4215		goto out_err;
4216	}
4217
4218	spec->pool_name = pool_name;
4219	spec->image_name = image_name;
4220	spec->snap_name = snap_name;
4221
4222	return 0;
 
4223out_err:
4224	kfree(image_name);
4225	kfree(pool_name);
4226
4227	return ret;
4228}
4229
4230static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
 
4231{
4232	size_t size;
4233	int ret;
4234	void *reply_buf;
4235	void *p;
4236	void *end;
4237	u64 seq;
4238	u32 snap_count;
4239	struct ceph_snap_context *snapc;
4240	u32 i;
4241
4242	/*
4243	 * We'll need room for the seq value (maximum snapshot id),
4244	 * snapshot count, and array of that many snapshot ids.
4245	 * For now we have a fixed upper limit on the number we're
4246	 * prepared to receive.
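	 * With RBD_MAX_SNAP_COUNT of 510, that works out to
	 * 8 + 4 + 510 * 8 = 4092 bytes, so the reply buffer still
	 * fits in a single 4KB allocation.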
4247	 */
4248	size = sizeof (__le64) + sizeof (__le32) +
4249			RBD_MAX_SNAP_COUNT * sizeof (__le64);
4250	reply_buf = kzalloc(size, GFP_KERNEL);
4251	if (!reply_buf)
4252		return -ENOMEM;
4253
4254	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4255				"rbd", "get_snapcontext", NULL, 0,
4256				reply_buf, size);
4257	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4258	if (ret < 0)
4259		goto out;
4260
4261	p = reply_buf;
4262	end = reply_buf + ret;
4263	ret = -ERANGE;
4264	ceph_decode_64_safe(&p, end, seq, out);
4265	ceph_decode_32_safe(&p, end, snap_count, out);
4266
4267	/*
4268	 * Make sure the reported number of snapshot ids wouldn't go
4269	 * beyond the end of our buffer.  But before checking that,
4270	 * make sure the computed size of the snapshot context we
4271	 * allocate is representable in a size_t.
4272	 */
4273	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4274				 / sizeof (u64)) {
4275		ret = -EINVAL;
4276		goto out;
4277	}
4278	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4279		goto out;
4280	ret = 0;
4281
4282	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4283	if (!snapc) {
4284		ret = -ENOMEM;
4285		goto out;
4286	}
4287	snapc->seq = seq;
4288	for (i = 0; i < snap_count; i++)
4289		snapc->snaps[i] = ceph_decode_64(&p);
4290
4291	ceph_put_snap_context(rbd_dev->header.snapc);
4292	rbd_dev->header.snapc = snapc;
4293
4294	dout("  snap context seq = %llu, snap_count = %u\n",
4295		(unsigned long long)seq, (unsigned int)snap_count);
4296out:
4297	kfree(reply_buf);
4298
4299	return ret;
4300}
4301
4302static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4303					u64 snap_id)
4304{
4305	size_t size;
4306	void *reply_buf;
4307	__le64 snapid;
4308	int ret;
4309	void *p;
4310	void *end;
4311	char *snap_name;
4312
4313	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4314	reply_buf = kmalloc(size, GFP_KERNEL);
4315	if (!reply_buf)
4316		return ERR_PTR(-ENOMEM);
4317
4318	snapid = cpu_to_le64(snap_id);
4319	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4320				"rbd", "get_snapshot_name",
4321				&snapid, sizeof (snapid),
4322				reply_buf, size);
4323	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4324	if (ret < 0) {
4325		snap_name = ERR_PTR(ret);
4326		goto out;
4327	}
4328
4329	p = reply_buf;
4330	end = reply_buf + ret;
4331	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4332	if (IS_ERR(snap_name))
4333		goto out;
4334
4335	dout("  snap_id 0x%016llx snap_name = %s\n",
4336		(unsigned long long)snap_id, snap_name);
4337out:
4338	kfree(reply_buf);
4339
4340	return snap_name;
4341}
4342
4343static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 
 
4344{
4345	bool first_time = rbd_dev->header.object_prefix == NULL;
4346	int ret;
4347
4348	ret = rbd_dev_v2_image_size(rbd_dev);
 
 
4349	if (ret)
4350		return ret;
4351
4352	if (first_time) {
4353		ret = rbd_dev_v2_header_onetime(rbd_dev);
4354		if (ret)
4355			return ret;
4356	}
4357
4358	/*
4359	 * If the image supports layering, get the parent info.  We
4360	 * need to probe the first time regardless.  Thereafter we
 4361	 * only need to do so if there's a parent, to see if it has
4362	 * disappeared due to the mapped image getting flattened.
4363	 */
4364	if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4365			(first_time || rbd_dev->parent_spec)) {
4366		bool warn;
4367
4368		ret = rbd_dev_v2_parent_info(rbd_dev);
4369		if (ret)
4370			return ret;
4371
4372		/*
4373		 * Print a warning if this is the initial probe and
4374		 * the image has a parent.  Don't print it if the
4375		 * image now being probed is itself a parent.  We
4376		 * can tell at this point because we won't know its
4377		 * pool name yet (just its pool id).
4378		 */
4379		warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4380		if (first_time && warn)
4381			rbd_warn(rbd_dev, "WARNING: kernel layering "
4382					"is EXPERIMENTAL!");
4383	}
4384
4385	if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4386		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4387			rbd_dev->mapping.size = rbd_dev->header.image_size;
4388
4389	ret = rbd_dev_v2_snap_context(rbd_dev);
4390	dout("rbd_dev_v2_snap_context returned %d\n", ret);
4391
4392	return ret;
4393}
4394
4395static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4396{
4397	struct device *dev;
4398	int ret;
4399
4400	dev = &rbd_dev->dev;
4401	dev->bus = &rbd_bus_type;
4402	dev->type = &rbd_device_type;
4403	dev->parent = &rbd_root_dev;
4404	dev->release = rbd_dev_device_release;
4405	dev_set_name(dev, "%d", rbd_dev->dev_id);
4406	ret = device_register(dev);
4407
4408	return ret;
4409}
4410
4411static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4412{
4413	device_unregister(&rbd_dev->dev);
4414}
4415
4416/*
4417 * Get a unique rbd identifier for the given new rbd_dev, and add
4418 * the rbd_dev to the global list.
4419 */
4420static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4421{
4422	int new_dev_id;
4423
4424	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4425				    0, minor_to_rbd_dev_id(1 << MINORBITS),
4426				    GFP_KERNEL);
4427	if (new_dev_id < 0)
4428		return new_dev_id;
4429
4430	rbd_dev->dev_id = new_dev_id;
4431
4432	spin_lock(&rbd_dev_list_lock);
4433	list_add_tail(&rbd_dev->node, &rbd_dev_list);
4434	spin_unlock(&rbd_dev_list_lock);
4435
4436	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4437
4438	return 0;
4439}
4440
4441/*
4442 * Remove an rbd_dev from the global list, and record that its
4443 * identifier is no longer in use.
4444 */
4445static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4446{
4447	spin_lock(&rbd_dev_list_lock);
4448	list_del_init(&rbd_dev->node);
4449	spin_unlock(&rbd_dev_list_lock);
4450
4451	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
 
4452
4453	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4454}
4455
4456/*
4457 * Skips over white space at *buf, and updates *buf to point to the
4458 * first found non-space character (if any). Returns the length of
4459 * the token (string of non-white space characters) found.  Note
4460 * that *buf must be terminated with '\0'.
4461 */
4462static inline size_t next_token(const char **buf)
4463{
 4464	/*
 4465	 * These are the characters that produce nonzero for
 4466	 * isspace() in the "C" and "POSIX" locales.
 4467	 */
 4468	const char *spaces = " \f\n\r\t\v";
 4469
 4470	*buf += strspn(*buf, spaces);	/* Find start of token */
4471
4472	return strcspn(*buf, spaces);   /* Return token length */
4473}
4474
4475/*
4476 * Finds the next token in *buf, and if the provided token buffer is
4477 * big enough, copies the found token into it.  The result, if
4478 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
4479 * must be terminated with '\0' on entry.
4480 *
4481 * Returns the length of the token found (not including the '\0').
4482 * Return value will be 0 if no token is found, and it will be >=
4483 * token_size if the token would not fit.
4484 *
4485 * The *buf pointer will be updated to point beyond the end of the
4486 * found token.  Note that this occurs even if the token buffer is
4487 * too small to hold it.
4488 */
4489static inline size_t copy_token(const char **buf,
4490				char *token,
4491				size_t token_size)
4492{
 4493	size_t len;
4494
4495	len = next_token(buf);
4496	if (len < token_size) {
4497		memcpy(token, *buf, len);
4498		*(token + len) = '\0';
4499	}
4500	*buf += len;
4501
 4502	return len;
4503}
4504
4505/*
4506 * Finds the next token in *buf, dynamically allocates a buffer big
4507 * enough to hold a copy of it, and copies the token into the new
4508 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
4509 * that a duplicate buffer is created even for a zero-length token.
4510 *
4511 * Returns a pointer to the newly-allocated duplicate, or a null
4512 * pointer if memory for the duplicate was not available.  If
4513 * the lenp argument is a non-null pointer, the length of the token
4514 * (not including the '\0') is returned in *lenp.
4515 *
4516 * If successful, the *buf pointer will be updated to point beyond
4517 * the end of the found token.
4518 *
4519 * Note: uses GFP_KERNEL for allocation.
4520 */
4521static inline char *dup_token(const char **buf, size_t *lenp)
4522{
4523	char *dup;
4524	size_t len;
4525
4526	len = next_token(buf);
4527	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4528	if (!dup)
4529		return NULL;
4530	*(dup + len) = '\0';
4531	*buf += len;
4532
4533	if (lenp)
4534		*lenp = len;
4535
4536	return dup;
4537}
4538
4539/*
4540 * Parse the options provided for an "rbd add" (i.e., rbd image
4541 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
4542 * and the data written is passed here via a NUL-terminated buffer.
4543 * Returns 0 if successful or an error code otherwise.
4544 *
4545 * The information extracted from these options is recorded in
4546 * the other parameters which return dynamically-allocated
4547 * structures:
4548 *  ceph_opts
4549 *      The address of a pointer that will refer to a ceph options
4550 *      structure.  Caller must release the returned pointer using
4551 *      ceph_destroy_options() when it is no longer needed.
4552 *  rbd_opts
4553 *	Address of an rbd options pointer.  Fully initialized by
4554 *	this function; caller must release with kfree().
4555 *  spec
4556 *	Address of an rbd image specification pointer.  Fully
4557 *	initialized by this function based on parsed options.
4558 *	Caller must release with rbd_spec_put().
4559 *
4560 * The options passed take this form:
 4561 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4562 * where:
4563 *  <mon_addrs>
4564 *      A comma-separated list of one or more monitor addresses.
4565 *      A monitor address is an ip address, optionally followed
4566 *      by a port number (separated by a colon).
4567 *        I.e.:  ip1[:port1][,ip2[:port2]...]
4568 *  <options>
4569 *      A comma-separated list of ceph and/or rbd options.
4570 *  <pool_name>
4571 *      The name of the rados pool containing the rbd image.
4572 *  <image_name>
4573 *      The name of the image in that pool to map.
 4574 *  <snap_name>
 4575 *      An optional snapshot name.  If provided, the mapping will
 4576 *      present data from the image at the time that snapshot was
 4577 *      created.  The image head is used if no snapshot name is
4578 *      provided.  Snapshot mappings are always read-only.
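 *
 * For example (hypothetical monitor address, credentials, and image
 * name; all values here are illustrative only), mapping the head of
 * image "foo" in pool "rbd" could look like:
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *       > /sys/bus/rbd/add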
4579 */
4580static int rbd_add_parse_args(const char *buf,
4581				struct ceph_options **ceph_opts,
4582				struct rbd_options **opts,
4583				struct rbd_spec **rbd_spec)
4584{
4585	size_t len;
4586	char *options;
4587	const char *mon_addrs;
4588	char *snap_name;
4589	size_t mon_addrs_size;
4590	struct rbd_spec *spec = NULL;
4591	struct rbd_options *rbd_opts = NULL;
4592	struct ceph_options *copts;
4593	int ret;
4594
4595	/* The first four tokens are required */
4596
4597	len = next_token(&buf);
4598	if (!len) {
4599		rbd_warn(NULL, "no monitor address(es) provided");
4600		return -EINVAL;
4601	}
4602	mon_addrs = buf;
4603	mon_addrs_size = len + 1;
4604	buf += len;
4605
4606	ret = -EINVAL;
4607	options = dup_token(&buf, NULL);
4608	if (!options)
4609		return -ENOMEM;
4610	if (!*options) {
4611		rbd_warn(NULL, "no options provided");
4612		goto out_err;
4613	}
4614
4615	spec = rbd_spec_alloc();
4616	if (!spec)
4617		goto out_mem;
4618
4619	spec->pool_name = dup_token(&buf, NULL);
4620	if (!spec->pool_name)
4621		goto out_mem;
4622	if (!*spec->pool_name) {
4623		rbd_warn(NULL, "no pool name provided");
4624		goto out_err;
4625	}
4626
4627	spec->image_name = dup_token(&buf, NULL);
4628	if (!spec->image_name)
4629		goto out_mem;
4630	if (!*spec->image_name) {
4631		rbd_warn(NULL, "no image name provided");
4632		goto out_err;
4633	}
4634
4635	/*
4636	 * Snapshot name is optional; default is to use "-"
4637	 * (indicating the head/no snapshot).
4638	 */
4639	len = next_token(&buf);
4640	if (!len) {
4641		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4642		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4643	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
4644		ret = -ENAMETOOLONG;
4645		goto out_err;
4646	}
4647	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4648	if (!snap_name)
4649		goto out_mem;
4650	*(snap_name + len) = '\0';
4651	spec->snap_name = snap_name;
 
 
 
 
4652
4653	/* Initialize all rbd options to the defaults */
4654
4655	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4656	if (!rbd_opts)
4657		goto out_mem;
4658
4659	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4660
4661	copts = ceph_parse_options(options, mon_addrs,
4662					mon_addrs + mon_addrs_size - 1,
4663					parse_rbd_opts_token, rbd_opts);
4664	if (IS_ERR(copts)) {
4665		ret = PTR_ERR(copts);
4666		goto out_err;
4667	}
4668	kfree(options);
4669
4670	*ceph_opts = copts;
4671	*opts = rbd_opts;
4672	*rbd_spec = spec;
4673
 
 
 
 
4674	return 0;
 
4675out_mem:
4676	ret = -ENOMEM;
4677out_err:
4678	kfree(rbd_opts);
4679	rbd_spec_put(spec);
 
4680	kfree(options);
4681
4682	return ret;
4683}
4684
4685/*
4686 * An rbd format 2 image has a unique identifier, distinct from the
4687 * name given to it by the user.  Internally, that identifier is
4688 * what's used to specify the names of objects related to the image.
4689 *
4690 * A special "rbd id" object is used to map an rbd image name to its
4691 * id.  If that object doesn't exist, then there is no v2 rbd image
4692 * with the supplied name.
4693 *
4694 * This function will record the given rbd_dev's image_id field if
4695 * it can be determined, and in that case will return 0.  If any
4696 * errors occur a negative errno will be returned and the rbd_dev's
4697 * image_id field will be unchanged (and should be NULL).
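 *
 * For illustration (assuming RBD_ID_PREFIX in rbd_types.h is
 * "rbd_id."), the id object for an image named "foo" would be
 * "rbd_id.foo"; the "get_id" class method on that object returns
 * the image id as an encoded string.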
4698 */
4699static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4700{
4701	int ret;
4702	size_t size;
4703	char *object_name;
4704	void *response;
4705	char *image_id;
4706
4707	/*
4708	 * When probing a parent image, the image id is already
4709	 * known (and the image name likely is not).  There's no
4710	 * need to fetch the image id again in this case.  We
4711	 * do still need to set the image format though.
4712	 */
4713	if (rbd_dev->spec->image_id) {
4714		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4715
4716		return 0;
4717	}
4718
4719	/*
4720	 * First, see if the format 2 image id file exists, and if
4721	 * so, get the image's persistent id from it.
4722	 */
4723	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4724	object_name = kmalloc(size, GFP_NOIO);
4725	if (!object_name)
4726		return -ENOMEM;
4727	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4728	dout("rbd id object name is %s\n", object_name);
4729
4730	/* Response will be an encoded string, which includes a length */
4731
 
4732	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4733	response = kzalloc(size, GFP_NOIO);
4734	if (!response) {
4735		ret = -ENOMEM;
4736		goto out;
4737	}
4738
4739	/* If it doesn't exist we'll assume it's a format 1 image */
4740
4741	ret = rbd_obj_method_sync(rbd_dev, object_name,
4742				"rbd", "get_id", NULL, 0,
4743				response, RBD_IMAGE_ID_LEN_MAX);
4744	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4745	if (ret == -ENOENT) {
4746		image_id = kstrdup("", GFP_KERNEL);
4747		ret = image_id ? 0 : -ENOMEM;
4748		if (!ret)
4749			rbd_dev->image_format = 1;
4750	} else if (ret > sizeof (__le32)) {
4751		void *p = response;
4752
4753		image_id = ceph_extract_encoded_string(&p, p + ret,
4754						NULL, GFP_NOIO);
4755		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4756		if (!ret)
4757			rbd_dev->image_format = 2;
4758	} else {
4759		ret = -EINVAL;
4760	}
4761
4762	if (!ret) {
4763		rbd_dev->spec->image_id = image_id;
4764		dout("image_id is %s\n", image_id);
4765	}
4766out:
4767	kfree(response);
4768	kfree(object_name);
4769
4770	return ret;
4771}
4772
4773/*
4774 * Undo whatever state changes are made by v1 or v2 header info
4775 * call.
4776 */
4777static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4778{
4779	struct rbd_image_header	*header;
4780
4781	/* Drop parent reference unless it's already been done (or none) */
4782
4783	if (rbd_dev->parent_overlap)
4784		rbd_dev_parent_put(rbd_dev);
4785
4786	/* Free dynamic fields from the header, then zero it out */
4787
4788	header = &rbd_dev->header;
4789	ceph_put_snap_context(header->snapc);
4790	kfree(header->snap_sizes);
4791	kfree(header->snap_names);
4792	kfree(header->object_prefix);
4793	memset(header, 0, sizeof (*header));
4794}
4795
4796static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
 
4797{
4798	int ret;
4799
4800	ret = rbd_dev_v2_object_prefix(rbd_dev);
4801	if (ret)
4802		goto out_err;
4803
4804	/*
 4805	 * Get and check the features for the image.  Currently the
4806	 * features are assumed to never change.
4807	 */
4808	ret = rbd_dev_v2_features(rbd_dev);
 
4809	if (ret)
4810		goto out_err;
4811
4812	/* If the image supports fancy striping, get its parameters */
4813
4814	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4815		ret = rbd_dev_v2_striping_info(rbd_dev);
4816		if (ret < 0)
4817			goto out_err;
 
4818	}
 4819	/* No support for crypto and compression types in format 2 images */
4820
4821	return 0;
4822out_err:
4823	rbd_dev->header.features = 0;
4824	kfree(rbd_dev->header.object_prefix);
4825	rbd_dev->header.object_prefix = NULL;
4826
4827	return ret;
4828}
4829
4830static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4831{
4832	struct rbd_device *parent = NULL;
4833	struct rbd_spec *parent_spec;
4834	struct rbd_client *rbdc;
4835	int ret;
4836
4837	if (!rbd_dev->parent_spec)
4838		return 0;
4839	/*
4840	 * We need to pass a reference to the client and the parent
4841	 * spec when creating the parent rbd_dev.  Images related by
4842	 * parent/child relationships always share both.
4843	 */
4844	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4845	rbdc = __rbd_get_client(rbd_dev->rbd_client);
4846
4847	ret = -ENOMEM;
4848	parent = rbd_dev_create(rbdc, parent_spec);
4849	if (!parent)
4850		goto out_err;
4851
4852	ret = rbd_dev_image_probe(parent, false);
4853	if (ret < 0)
4854		goto out_err;
 
4855	rbd_dev->parent = parent;
4856	atomic_set(&rbd_dev->parent_ref, 1);
4857
4858	return 0;
4859out_err:
4860	if (parent) {
4861		rbd_dev_unparent(rbd_dev);
4862		kfree(rbd_dev->header_name);
4863		rbd_dev_destroy(parent);
4864	} else {
4865		rbd_put_client(rbdc);
4866		rbd_spec_put(parent_spec);
4867	}
4868
 
 
 
4869	return ret;
4870}
4871
4872static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4873{
4874	int ret;
4875
4876	/* Get an id and fill in device name. */
4877
4878	ret = rbd_dev_id_get(rbd_dev);
4879	if (ret)
4880		return ret;
4881
4882	BUILD_BUG_ON(DEV_NAME_LEN
4883			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4884	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
4885
4886	/* Record our major and minor device numbers. */
4887
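	/*
	 * A sketch of the numbering: without single_major each image
	 * registers its own block major and uses minor 0; with
	 * single_major all images share rbd_major and the minor is
	 * derived from the device id (presumably shifted by
	 * RBD_SINGLE_MAJOR_PART_SHIFT, i.e. 16 minors per image, so
	 * partitions get their own minors).
	 */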
4888	if (!single_major) {
4889		ret = register_blkdev(0, rbd_dev->name);
4890		if (ret < 0)
4891			goto err_out_id;
4892
4893		rbd_dev->major = ret;
4894		rbd_dev->minor = 0;
4895	} else {
4896		rbd_dev->major = rbd_major;
4897		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
4898	}
4899
4900	/* Set up the blkdev mapping. */
4901
4902	ret = rbd_init_disk(rbd_dev);
4903	if (ret)
4904		goto err_out_blkdev;
4905
4906	ret = rbd_dev_mapping_set(rbd_dev);
4907	if (ret)
4908		goto err_out_disk;
4909	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
 
4910
4911	ret = rbd_bus_add_dev(rbd_dev);
4912	if (ret)
4913		goto err_out_mapping;
4914
4915	/* Everything's ready.  Announce the disk to the world. */
4916
4917	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4918	add_disk(rbd_dev->disk);
4919
4920	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4921		(unsigned long long) rbd_dev->mapping.size);
4922
4923	return ret;
4924
4925err_out_mapping:
4926	rbd_dev_mapping_clear(rbd_dev);
4927err_out_disk:
4928	rbd_free_disk(rbd_dev);
4929err_out_blkdev:
4930	if (!single_major)
4931		unregister_blkdev(rbd_dev->major, rbd_dev->name);
4932err_out_id:
4933	rbd_dev_id_put(rbd_dev);
4934	rbd_dev_mapping_clear(rbd_dev);
4935
4936	return ret;
4937}
4938
4939static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4940{
4941	struct rbd_spec *spec = rbd_dev->spec;
4942	size_t size;
4943
4944	/* Record the header object name for this rbd image. */
4945
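	/*
	 * For illustration (assuming the usual rbd_types.h definitions,
	 * RBD_SUFFIX ".rbd" and RBD_HEADER_PREFIX "rbd_header."): a
	 * format 1 image named "foo" uses header object "foo.rbd",
	 * while a format 2 image with id "abc123" uses
	 * "rbd_header.abc123".
	 */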
4946	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4947
4948	if (rbd_dev->image_format == 1)
4949		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
 
4950	else
4951		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
 
4952
4953	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4954	if (!rbd_dev->header_name)
4955		return -ENOMEM;
4956
4957	if (rbd_dev->image_format == 1)
4958		sprintf(rbd_dev->header_name, "%s%s",
4959			spec->image_name, RBD_SUFFIX);
4960	else
4961		sprintf(rbd_dev->header_name, "%s%s",
4962			RBD_HEADER_PREFIX, spec->image_id);
4963	return 0;
4964}
4965
4966static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4967{
 
 
 
4968	rbd_dev_unprobe(rbd_dev);
4969	kfree(rbd_dev->header_name);
4970	rbd_dev->header_name = NULL;
4971	rbd_dev->image_format = 0;
4972	kfree(rbd_dev->spec->image_id);
4973	rbd_dev->spec->image_id = NULL;
4974
4975	rbd_dev_destroy(rbd_dev);
4976}
4977
4978/*
4979 * Probe for the existence of the header object for the given rbd
4980 * device.  If this image is the one being mapped (i.e., not a
4981 * parent), initiate a watch on its header object before using that
4982 * object to get detailed information about the rbd image.
 
 
 
4983 */
4984static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4985{
 
4986	int ret;
4987
4988	/*
4989	 * Get the id from the image id object.  Unless there's an
4990	 * error, rbd_dev->spec->image_id will be filled in with
4991	 * a dynamically-allocated string, and rbd_dev->image_format
4992	 * will be set to either 1 or 2.
4993	 */
4994	ret = rbd_dev_image_id(rbd_dev);
4995	if (ret)
4996		return ret;
4997	rbd_assert(rbd_dev->spec->image_id);
4998	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4999
5000	ret = rbd_dev_header_name(rbd_dev);
5001	if (ret)
5002		goto err_out_format;
5003
5004	if (mapping) {
5005		ret = rbd_dev_header_watch_sync(rbd_dev);
5006		if (ret)
5007			goto out_header_name;
 
 
 
5008	}
5009
5010	if (rbd_dev->image_format == 1)
5011		ret = rbd_dev_v1_header_info(rbd_dev);
5012	else
5013		ret = rbd_dev_v2_header_info(rbd_dev);
5014	if (ret)
5015		goto err_out_watch;
 
 
 
5016
5017	ret = rbd_dev_spec_update(rbd_dev);
5018	if (ret)
5019		goto err_out_probe;
5020
5021	ret = rbd_dev_probe_parent(rbd_dev);
5022	if (ret)
5023		goto err_out_probe;
5024
5025	dout("discovered format %u image, header name is %s\n",
5026		rbd_dev->image_format, rbd_dev->header_name);
5027
5028	return 0;
 
5029err_out_probe:
 
 
 
 
5030	rbd_dev_unprobe(rbd_dev);
5031err_out_watch:
5032	if (mapping)
5033		rbd_dev_header_unwatch_sync(rbd_dev);
5034out_header_name:
5035	kfree(rbd_dev->header_name);
5036	rbd_dev->header_name = NULL;
5037err_out_format:
5038	rbd_dev->image_format = 0;
5039	kfree(rbd_dev->spec->image_id);
5040	rbd_dev->spec->image_id = NULL;
5041
5042	dout("probe failed, returning %d\n", ret);
 
5043
5044	return ret;
5045}
5046
5047static ssize_t do_rbd_add(struct bus_type *bus,
5048			  const char *buf,
5049			  size_t count)
5050{
5051	struct rbd_device *rbd_dev = NULL;
5052	struct ceph_options *ceph_opts = NULL;
5053	struct rbd_options *rbd_opts = NULL;
5054	struct rbd_spec *spec = NULL;
5055	struct rbd_client *rbdc;
5056	struct ceph_osd_client *osdc;
5057	bool read_only;
5058	int rc = -ENOMEM;
 
5059
5060	if (!try_module_get(THIS_MODULE))
5061		return -ENODEV;
5062
5063	/* parse add command */
5064	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5065	if (rc < 0)
5066		goto err_out_module;
5067	read_only = rbd_opts->read_only;
5068	kfree(rbd_opts);
5069	rbd_opts = NULL;	/* done with this */
5070
5071	rbdc = rbd_get_client(ceph_opts);
5072	if (IS_ERR(rbdc)) {
5073		rc = PTR_ERR(rbdc);
5074		goto err_out_args;
5075	}
5076
5077	/* pick the pool */
5078	osdc = &rbdc->client->osdc;
5079	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
5080	if (rc < 0)
 
5081		goto err_out_client;
 
5082	spec->pool_id = (u64)rc;
5083
5084	/* The ceph file layout needs to fit pool id in 32 bits */
5085
5086	if (spec->pool_id > (u64)U32_MAX) {
5087		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
5088				(unsigned long long)spec->pool_id, U32_MAX);
5089		rc = -EIO;
5090		goto err_out_client;
5091	}
5092
5093	rbd_dev = rbd_dev_create(rbdc, spec);
5094	if (!rbd_dev)
5095		goto err_out_client;
5096	rbdc = NULL;		/* rbd_dev now owns this */
5097	spec = NULL;		/* rbd_dev now owns this */
 
5098
5099	rc = rbd_dev_image_probe(rbd_dev, true);
5100	if (rc < 0)
5101		goto err_out_rbd_dev;
 
5102
5103	/* If we are mapping a snapshot it must be marked read-only */
 
 
5104
5105	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5106		read_only = true;
5107	rbd_dev->mapping.read_only = read_only;
 
 
5108
5109	rc = rbd_dev_device_setup(rbd_dev);
5110	if (rc) {
5111		/*
5112		 * rbd_dev_header_unwatch_sync() can't be moved into
5113		 * rbd_dev_image_release() without refactoring, see
5114		 * commit 1f3ef78861ac.
5115		 */
5116		rbd_dev_header_unwatch_sync(rbd_dev);
5117		rbd_dev_image_release(rbd_dev);
5118		goto err_out_module;
5119	}
5120
5121	return count;
5122
5123err_out_rbd_dev:
5124	rbd_dev_destroy(rbd_dev);
5125err_out_client:
5126	rbd_put_client(rbdc);
5127err_out_args:
5128	rbd_spec_put(spec);
5129err_out_module:
5130	module_put(THIS_MODULE);
5131
5132	dout("Error adding device %s\n", buf);
5133
5134	return (ssize_t)rc;
5135}
5136
5137static ssize_t rbd_add(struct bus_type *bus,
5138		       const char *buf,
5139		       size_t count)
5140{
5141	if (single_major)
5142		return -EINVAL;
5143
5144	return do_rbd_add(bus, buf, count);
5145}
5146
5147static ssize_t rbd_add_single_major(struct bus_type *bus,
5148				    const char *buf,
5149				    size_t count)
5150{
5151	return do_rbd_add(bus, buf, count);
5152}
5153
5154static void rbd_dev_device_release(struct device *dev)
 
5155{
5156	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5157
5158	rbd_free_disk(rbd_dev);
5159	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5160	rbd_dev_mapping_clear(rbd_dev);
5161	if (!single_major)
5162		unregister_blkdev(rbd_dev->major, rbd_dev->name);
5163	rbd_dev_id_put(rbd_dev);
5164	rbd_dev_mapping_clear(rbd_dev);
5165}
5166
5167static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5168{
5169	while (rbd_dev->parent) {
5170		struct rbd_device *first = rbd_dev;
5171		struct rbd_device *second = first->parent;
5172		struct rbd_device *third;
5173
5174		/*
5175		 * Follow to the parent with no grandparent and
5176		 * remove it.
5177		 */
5178		while (second && (third = second->parent)) {
5179			first = second;
5180			second = third;
5181		}
5182		rbd_assert(second);
5183		rbd_dev_image_release(second);
 
5184		first->parent = NULL;
5185		first->parent_overlap = 0;
5186
5187		rbd_assert(first->parent_spec);
5188		rbd_spec_put(first->parent_spec);
5189		first->parent_spec = NULL;
5190	}
5191}
5192
5193static ssize_t do_rbd_remove(struct bus_type *bus,
5194			     const char *buf,
5195			     size_t count)
5196{
5197	struct rbd_device *rbd_dev = NULL;
5198	struct list_head *tmp;
5199	int dev_id;
5200	unsigned long ul;
5201	bool already = false;
5202	int ret;
5203
5204	ret = kstrtoul(buf, 10, &ul);
5205	if (ret)
5206		return ret;
5207
5208	/* convert to int; abort if we lost anything in the conversion */
5209	dev_id = (int)ul;
5210	if (dev_id != ul)
 
 
5211		return -EINVAL;
5212
5213	ret = -ENOENT;
5214	spin_lock(&rbd_dev_list_lock);
5215	list_for_each(tmp, &rbd_dev_list) {
5216		rbd_dev = list_entry(tmp, struct rbd_device, node);
5217		if (rbd_dev->dev_id == dev_id) {
5218			ret = 0;
5219			break;
5220		}
5221	}
5222	if (!ret) {
5223		spin_lock_irq(&rbd_dev->lock);
5224		if (rbd_dev->open_count)
5225			ret = -EBUSY;
5226		else
5227			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5228							&rbd_dev->flags);
5229		spin_unlock_irq(&rbd_dev->lock);
5230	}
5231	spin_unlock(&rbd_dev_list_lock);
5232	if (ret < 0 || already)
5233		return ret;
5234
5235	rbd_dev_header_unwatch_sync(rbd_dev);
5236	/*
5237	 * flush remaining watch callbacks - these must be complete
 5238	 * before the osd_client is shut down
5239	 */
5240	dout("%s: flushing notifies", __func__);
5241	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 
 
5242
5243	/*
5244	 * Don't free anything from rbd_dev->disk until after all
5245	 * notifies are completely processed. Otherwise
5246	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5247	 * in a potential use after free of rbd_dev->disk or rbd_dev.
5248	 */
5249	rbd_bus_del_dev(rbd_dev);
5250	rbd_dev_image_release(rbd_dev);
5251	module_put(THIS_MODULE);
5252
 
 
 
 
5253	return count;
5254}
5255
5256static ssize_t rbd_remove(struct bus_type *bus,
5257			  const char *buf,
5258			  size_t count)
5259{
5260	if (single_major)
5261		return -EINVAL;
5262
5263	return do_rbd_remove(bus, buf, count);
5264}
5265
5266static ssize_t rbd_remove_single_major(struct bus_type *bus,
5267				       const char *buf,
5268				       size_t count)
5269{
5270	return do_rbd_remove(bus, buf, count);
5271}
5272
5273/*
5274 * create control files in sysfs
5275 * /sys/bus/rbd/...
5276 */
5277static int rbd_sysfs_init(void)
5278{
5279	int ret;
5280
5281	ret = device_register(&rbd_root_dev);
5282	if (ret < 0)
 
5283		return ret;
 
5284
5285	ret = bus_register(&rbd_bus_type);
5286	if (ret < 0)
5287		device_unregister(&rbd_root_dev);
5288
5289	return ret;
5290}
5291
5292static void rbd_sysfs_cleanup(void)
5293{
5294	bus_unregister(&rbd_bus_type);
5295	device_unregister(&rbd_root_dev);
5296}
5297
5298static int rbd_slab_init(void)
5299{
5300	rbd_assert(!rbd_img_request_cache);
5301	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5302					sizeof (struct rbd_img_request),
5303					__alignof__(struct rbd_img_request),
5304					0, NULL);
5305	if (!rbd_img_request_cache)
5306		return -ENOMEM;
5307
5308	rbd_assert(!rbd_obj_request_cache);
5309	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5310					sizeof (struct rbd_obj_request),
5311					__alignof__(struct rbd_obj_request),
5312					0, NULL);
5313	if (!rbd_obj_request_cache)
5314		goto out_err;
5315
5316	rbd_assert(!rbd_segment_name_cache);
5317	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5318					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
5319	if (rbd_segment_name_cache)
5320		return 0;
5321out_err:
5322	if (rbd_obj_request_cache) {
5323		kmem_cache_destroy(rbd_obj_request_cache);
5324		rbd_obj_request_cache = NULL;
5325	}
5326
 
5327	kmem_cache_destroy(rbd_img_request_cache);
5328	rbd_img_request_cache = NULL;
5329
5330	return -ENOMEM;
5331}
5332
5333static void rbd_slab_exit(void)
5334{
5335	rbd_assert(rbd_segment_name_cache);
5336	kmem_cache_destroy(rbd_segment_name_cache);
5337	rbd_segment_name_cache = NULL;
5338
5339	rbd_assert(rbd_obj_request_cache);
5340	kmem_cache_destroy(rbd_obj_request_cache);
5341	rbd_obj_request_cache = NULL;
5342
5343	rbd_assert(rbd_img_request_cache);
5344	kmem_cache_destroy(rbd_img_request_cache);
5345	rbd_img_request_cache = NULL;
5346}
5347
5348static int __init rbd_init(void)
5349{
5350	int rc;
5351
5352	if (!libceph_compatible(NULL)) {
5353		rbd_warn(NULL, "libceph incompatibility (quitting)");
5354		return -EINVAL;
5355	}
5356
5357	rc = rbd_slab_init();
5358	if (rc)
5359		return rc;
5360
5361	if (single_major) {
5362		rbd_major = register_blkdev(0, RBD_DRV_NAME);
5363		if (rbd_major < 0) {
5364			rc = rbd_major;
5365			goto err_out_slab;
5366		}
5367	}
5368
5369	rc = rbd_sysfs_init();
5370	if (rc)
5371		goto err_out_blkdev;
5372
5373	if (single_major)
5374		pr_info("loaded (major %d)\n", rbd_major);
5375	else
5376		pr_info("loaded\n");
5377
5378	return 0;
5379
5380err_out_blkdev:
5381	if (single_major)
5382		unregister_blkdev(rbd_major, RBD_DRV_NAME);
 
 
5383err_out_slab:
5384	rbd_slab_exit();
5385	return rc;
5386}
5387
5388static void __exit rbd_exit(void)
5389{
 
5390	rbd_sysfs_cleanup();
5391	if (single_major)
5392		unregister_blkdev(rbd_major, RBD_DRV_NAME);
 
5393	rbd_slab_exit();
5394}
5395
5396module_init(rbd_init);
5397module_exit(rbd_exit);
5398
5399MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5400MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5401MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5402/* following authorship retained from original osdblk.c */
5403MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5404
5405MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
5406MODULE_LICENSE("GPL");
v6.13.7
   1
   2/*
   3   rbd.c -- Export ceph rados objects as a Linux block device
   4
   5
   6   based on drivers/block/osdblk.c:
   7
   8   Copyright 2009 Red Hat, Inc.
   9
  10   This program is free software; you can redistribute it and/or modify
  11   it under the terms of the GNU General Public License as published by
  12   the Free Software Foundation.
  13
  14   This program is distributed in the hope that it will be useful,
  15   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17   GNU General Public License for more details.
  18
  19   You should have received a copy of the GNU General Public License
  20   along with this program; see the file COPYING.  If not, write to
  21   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  22
  23
  24
  25   For usage instructions, please refer to:
  26
  27                 Documentation/ABI/testing/sysfs-bus-rbd
  28
  29 */
  30
  31#include <linux/ceph/libceph.h>
  32#include <linux/ceph/osd_client.h>
  33#include <linux/ceph/mon_client.h>
  34#include <linux/ceph/cls_lock_client.h>
  35#include <linux/ceph/striper.h>
  36#include <linux/ceph/decode.h>
  37#include <linux/fs_parser.h>
  38#include <linux/bsearch.h>
  39
  40#include <linux/kernel.h>
  41#include <linux/device.h>
  42#include <linux/module.h>
  43#include <linux/blk-mq.h>
  44#include <linux/fs.h>
  45#include <linux/blkdev.h>
  46#include <linux/slab.h>
  47#include <linux/idr.h>
  48#include <linux/workqueue.h>
  49
  50#include "rbd_types.h"
  51
  52#define RBD_DEBUG	/* Activate rbd_assert() calls */
  53
  54/*
  55 * Increment the given counter and return its updated value.
  56 * If the counter is already 0 it will not be incremented.
  57 * If the counter is already at its maximum value returns
  58 * -EINVAL without updating it.
  59 */
  60static int atomic_inc_return_safe(atomic_t *v)
  61{
  62	unsigned int counter;
  63
  64	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
  65	if (counter <= (unsigned int)INT_MAX)
  66		return (int)counter;
  67
  68	atomic_dec(v);
  69
  70	return -EINVAL;
  71}
  72
  73/* Decrement the counter.  Return the resulting value, or -EINVAL */
  74static int atomic_dec_return_safe(atomic_t *v)
  75{
  76	int counter;
  77
  78	counter = atomic_dec_return(v);
  79	if (counter >= 0)
  80		return counter;
  81
  82	atomic_inc(v);
  83
  84	return -EINVAL;
  85}
  86
  87#define RBD_DRV_NAME "rbd"
  88
  89#define RBD_MINORS_PER_MAJOR		256
  90#define RBD_SINGLE_MAJOR_PART_SHIFT	4
  91
  92#define RBD_MAX_PARENT_CHAIN_LEN	16
  93
  94#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
  95#define RBD_MAX_SNAP_NAME_LEN	\
  96			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
  97
  98#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
  99
 100#define RBD_SNAP_HEAD_NAME	"-"
 101
 102#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */
 103
 104/* This allows a single page to hold an image name sent by OSD */
 105#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
 106#define RBD_IMAGE_ID_LEN_MAX	64
 107
 108#define RBD_OBJ_PREFIX_LEN_MAX	64
 109
 110#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
 111#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
 112
 113/* Feature bits */
 114
 115#define RBD_FEATURE_LAYERING		(1ULL<<0)
 116#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
 117#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
 118#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
 119#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
 120#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
 121#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
 122#define RBD_FEATURE_OPERATIONS		(1ULL<<8)
 123
 124#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
 125				 RBD_FEATURE_STRIPINGV2 |	\
 126				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
 127				 RBD_FEATURE_OBJECT_MAP |	\
 128				 RBD_FEATURE_FAST_DIFF |	\
 129				 RBD_FEATURE_DEEP_FLATTEN |	\
 130				 RBD_FEATURE_DATA_POOL |	\
 131				 RBD_FEATURE_OPERATIONS)
 132
 133/* Features supported by this (client software) implementation. */
 134
 135#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
 136
 137/*
 138 * An RBD device name will be "rbd#", where the "rbd" comes from
 139 * RBD_DRV_NAME above, and # is a unique integer identifier.
 140 */
 141#define DEV_NAME_LEN		32
 
 142
 143/*
 144 * block device image metadata (in-memory version)
 145 */
 146struct rbd_image_header {
 147	/* These six fields never change for a given rbd image */
 148	char *object_prefix;
 149	__u8 obj_order;
 150	u64 stripe_unit;
 151	u64 stripe_count;
 152	s64 data_pool_id;
 153	u64 features;		/* Might be changeable someday? */
 154
 155	/* The remaining fields need to be updated occasionally */
 156	u64 image_size;
 157	struct ceph_snap_context *snapc;
 158	char *snap_names;	/* format 1 only */
 159	u64 *snap_sizes;	/* format 1 only */
 160};
 161
 162/*
 163 * An rbd image specification.
 164 *
 165 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 166 * identify an image.  Each rbd_dev structure includes a pointer to
 167 * an rbd_spec structure that encapsulates this identity.
 168 *
 169 * Each of the id's in an rbd_spec has an associated name.  For a
 170 * user-mapped image, the names are supplied and the id's associated
 171 * with them are looked up.  For a layered image, a parent image is
 172 * defined by the tuple, and the names are looked up.
 173 *
 174 * An rbd_dev structure contains a parent_spec pointer which is
 175 * non-null if the image it represents is a child in a layered
 176 * image.  This pointer will refer to the rbd_spec structure used
 177 * by the parent rbd_dev for its own identity (i.e., the structure
 178 * is shared between the parent and child).
 179 *
 180 * Since these structures are populated once, during the discovery
 181 * phase of image construction, they are effectively immutable so
 182 * we make no effort to synchronize access to them.
 183 *
 184 * Note that code herein does not assume the image name is known (it
 185 * could be a null pointer).
 186 */
 187struct rbd_spec {
 188	u64		pool_id;
 189	const char	*pool_name;
 190	const char	*pool_ns;	/* NULL if default, never "" */
 191
 192	const char	*image_id;
 193	const char	*image_name;
 194
 195	u64		snap_id;
 196	const char	*snap_name;
 197
 198	struct kref	kref;
 199};
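/*
 * Illustrative example (names made up): mapping image "foo" in pool
 * "mypool" at HEAD results in a spec with pool_name "mypool",
 * image_name "foo", snap_name "-" (RBD_SNAP_HEAD_NAME) and snap_id
 * CEPH_NOSNAP; the corresponding ids are looked up during image probe.
 */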
 200
 201/*
 202 * an instance of the client.  multiple devices may share an rbd client.
 203 */
 204struct rbd_client {
 205	struct ceph_client	*client;
 206	struct kref		kref;
 207	struct list_head	node;
 208};
 209
 210struct pending_result {
 211	int			result;		/* first nonzero result */
 212	int			num_pending;
 213};
 214
 215struct rbd_img_request;
 
 216
 217enum obj_request_type {
 218	OBJ_REQUEST_NODATA = 1,
 219	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
 220	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
 221	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
 222};
 223
 224enum obj_operation_type {
 225	OBJ_OP_READ = 1,
 226	OBJ_OP_WRITE,
 227	OBJ_OP_DISCARD,
 228	OBJ_OP_ZEROOUT,
 229};
 230
 231#define RBD_OBJ_FLAG_DELETION			(1U << 0)
 232#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
 233#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
 234#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
 235#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)
 236
 237enum rbd_obj_read_state {
 238	RBD_OBJ_READ_START = 1,
 239	RBD_OBJ_READ_OBJECT,
 240	RBD_OBJ_READ_PARENT,
 241};
 242
 243/*
 244 * Writes go through the following state machine to deal with
 245 * layering:
 246 *
 247 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 248 *            .                 |                                    .
 249 *            .                 v                                    .
 250 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 251 *            .                 |                    .               .
 252 *            .                 v                    v (deep-copyup  .
 253 *    (image  .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 254 * flattened) v                 |                    .               .
 255 *            .                 v                    .               .
 256 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 257 *                              |                        not needed) v
 258 *                              v                                    .
 259 *                            done . . . . . . . . . . . . . . . . . .
 260 *                              ^
 261 *                              |
 262 *                     RBD_OBJ_WRITE_FLAT
 263 *
 264 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 265 * assert_exists guard is needed or not (in some cases it's not needed
 266 * even if there is a parent).
 267 */
 268enum rbd_obj_write_state {
 269	RBD_OBJ_WRITE_START = 1,
 270	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
 271	RBD_OBJ_WRITE_OBJECT,
 272	__RBD_OBJ_WRITE_COPYUP,
 273	RBD_OBJ_WRITE_COPYUP,
 274	RBD_OBJ_WRITE_POST_OBJECT_MAP,
 275};
 276
 277enum rbd_obj_copyup_state {
 278	RBD_OBJ_COPYUP_START = 1,
 279	RBD_OBJ_COPYUP_READ_PARENT,
 280	__RBD_OBJ_COPYUP_OBJECT_MAPS,
 281	RBD_OBJ_COPYUP_OBJECT_MAPS,
 282	__RBD_OBJ_COPYUP_WRITE_OBJECT,
 283	RBD_OBJ_COPYUP_WRITE_OBJECT,
 284};
 285
 286struct rbd_obj_request {
 287	struct ceph_object_extent ex;
 288	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
 289	union {
 290		enum rbd_obj_read_state	 read_state;	/* for reads */
 291		enum rbd_obj_write_state write_state;	/* for writes */
 292	};
 
 293
 294	struct rbd_img_request	*img_request;
 295	struct ceph_file_extent	*img_extents;
 296	u32			num_img_extents;
 297
 298	union {
 299		struct ceph_bio_iter	bio_pos;
 300		struct {
 301			struct ceph_bvec_iter	bvec_pos;
 302			u32			bvec_count;
 303			u32			bvec_idx;
 304		};
 305	};
 306
 307	enum rbd_obj_copyup_state copyup_state;
 308	struct bio_vec		*copyup_bvecs;
 309	u32			copyup_bvec_count;
 310
 311	struct list_head	osd_reqs;	/* w/ r_private_item */
 312
 313	struct mutex		state_mutex;
 314	struct pending_result	pending;
 315	struct kref		kref;
 316};
 317
 318enum img_req_flags {
 
 319	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
 320	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
 321};
 322
 323enum rbd_img_state {
 324	RBD_IMG_START = 1,
 325	RBD_IMG_EXCLUSIVE_LOCK,
 326	__RBD_IMG_OBJECT_REQUESTS,
 327	RBD_IMG_OBJECT_REQUESTS,
 328};
 329
 330struct rbd_img_request {
 331	struct rbd_device	*rbd_dev;
 332	enum obj_operation_type	op_type;
 333	enum obj_request_type	data_type;
 334	unsigned long		flags;
 335	enum rbd_img_state	state;
 336	union {
 337		u64			snap_id;	/* for reads */
 338		struct ceph_snap_context *snapc;	/* for writes */
 339	};
 340	struct rbd_obj_request	*obj_request;	/* obj req initiator */
 341
 342	struct list_head	lock_item;
 343	struct list_head	object_extents;	/* obj_req.ex structs */
 344
 345	struct mutex		state_mutex;
 346	struct pending_result	pending;
 347	struct work_struct	work;
 348	int			work_result;
 349};
 350
 351#define for_each_obj_request(ireq, oreq) \
 352	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
 353#define for_each_obj_request_safe(ireq, oreq, n) \
 354	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
 355
 356enum rbd_watch_state {
 357	RBD_WATCH_STATE_UNREGISTERED,
 358	RBD_WATCH_STATE_REGISTERED,
 359	RBD_WATCH_STATE_ERROR,
 360};
 361
 362enum rbd_lock_state {
 363	RBD_LOCK_STATE_UNLOCKED,
 364	RBD_LOCK_STATE_LOCKED,
 365	RBD_LOCK_STATE_QUIESCING,
 366};
 367
 368/* WatchNotify::ClientId */
 369struct rbd_client_id {
 370	u64 gid;
 371	u64 handle;
 372};
 373
 374struct rbd_mapping {
 375	u64                     size;
 376};
 377
 378/*
 379 * a single device
 380 */
 381struct rbd_device {
 382	int			dev_id;		/* blkdev unique id */
 383
 384	int			major;		/* blkdev assigned major */
 385	int			minor;
 386	struct gendisk		*disk;		/* blkdev's gendisk and rq */
 387
 388	u32			image_format;	/* Either 1 or 2 */
 389	struct rbd_client	*rbd_client;
 390
 391	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
 392
 393	spinlock_t		lock;		/* queue, flags, open_count */
 394
 395	struct rbd_image_header	header;
 396	unsigned long		flags;		/* possibly lock protected */
 397	struct rbd_spec		*spec;
 398	struct rbd_options	*opts;
 399	char			*config_info;	/* add{,_single_major} string */
 400
 401	struct ceph_object_id	header_oid;
 402	struct ceph_object_locator header_oloc;
 403
 404	struct ceph_file_layout	layout;		/* used for all rbd requests */
 405
 406	struct mutex		watch_mutex;
 407	enum rbd_watch_state	watch_state;
 408	struct ceph_osd_linger_request *watch_handle;
 409	u64			watch_cookie;
 410	struct delayed_work	watch_dwork;
 411
 412	struct rw_semaphore	lock_rwsem;
 413	enum rbd_lock_state	lock_state;
 414	char			lock_cookie[32];
 415	struct rbd_client_id	owner_cid;
 416	struct work_struct	acquired_lock_work;
 417	struct work_struct	released_lock_work;
 418	struct delayed_work	lock_dwork;
 419	struct work_struct	unlock_work;
 420	spinlock_t		lock_lists_lock;
 421	struct list_head	acquiring_list;
 422	struct list_head	running_list;
 423	struct completion	acquire_wait;
 424	int			acquire_err;
 425	struct completion	quiescing_wait;
 426
 427	spinlock_t		object_map_lock;
 428	u8			*object_map;
 429	u64			object_map_size;	/* in objects */
 430	u64			object_map_flags;
 431
 432	struct workqueue_struct	*task_wq;
 
 433
 434	struct rbd_spec		*parent_spec;
 435	u64			parent_overlap;
 436	atomic_t		parent_ref;
 437	struct rbd_device	*parent;
 438
 439	/* Block layer tags. */
 440	struct blk_mq_tag_set	tag_set;
 441
 442	/* protects updating the header */
 443	struct rw_semaphore     header_rwsem;
 444
 445	struct rbd_mapping	mapping;
 446
 447	struct list_head	node;
 448
 449	/* sysfs related */
 450	struct device		dev;
 451	unsigned long		open_count;	/* protected by lock */
 452};
 453
 454/*
 455 * Flag bits for rbd_dev->flags:
 456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 457 *   by rbd_dev->lock
 458 */
 459enum rbd_dev_flags {
 460	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
 461	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
 462	RBD_DEV_FLAG_READONLY,  /* -o ro or snapshot */
 463};
 464
 465static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */
 466
 467static LIST_HEAD(rbd_dev_list);    /* devices */
 468static DEFINE_SPINLOCK(rbd_dev_list_lock);
 469
 470static LIST_HEAD(rbd_client_list);		/* clients */
 471static DEFINE_SPINLOCK(rbd_client_list_lock);
 472
 473/* Slab caches for frequently-allocated structures */
 474
 475static struct kmem_cache	*rbd_img_request_cache;
 476static struct kmem_cache	*rbd_obj_request_cache;
 
 477
 478static int rbd_major;
 479static DEFINE_IDA(rbd_dev_id_ida);
 480
 481static struct workqueue_struct *rbd_wq;
 482
 483static struct ceph_snap_context rbd_empty_snapc = {
 484	.nref = REFCOUNT_INIT(1),
 485};
 486
 487/*
 488 * single-major requires >= 0.75 version of userspace rbd utility.
 
 489 */
 490static bool single_major = true;
 491module_param(single_major, bool, 0444);
 492MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
 493
 494static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count);
 495static ssize_t remove_store(const struct bus_type *bus, const char *buf,
 496			    size_t count);
 497static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
 498				      size_t count);
 499static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
 500					 size_t count);
 501static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
 502
 503static int rbd_dev_id_to_minor(int dev_id)
 504{
 505	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
 506}
 507
 508static int minor_to_rbd_dev_id(int minor)
 509{
 510	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
 511}
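/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4 each device
 * owns 16 minors (the whole device plus up to 15 partitions), so
 * dev_id 3 starts at minor 48 and minors 48..63 map back to dev_id 3.
 */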
 512
 513static bool rbd_is_ro(struct rbd_device *rbd_dev)
 514{
 515	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
 516}
 517
 518static bool rbd_is_snap(struct rbd_device *rbd_dev)
 519{
 520	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
 521}
 522
 523static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
 524{
 525	lockdep_assert_held(&rbd_dev->lock_rwsem);
 526
 527	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
 528	       rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING;
 529}
 530
 531static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
 532{
 533	bool is_lock_owner;
 534
 535	down_read(&rbd_dev->lock_rwsem);
 536	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
 537	up_read(&rbd_dev->lock_rwsem);
 538	return is_lock_owner;
 539}
 540
 541static ssize_t supported_features_show(const struct bus_type *bus, char *buf)
 542{
 543	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
 544}
 545
 546static BUS_ATTR_WO(add);
 547static BUS_ATTR_WO(remove);
 548static BUS_ATTR_WO(add_single_major);
 549static BUS_ATTR_WO(remove_single_major);
 550static BUS_ATTR_RO(supported_features);
 551
 552static struct attribute *rbd_bus_attrs[] = {
 553	&bus_attr_add.attr,
 554	&bus_attr_remove.attr,
 555	&bus_attr_add_single_major.attr,
 556	&bus_attr_remove_single_major.attr,
 557	&bus_attr_supported_features.attr,
 558	NULL,
 559};
 560
 561static umode_t rbd_bus_is_visible(struct kobject *kobj,
 562				  struct attribute *attr, int index)
 563{
 564	if (!single_major &&
 565	    (attr == &bus_attr_add_single_major.attr ||
 566	     attr == &bus_attr_remove_single_major.attr))
 567		return 0;
 568
 569	return attr->mode;
 570}
 571
 572static const struct attribute_group rbd_bus_group = {
 573	.attrs = rbd_bus_attrs,
 574	.is_visible = rbd_bus_is_visible,
 575};
 576__ATTRIBUTE_GROUPS(rbd_bus);
 577
 578static const struct bus_type rbd_bus_type = {
 579	.name		= "rbd",
 580	.bus_groups	= rbd_bus_groups,
 581};
 582
 583static void rbd_root_dev_release(struct device *dev)
 584{
 585}
 586
 587static struct device rbd_root_dev = {
 588	.init_name =    "rbd",
 589	.release =      rbd_root_dev_release,
 590};
 591
 592static __printf(2, 3)
 593void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 594{
 595	struct va_format vaf;
 596	va_list args;
 597
 598	va_start(args, fmt);
 599	vaf.fmt = fmt;
 600	vaf.va = &args;
 601
 602	if (!rbd_dev)
 603		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
 604	else if (rbd_dev->disk)
 605		printk(KERN_WARNING "%s: %s: %pV\n",
 606			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
 607	else if (rbd_dev->spec && rbd_dev->spec->image_name)
 608		printk(KERN_WARNING "%s: image %s: %pV\n",
 609			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
 610	else if (rbd_dev->spec && rbd_dev->spec->image_id)
 611		printk(KERN_WARNING "%s: id %s: %pV\n",
 612			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
 613	else	/* punt */
 614		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
 615			RBD_DRV_NAME, rbd_dev, &vaf);
 616	va_end(args);
 617}
 618
 619#ifdef RBD_DEBUG
 620#define rbd_assert(expr)						\
 621		if (unlikely(!(expr))) {				\
 622			printk(KERN_ERR "\nAssertion failure in %s() "	\
 623						"at line %d:\n\n"	\
 624					"\trbd_assert(%s);\n\n",	\
 625					__func__, __LINE__, #expr);	\
 626			BUG();						\
 627		}
 628#else /* !RBD_DEBUG */
 629#  define rbd_assert(expr)	((void) 0)
 630#endif /* !RBD_DEBUG */
 631
 632static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 633
 634static int rbd_dev_refresh(struct rbd_device *rbd_dev);
 635static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
 636				     struct rbd_image_header *header);
 637static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
 638					u64 snap_id);
 639static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
 640				u8 *order, u64 *snap_size);
 641static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
 642
 643static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
 644static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
 645
 646/*
 647 * Return true if nothing else is pending.
 648 */
 649static bool pending_result_dec(struct pending_result *pending, int *result)
 650{
 651	rbd_assert(pending->num_pending > 0);
 
 652
 653	if (*result && !pending->result)
 654		pending->result = *result;
 655	if (--pending->num_pending)
 656		return false;
 657
 658	*result = pending->result;
 659	return true;
 660}
 661
 662static int rbd_open(struct gendisk *disk, blk_mode_t mode)
 663{
 664	struct rbd_device *rbd_dev = disk->private_data;
 665	bool removing = false;
 666
 667	spin_lock_irq(&rbd_dev->lock);
 668	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
 669		removing = true;
 670	else
 671		rbd_dev->open_count++;
 672	spin_unlock_irq(&rbd_dev->lock);
 673	if (removing)
 674		return -ENOENT;
 675
 676	(void) get_device(&rbd_dev->dev);
 
 677
 678	return 0;
 679}
 680
 681static void rbd_release(struct gendisk *disk)
 682{
 683	struct rbd_device *rbd_dev = disk->private_data;
 684	unsigned long open_count_before;
 685
 686	spin_lock_irq(&rbd_dev->lock);
 687	open_count_before = rbd_dev->open_count--;
 688	spin_unlock_irq(&rbd_dev->lock);
 689	rbd_assert(open_count_before > 0);
 690
 691	put_device(&rbd_dev->dev);
 692}
 693
 694static const struct block_device_operations rbd_bd_ops = {
 695	.owner			= THIS_MODULE,
 696	.open			= rbd_open,
 697	.release		= rbd_release,
 698};
 699
 700/*
 701 * Initialize an rbd client instance.  Success or not, this function
 702 * consumes ceph_opts.  Caller holds client_mutex.
 703 */
 704static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
 705{
 706	struct rbd_client *rbdc;
 707	int ret = -ENOMEM;
 708
 709	dout("%s:\n", __func__);
 710	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
 711	if (!rbdc)
 712		goto out_opt;
 713
 714	kref_init(&rbdc->kref);
 715	INIT_LIST_HEAD(&rbdc->node);
 716
 717	rbdc->client = ceph_create_client(ceph_opts, rbdc);
 718	if (IS_ERR(rbdc->client))
 719		goto out_rbdc;
 720	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
 721
 722	ret = ceph_open_session(rbdc->client);
 723	if (ret < 0)
 724		goto out_client;
 725
 726	spin_lock(&rbd_client_list_lock);
 727	list_add_tail(&rbdc->node, &rbd_client_list);
 728	spin_unlock(&rbd_client_list_lock);
 729
 730	dout("%s: rbdc %p\n", __func__, rbdc);
 731
 732	return rbdc;
 733out_client:
 734	ceph_destroy_client(rbdc->client);
 735out_rbdc:
 736	kfree(rbdc);
 737out_opt:
 738	if (ceph_opts)
 739		ceph_destroy_options(ceph_opts);
 740	dout("%s: error %d\n", __func__, ret);
 741
 742	return ERR_PTR(ret);
 743}
 744
 745static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
 746{
 747	kref_get(&rbdc->kref);
 748
 749	return rbdc;
 750}
 751
 752/*
 753 * Find a ceph client with specific addr and configuration.  If
 754 * found, bump its reference count.
 755 */
 756static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
 757{
 758	struct rbd_client *rbdc = NULL, *iter;
 
 759
 760	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
 761		return NULL;
 762
 763	spin_lock(&rbd_client_list_lock);
 764	list_for_each_entry(iter, &rbd_client_list, node) {
 765		if (!ceph_compare_options(ceph_opts, iter->client)) {
 766			__rbd_get_client(iter);
 767
 768			rbdc = iter;
 769			break;
 770		}
 771	}
 772	spin_unlock(&rbd_client_list_lock);
 773
 774	return rbdc;
 775}
 776
 777/*
 778 * (Per device) rbd map options
 779 */
 780enum {
 781	Opt_queue_depth,
 782	Opt_alloc_size,
 783	Opt_lock_timeout,
 784	/* int args above */
 785	Opt_pool_ns,
 786	Opt_compression_hint,
 787	/* string args above */
 788	Opt_read_only,
 789	Opt_read_write,
 790	Opt_lock_on_read,
 791	Opt_exclusive,
 792	Opt_notrim,
 793};
 794
 795enum {
 796	Opt_compression_hint_none,
 797	Opt_compression_hint_compressible,
 798	Opt_compression_hint_incompressible,
 799};
 800
 801static const struct constant_table rbd_param_compression_hint[] = {
 802	{"none",		Opt_compression_hint_none},
 803	{"compressible",	Opt_compression_hint_compressible},
 804	{"incompressible",	Opt_compression_hint_incompressible},
 805	{}
 806};
 807
 808static const struct fs_parameter_spec rbd_parameters[] = {
 809	fsparam_u32	("alloc_size",			Opt_alloc_size),
 810	fsparam_enum	("compression_hint",		Opt_compression_hint,
 811			 rbd_param_compression_hint),
 812	fsparam_flag	("exclusive",			Opt_exclusive),
 813	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
 814	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
 815	fsparam_flag	("notrim",			Opt_notrim),
 816	fsparam_string	("_pool_ns",			Opt_pool_ns),
 817	fsparam_u32	("queue_depth",			Opt_queue_depth),
 818	fsparam_flag	("read_only",			Opt_read_only),
 819	fsparam_flag	("read_write",			Opt_read_write),
 820	fsparam_flag	("ro",				Opt_read_only),
 821	fsparam_flag	("rw",				Opt_read_write),
 822	{}
 823};
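/*
 * These per-mapping options are parsed from the options token of the
 * string written to the sysfs add interface, together with the libceph
 * options.  Illustrative example (see
 * Documentation/ABI/testing/sysfs-bus-rbd for the authoritative format):
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key>,queue_depth=256,ro mypool myimage -" \
 *       > /sys/bus/rbd/add            (or add_single_major if single_major is set)
 */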
 824
 825struct rbd_options {
 826	int	queue_depth;
 827	int	alloc_size;
 828	unsigned long	lock_timeout;
 829	bool	read_only;
 830	bool	lock_on_read;
 831	bool	exclusive;
 832	bool	trim;
 833
 834	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
 835};
 836
 837#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_DEFAULT_RQ
 838#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
 839#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
 840#define RBD_READ_ONLY_DEFAULT	false
 841#define RBD_LOCK_ON_READ_DEFAULT false
 842#define RBD_EXCLUSIVE_DEFAULT	false
 843#define RBD_TRIM_DEFAULT	true
 844
 845struct rbd_parse_opts_ctx {
 846	struct rbd_spec		*spec;
 847	struct ceph_options	*copts;
 848	struct rbd_options	*opts;
 849};
 850
 851static char* obj_op_name(enum obj_operation_type op_type)
 852{
 853	switch (op_type) {
 854	case OBJ_OP_READ:
 855		return "read";
 856	case OBJ_OP_WRITE:
 857		return "write";
 858	case OBJ_OP_DISCARD:
 859		return "discard";
 860	case OBJ_OP_ZEROOUT:
 861		return "zeroout";
 862	default:
 863		return "???";
 
 864	}
 865}
 866
 867/*
 868 * Destroy ceph client
 869 *
 870 * Caller must hold rbd_client_list_lock.
 871 */
 872static void rbd_client_release(struct kref *kref)
 873{
 874	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 875
 876	dout("%s: rbdc %p\n", __func__, rbdc);
 877	spin_lock(&rbd_client_list_lock);
 878	list_del(&rbdc->node);
 879	spin_unlock(&rbd_client_list_lock);
 880
 881	ceph_destroy_client(rbdc->client);
 882	kfree(rbdc);
 883}
 884
 885/*
 886 * Drop reference to ceph client node. If it's not referenced anymore, release
 887 * it.
 888 */
 889static void rbd_put_client(struct rbd_client *rbdc)
 890{
 891	if (rbdc)
 892		kref_put(&rbdc->kref, rbd_client_release);
 893}
 894
 895/*
 896 * Get a ceph client with specific addr and configuration, if one does
 897 * not exist create it.  Either way, ceph_opts is consumed by this
 898 * function.
 899 */
 900static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
 901{
 902	struct rbd_client *rbdc;
 903	int ret;
 904
 905	mutex_lock(&client_mutex);
 906	rbdc = rbd_client_find(ceph_opts);
 907	if (rbdc) {
 908		ceph_destroy_options(ceph_opts);
 909
 910		/*
 911		 * Using an existing client.  Make sure ->pg_pools is up to
 912		 * date before we look up the pool id in do_rbd_add().
 913		 */
 914		ret = ceph_wait_for_latest_osdmap(rbdc->client,
 915					rbdc->client->options->mount_timeout);
 916		if (ret) {
 917			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
 918			rbd_put_client(rbdc);
 919			rbdc = ERR_PTR(ret);
 920		}
 921	} else {
 922		rbdc = rbd_client_create(ceph_opts);
 923	}
 924	mutex_unlock(&client_mutex);
 925
 926	return rbdc;
 927}
 928
 929static bool rbd_image_format_valid(u32 image_format)
 930{
 931	return image_format == 1 || image_format == 2;
 932}
 933
 934static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
 935{
 936	size_t size;
 937	u32 snap_count;
 938
 939	/* The header has to start with the magic rbd header text */
 940	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
 941		return false;
 942
 943	/* The bio layer requires at least sector-sized I/O */
 944
 945	if (ondisk->options.order < SECTOR_SHIFT)
 946		return false;
 947
 948	/* If we use u64 in a few spots we may be able to loosen this */
 949
 950	if (ondisk->options.order > 8 * sizeof (int) - 1)
 951		return false;
 952
 953	/*
 954	 * The size of a snapshot header has to fit in a size_t, and
 955	 * that limits the number of snapshots.
 956	 */
 957	snap_count = le32_to_cpu(ondisk->snap_count);
 958	size = SIZE_MAX - sizeof (struct ceph_snap_context);
 959	if (snap_count > size / sizeof (__le64))
 960		return false;
 961
 962	/*
963	 * Not only that, but the size of the entire snapshot
 964	 * header must also be representable in a size_t.
 965	 */
 966	size -= snap_count * sizeof (__le64);
 967	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
 968		return false;
 969
 970	return true;
 971}
 972
 973/*
 974 * returns the size of an object in the image
 975 */
 976static u32 rbd_obj_bytes(struct rbd_image_header *header)
 977{
 978	return 1U << header->obj_order;
 979}
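/*
 * obj_order is the log2 of the backing object size; images are
 * commonly created with an order of 22, i.e. 4 MiB objects.
 */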
 980
 981static void rbd_init_layout(struct rbd_device *rbd_dev)
 982{
 983	if (rbd_dev->header.stripe_unit == 0 ||
 984	    rbd_dev->header.stripe_count == 0) {
 985		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
 986		rbd_dev->header.stripe_count = 1;
 987	}
 988
 989	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
 990	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
 991	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
 992	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
 993			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
 994	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
 995}
 996
 997static void rbd_image_header_cleanup(struct rbd_image_header *header)
 998{
 999	kfree(header->object_prefix);
1000	ceph_put_snap_context(header->snapc);
1001	kfree(header->snap_sizes);
1002	kfree(header->snap_names);
1003
1004	memset(header, 0, sizeof(*header));
1005}
1006
1007/*
1008 * Fill an rbd image header with information from the given format 1
1009 * on-disk header.
1010 */
1011static int rbd_header_from_disk(struct rbd_image_header *header,
1012				struct rbd_image_header_ondisk *ondisk,
1013				bool first_time)
1014{
1015	struct ceph_snap_context *snapc;
1016	char *object_prefix = NULL;
1017	char *snap_names = NULL;
1018	u64 *snap_sizes = NULL;
1019	u32 snap_count;
 
1020	int ret = -ENOMEM;
1021	u32 i;
1022
1023	/* Allocate this now to avoid having to handle failure below */
1024
1025	if (first_time) {
1026		object_prefix = kstrndup(ondisk->object_prefix,
1027					 sizeof(ondisk->object_prefix),
1028					 GFP_KERNEL);
1029		if (!object_prefix)
1030			return -ENOMEM;
1031	}
1032
1033	/* Allocate the snapshot context and fill it in */
1034
1035	snap_count = le32_to_cpu(ondisk->snap_count);
1036	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1037	if (!snapc)
1038		goto out_err;
1039	snapc->seq = le64_to_cpu(ondisk->snap_seq);
1040	if (snap_count) {
1041		struct rbd_image_snap_ondisk *snaps;
1042		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1043
1044		/* We'll keep a copy of the snapshot names... */
1045
1046		if (snap_names_len > (u64)SIZE_MAX)
1047			goto out_2big;
1048		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1049		if (!snap_names)
1050			goto out_err;
1051
1052		/* ...as well as the array of their sizes. */
1053		snap_sizes = kmalloc_array(snap_count,
1054					   sizeof(*header->snap_sizes),
1055					   GFP_KERNEL);
1056		if (!snap_sizes)
1057			goto out_err;
1058
1059		/*
1060		 * Copy the names, and fill in each snapshot's id
1061		 * and size.
1062		 *
1063		 * Note that rbd_dev_v1_header_info() guarantees the
1064		 * ondisk buffer we're working with has
1065		 * snap_names_len bytes beyond the end of the
1066		 * snapshot id array, so this memcpy() is safe.
1067		 */
1068		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1069		snaps = ondisk->snaps;
1070		for (i = 0; i < snap_count; i++) {
1071			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1072			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1073		}
1074	}
1075
1076	/* We won't fail any more, fill in the header */
1077
1078	if (first_time) {
1079		header->object_prefix = object_prefix;
1080		header->obj_order = ondisk->options.order;
1081	}
1082
1083	/* The remaining fields always get updated (when we refresh) */
1084
1085	header->image_size = le64_to_cpu(ondisk->image_size);
1086	header->snapc = snapc;
1087	header->snap_names = snap_names;
1088	header->snap_sizes = snap_sizes;
1089
1090	return 0;
1091out_2big:
1092	ret = -EIO;
1093out_err:
1094	kfree(snap_sizes);
1095	kfree(snap_names);
1096	ceph_put_snap_context(snapc);
1097	kfree(object_prefix);
1098
1099	return ret;
1100}
1101
1102static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1103{
1104	const char *snap_name;
1105
1106	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1107
1108	/* Skip over names until we find the one we are looking for */
1109
1110	snap_name = rbd_dev->header.snap_names;
1111	while (which--)
1112		snap_name += strlen(snap_name) + 1;
1113
1114	return kstrdup(snap_name, GFP_KERNEL);
1115}
1116
1117/*
1118 * Snapshot id comparison function for use with qsort()/bsearch().
1119 * Note that result is for snapshots in *descending* order.
1120 */
1121static int snapid_compare_reverse(const void *s1, const void *s2)
1122{
1123	u64 snap_id1 = *(u64 *)s1;
1124	u64 snap_id2 = *(u64 *)s2;
1125
1126	if (snap_id1 < snap_id2)
1127		return 1;
1128	return snap_id1 == snap_id2 ? 0 : -1;
1129}
1130
1131/*
1132 * Search a snapshot context to see if the given snapshot id is
1133 * present.
1134 *
1135 * Returns the position of the snapshot id in the array if it's found,
1136 * or BAD_SNAP_INDEX otherwise.
1137 *
1138 * Note: The snapshot array is kept sorted (by the osd) in
1139 * reverse order, highest snapshot id first.
1140 */
1141static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1142{
1143	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1144	u64 *found;
1145
1146	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1147				sizeof (snap_id), snapid_compare_reverse);
1148
1149	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1150}
1151
1152static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1153					u64 snap_id)
1154{
1155	u32 which;
1156	const char *snap_name;
1157
1158	which = rbd_dev_snap_index(rbd_dev, snap_id);
1159	if (which == BAD_SNAP_INDEX)
1160		return ERR_PTR(-ENOENT);
1161
1162	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1163	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1164}
1165
1166static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1167{
1168	if (snap_id == CEPH_NOSNAP)
1169		return RBD_SNAP_HEAD_NAME;
1170
1171	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1172	if (rbd_dev->image_format == 1)
1173		return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1174
1175	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1176}
1177
1178static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1179				u64 *snap_size)
1180{
1181	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1182	if (snap_id == CEPH_NOSNAP) {
1183		*snap_size = rbd_dev->header.image_size;
1184	} else if (rbd_dev->image_format == 1) {
1185		u32 which;
1186
1187		which = rbd_dev_snap_index(rbd_dev, snap_id);
1188		if (which == BAD_SNAP_INDEX)
1189			return -ENOENT;
1190
1191		*snap_size = rbd_dev->header.snap_sizes[which];
1192	} else {
1193		u64 size = 0;
1194		int ret;
1195
1196		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1197		if (ret)
1198			return ret;
1199
1200		*snap_size = size;
1201	}
1202	return 0;
1203}
1204
1205static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1206{
1207	u64 snap_id = rbd_dev->spec->snap_id;
1208	u64 size = 0;
 
1209	int ret;
1210
1211	ret = rbd_snap_size(rbd_dev, snap_id, &size);
1212	if (ret)
1213		return ret;
1214
1215	rbd_dev->mapping.size = size;
1216	return 0;
1217}
1218
1219static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1220{
1221	rbd_dev->mapping.size = 0;
 
1222}
1223
1224static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1225{
1226	struct ceph_bio_iter it = *bio_pos;
1227
1228	ceph_bio_iter_advance(&it, off);
1229	ceph_bio_iter_advance_step(&it, bytes, ({
1230		memzero_bvec(&bv);
1231	}));
1232}
1233
1234static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1235{
1236	struct ceph_bvec_iter it = *bvec_pos;
1237
1238	ceph_bvec_iter_advance(&it, off);
1239	ceph_bvec_iter_advance_step(&it, bytes, ({
1240		memzero_bvec(&bv);
1241	}));
1242}
1243
1244/*
1245 * Zero a range in @obj_req data buffer defined by a bio (list) or
1246 * (private) bio_vec array.
1247 *
1248 * @off is relative to the start of the data buffer.
1249 */
1250static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1251			       u32 bytes)
1252{
1253	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1254
1255	switch (obj_req->img_request->data_type) {
1256	case OBJ_REQUEST_BIO:
1257		zero_bios(&obj_req->bio_pos, off, bytes);
1258		break;
1259	case OBJ_REQUEST_BVECS:
1260	case OBJ_REQUEST_OWN_BVECS:
1261		zero_bvecs(&obj_req->bvec_pos, off, bytes);
1262		break;
1263	default:
1264		BUG();
1265	}
1266}
1267
1268static void rbd_obj_request_destroy(struct kref *kref);
1269static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1270{
1271	rbd_assert(obj_request != NULL);
1272	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1273		kref_read(&obj_request->kref));
1274	kref_put(&obj_request->kref, rbd_obj_request_destroy);
1275}
1276
1277static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1278					struct rbd_obj_request *obj_request)
1279{
1280	rbd_assert(obj_request->img_request == NULL);
1281
1282	/* Image request now owns object's original reference */
1283	obj_request->img_request = img_request;
1284	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1285}
1286
1287static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1288					struct rbd_obj_request *obj_request)
1289{
1290	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1291	list_del(&obj_request->ex.oe_item);
1292	rbd_assert(obj_request->img_request == img_request);
1293	rbd_obj_request_put(obj_request);
1294}
1295
1296static void rbd_osd_submit(struct ceph_osd_request *osd_req)
1297{
1298	struct rbd_obj_request *obj_req = osd_req->r_priv;
1299
1300	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1301	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1302	     obj_req->ex.oe_off, obj_req->ex.oe_len);
1303	ceph_osdc_start_request(osd_req->r_osdc, osd_req);
1304}
1305
1306/*
1307 * The default/initial value for all image request flags is 0.  Each
1308 * is conditionally set to 1 at image request initialization time
1309 * and currently never changes thereafter.
1310 */
1311static void img_request_layered_set(struct rbd_img_request *img_request)
1312{
1313	set_bit(IMG_REQ_LAYERED, &img_request->flags);
1314}
1315
1316static bool img_request_layered_test(struct rbd_img_request *img_request)
1317{
1318	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1319}
1320
1321static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1322{
1323	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1324
1325	return !obj_req->ex.oe_off &&
1326	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
1327}
1328
1329static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1330{
1331	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1332
1333	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1334					rbd_dev->layout.object_size;
1335}
1336
1337/*
1338 * Must be called after rbd_obj_calc_img_extents().
1339 */
1340static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
1341{
1342	rbd_assert(obj_req->img_request->snapc);
1343
1344	if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
1345		dout("%s %p objno %llu discard\n", __func__, obj_req,
1346		     obj_req->ex.oe_objno);
1347		return;
1348	}
1349
1350	if (!obj_req->num_img_extents) {
1351		dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
1352		     obj_req->ex.oe_objno);
1353		return;
1354	}
 
1355
1356	if (rbd_obj_is_entire(obj_req) &&
1357	    !obj_req->img_request->snapc->num_snaps) {
1358		dout("%s %p objno %llu entire\n", __func__, obj_req,
1359		     obj_req->ex.oe_objno);
1360		return;
1361	}
1362
1363	obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
1364}
1365
1366static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1367{
1368	return ceph_file_extents_bytes(obj_req->img_extents,
1369				       obj_req->num_img_extents);
1370}
1371
1372static bool rbd_img_is_write(struct rbd_img_request *img_req)
1373{
1374	switch (img_req->op_type) {
1375	case OBJ_OP_READ:
1376		return false;
1377	case OBJ_OP_WRITE:
1378	case OBJ_OP_DISCARD:
1379	case OBJ_OP_ZEROOUT:
1380		return true;
1381	default:
1382		BUG();
1383	}
1384}
1385
1386static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1387{
1388	struct rbd_obj_request *obj_req = osd_req->r_priv;
1389	int result;
1390
1391	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1392	     osd_req->r_result, obj_req);
1393
1394	/*
1395	 * Writes aren't allowed to return a data payload.  In some
1396	 * guarded write cases (e.g. stat + zero on an empty object)
1397	 * a stat response makes it through, but we don't care.
1398	 */
1399	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1400		result = 0;
1401	else
1402		result = osd_req->r_result;
1403
1404	rbd_obj_handle_request(obj_req, result);
1405}
1406
1407static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
1408{
1409	struct rbd_obj_request *obj_request = osd_req->r_priv;
1410	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1411	struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1412
1413	osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
1414	osd_req->r_snapid = obj_request->img_request->snap_id;
1415}
1416
1417static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
1418{
1419	struct rbd_obj_request *obj_request = osd_req->r_priv;
1420
1421	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1422	ktime_get_real_ts64(&osd_req->r_mtime);
1423	osd_req->r_data_offset = obj_request->ex.oe_off;
1424}
1425
1426static struct ceph_osd_request *
1427__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1428			  struct ceph_snap_context *snapc, int num_ops)
1429{
1430	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1431	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1432	struct ceph_osd_request *req;
1433	const char *name_format = rbd_dev->image_format == 1 ?
1434				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1435	int ret;
1436
1437	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
1438	if (!req)
1439		return ERR_PTR(-ENOMEM);
1440
1441	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1442	req->r_callback = rbd_osd_req_callback;
1443	req->r_priv = obj_req;
1444
1445	/*
1446	 * Data objects may be stored in a separate pool, but always in
1447	 * the same namespace in that pool as the header in its pool.
1448	 */
1449	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1450	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1451
1452	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1453			       rbd_dev->header.object_prefix,
1454			       obj_req->ex.oe_objno);
1455	if (ret)
1456		return ERR_PTR(ret);
1457
1458	return req;
1459}
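/*
 * Illustrative naming (image id made up): a format 2 image with
 * object_prefix "rbd_data.102f6b8b4567" would address objno 1 as
 * "rbd_data.102f6b8b4567.0000000000000001" (16 hex digits); format 1
 * images are assumed to use a shorter, 12-digit hex suffix.
 */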
1460
1461static struct ceph_osd_request *
1462rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1463{
1464	rbd_assert(obj_req->img_request->snapc);
1465	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1466					 num_ops);
1467}
1468
1469static struct rbd_obj_request *rbd_obj_request_create(void)
1470{
1471	struct rbd_obj_request *obj_request;
1472
1473	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1474	if (!obj_request)
1475		return NULL;
1476
1477	ceph_object_extent_init(&obj_request->ex);
1478	INIT_LIST_HEAD(&obj_request->osd_reqs);
1479	mutex_init(&obj_request->state_mutex);
1480	kref_init(&obj_request->kref);
1481
1482	dout("%s %p\n", __func__, obj_request);
1483	return obj_request;
1484}
1485
1486static void rbd_obj_request_destroy(struct kref *kref)
1487{
1488	struct rbd_obj_request *obj_request;
1489	struct ceph_osd_request *osd_req;
1490	u32 i;
1491
1492	obj_request = container_of(kref, struct rbd_obj_request, kref);
1493
1494	dout("%s: obj %p\n", __func__, obj_request);
1495
1496	while (!list_empty(&obj_request->osd_reqs)) {
1497		osd_req = list_first_entry(&obj_request->osd_reqs,
1498				    struct ceph_osd_request, r_private_item);
1499		list_del_init(&osd_req->r_private_item);
1500		ceph_osdc_put_request(osd_req);
1501	}
1502
1503	switch (obj_request->img_request->data_type) {
1504	case OBJ_REQUEST_NODATA:
1505	case OBJ_REQUEST_BIO:
1506	case OBJ_REQUEST_BVECS:
1507		break;		/* Nothing to do */
1508	case OBJ_REQUEST_OWN_BVECS:
1509		kfree(obj_request->bvec_pos.bvecs);
1510		break;
1511	default:
1512		BUG();
1513	}
1514
1515	kfree(obj_request->img_extents);
1516	if (obj_request->copyup_bvecs) {
1517		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1518			if (obj_request->copyup_bvecs[i].bv_page)
1519				__free_page(obj_request->copyup_bvecs[i].bv_page);
1520		}
1521		kfree(obj_request->copyup_bvecs);
1522	}
1523
1524	kmem_cache_free(rbd_obj_request_cache, obj_request);
1525}
1526
1527/* It's OK to call this for a device with no parent */
1528
1529static void rbd_spec_put(struct rbd_spec *spec);
1530static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1531{
1532	rbd_dev_remove_parent(rbd_dev);
1533	rbd_spec_put(rbd_dev->parent_spec);
1534	rbd_dev->parent_spec = NULL;
1535	rbd_dev->parent_overlap = 0;
1536}
1537
1538/*
1539 * Parent image reference counting is used to determine when an
1540 * image's parent fields can be safely torn down--after there are no
1541 * more in-flight requests to the parent image.  When the last
1542 * reference is dropped, cleaning them up is safe.
1543 */
1544static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1545{
1546	int counter;
1547
1548	if (!rbd_dev->parent_spec)
1549		return;
 
1550
1551	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1552	if (counter > 0)
1553		return;
1554
1555	/* Last reference; clean up parent data structures */
1556
1557	if (!counter)
1558		rbd_dev_unparent(rbd_dev);
1559	else
1560		rbd_warn(rbd_dev, "parent reference underflow");
1561}
1562
1563/*
1564 * If an image has a non-zero parent overlap, get a reference to its
1565 * parent.
1566 *
1567 * Returns true if the rbd device has a parent with a non-zero
1568 * overlap and a reference for it was successfully taken, or
1569 * false otherwise.
1570 */
1571static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1572{
1573	int counter = 0;
1574
1575	if (!rbd_dev->parent_spec)
1576		return false;
1577
1578	if (rbd_dev->parent_overlap)
1579		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1580
1581	if (counter < 0)
1582		rbd_warn(rbd_dev, "parent reference overflow");
1583
1584	return counter > 0;
1585}
1586
1587static void rbd_img_request_init(struct rbd_img_request *img_request,
1588				 struct rbd_device *rbd_dev,
1589				 enum obj_operation_type op_type)
1590{
1591	memset(img_request, 0, sizeof(*img_request));
1592
1593	img_request->rbd_dev = rbd_dev;
1594	img_request->op_type = op_type;
 
1595
1596	INIT_LIST_HEAD(&img_request->lock_item);
1597	INIT_LIST_HEAD(&img_request->object_extents);
1598	mutex_init(&img_request->state_mutex);
1599}
1600
1601/*
1602 * Only snap_id is captured here, for reads.  For writes, snapshot
1603 * context is captured in rbd_img_object_requests() after exclusive
1604 * lock is ensured to be held.
1605 */
1606static void rbd_img_capture_header(struct rbd_img_request *img_req)
1607{
1608	struct rbd_device *rbd_dev = img_req->rbd_dev;
 
1609
1610	lockdep_assert_held(&rbd_dev->header_rwsem);
1611
1612	if (!rbd_img_is_write(img_req))
1613		img_req->snap_id = rbd_dev->spec->snap_id;
1614
1615	if (rbd_dev_parent_get(rbd_dev))
1616		img_request_layered_set(img_req);
1617}
1618
1619static void rbd_img_request_destroy(struct rbd_img_request *img_request)
1620{
1621	struct rbd_obj_request *obj_request;
1622	struct rbd_obj_request *next_obj_request;
1623
1624	dout("%s: img %p\n", __func__, img_request);
1625
1626	WARN_ON(!list_empty(&img_request->lock_item));
1627	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1628		rbd_img_obj_request_del(img_request, obj_request);
1629
1630	if (img_request_layered_test(img_request))
1631		rbd_dev_parent_put(img_request->rbd_dev);
1632
1633	if (rbd_img_is_write(img_request))
1634		ceph_put_snap_context(img_request->snapc);
1635
1636	if (test_bit(IMG_REQ_CHILD, &img_request->flags))
1637		kmem_cache_free(rbd_img_request_cache, img_request);
1638}
1639
1640#define BITS_PER_OBJ	2
1641#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
1642#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)
1643
1644static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1645				   u64 *index, u8 *shift)
1646{
1647	u32 off;
 
1648
1649	rbd_assert(objno < rbd_dev->object_map_size);
1650	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1651	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1652}
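/*
 * Worked example: with BITS_PER_OBJ == 2 and OBJS_PER_BYTE == 4,
 * objno 5 gives index 1 and shift 4, i.e. the state of object 5 lives
 * in bits 5:4 of object_map[1] (objects are packed MSB-first within
 * each byte).
 */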
1653
1654static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1655{
1656	u64 index;
1657	u8 shift;
1658
1659	lockdep_assert_held(&rbd_dev->object_map_lock);
1660	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
1661	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1662}
1663
1664static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1665{
1666	u64 index;
1667	u8 shift;
1668	u8 *p;
1669
1670	lockdep_assert_held(&rbd_dev->object_map_lock);
1671	rbd_assert(!(val & ~OBJ_MASK));
1672
1673	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
1674	p = &rbd_dev->object_map[index];
1675	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1676}
1677
1678static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1679{
1680	u8 state;
1681
1682	spin_lock(&rbd_dev->object_map_lock);
1683	state = __rbd_object_map_get(rbd_dev, objno);
1684	spin_unlock(&rbd_dev->object_map_lock);
1685	return state;
1686}
1687
1688static bool use_object_map(struct rbd_device *rbd_dev)
1689{
1690	/*
1691	 * An image mapped read-only can't use the object map -- it isn't
1692	 * loaded because the header lock isn't acquired.  Someone else can
1693	 * write to the image and update the object map behind our back.
1694	 *
1695	 * A snapshot can't be written to, so using the object map is always
1696	 * safe.
1697	 */
1698	if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1699		return false;
1700
1701	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1702		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1703}
1704
1705static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1706{
1707	u8 state;
1708
1709	/* fall back to default logic if object map is disabled or invalid */
1710	if (!use_object_map(rbd_dev))
1711		return true;
1712
1713	state = rbd_object_map_get(rbd_dev, objno);
1714	return state != OBJECT_NONEXISTENT;
1715}
1716
1717static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1718				struct ceph_object_id *oid)
1719{
1720	if (snap_id == CEPH_NOSNAP)
1721		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1722				rbd_dev->spec->image_id);
1723	else
1724		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1725				rbd_dev->spec->image_id, snap_id);
1726}
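/*
 * Example OIDs (assuming the usual "rbd_object_map." prefix): for
 * image id "102f6b8b4567" the HEAD object map is
 * "rbd_object_map.102f6b8b4567", while snapshot id 0x20 maps to
 * "rbd_object_map.102f6b8b4567.0000000000000020".
 */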
1727
1728static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1729{
1730	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1731	CEPH_DEFINE_OID_ONSTACK(oid);
1732	u8 lock_type;
1733	char *lock_tag;
1734	struct ceph_locker *lockers;
1735	u32 num_lockers;
1736	bool broke_lock = false;
1737	int ret;
1738
1739	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1740
1741again:
1742	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1743			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1744	if (ret != -EBUSY || broke_lock) {
1745		if (ret == -EEXIST)
1746			ret = 0; /* already locked by myself */
1747		if (ret)
1748			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1749		return ret;
1750	}
1751
1752	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1753				 RBD_LOCK_NAME, &lock_type, &lock_tag,
1754				 &lockers, &num_lockers);
1755	if (ret) {
1756		if (ret == -ENOENT)
1757			goto again;
1758
1759		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1760		return ret;
1761	}
1762
1763	kfree(lock_tag);
1764	if (num_lockers == 0)
1765		goto again;
1766
1767	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1768		 ENTITY_NAME(lockers[0].id.name));
1769
1770	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1771				  RBD_LOCK_NAME, lockers[0].id.cookie,
1772				  &lockers[0].id.name);
1773	ceph_free_lockers(lockers, num_lockers);
1774	if (ret) {
1775		if (ret == -ENOENT)
1776			goto again;
1777
1778		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1779		return ret;
1780	}
1781
1782	broke_lock = true;
1783	goto again;
1784}
1785
1786static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
 
1787{
1788	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1789	CEPH_DEFINE_OID_ONSTACK(oid);
1790	int ret;
1791
1792	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1793
1794	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1795			      "");
1796	if (ret && ret != -ENOENT)
1797		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1798}
1799
1800static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
 
1801{
1802	u8 struct_v;
1803	u32 struct_len;
1804	u32 header_len;
1805	void *header_end;
1806	int ret;
1807
1808	ceph_decode_32_safe(p, end, header_len, e_inval);
1809	header_end = *p + header_len;
1810
1811	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1812				  &struct_len);
1813	if (ret)
1814		return ret;
1815
1816	ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1817
1818	*p = header_end;
1819	return 0;
1820
1821e_inval:
1822	return -EINVAL;
1823}
1824
1825static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1826{
1827	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1828	CEPH_DEFINE_OID_ONSTACK(oid);
1829	struct page **pages;
1830	void *p, *end;
1831	size_t reply_len;
1832	u64 num_objects;
1833	u64 object_map_bytes;
1834	u64 object_map_size;
1835	int num_pages;
1836	int ret;
1837
1838	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1839
1840	num_objects = ceph_get_num_objects(&rbd_dev->layout,
1841					   rbd_dev->mapping.size);
1842	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1843					    BITS_PER_BYTE);
1844	num_pages = calc_pages_for(0, object_map_bytes) + 1;
1845	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1846	if (IS_ERR(pages))
1847		return PTR_ERR(pages);
1848
1849	reply_len = num_pages * PAGE_SIZE;
1850	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1851	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1852			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1853			     NULL, 0, pages, &reply_len);
1854	if (ret)
1855		goto out;
1856
1857	p = page_address(pages[0]);
1858	end = p + min(reply_len, (size_t)PAGE_SIZE);
1859	ret = decode_object_map_header(&p, end, &object_map_size);
1860	if (ret)
1861		goto out;
1862
1863	if (object_map_size != num_objects) {
1864		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1865			 object_map_size, num_objects);
1866		ret = -EINVAL;
1867		goto out;
1868	}
1869
1870	if (offset_in_page(p) + object_map_bytes > reply_len) {
1871		ret = -EINVAL;
1872		goto out;
1873	}
1874
1875	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1876	if (!rbd_dev->object_map) {
1877		ret = -ENOMEM;
1878		goto out;
1879	}
1880
1881	rbd_dev->object_map_size = object_map_size;
1882	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1883				   offset_in_page(p), object_map_bytes);
1884
1885out:
1886	ceph_release_page_vector(pages, num_pages);
1887	return ret;
1888}
1889
1890static void rbd_object_map_free(struct rbd_device *rbd_dev)
 
1891{
1892	kvfree(rbd_dev->object_map);
1893	rbd_dev->object_map = NULL;
1894	rbd_dev->object_map_size = 0;
1895}
1896
1897static int rbd_object_map_load(struct rbd_device *rbd_dev)
1898{
1899	int ret;
1900
1901	ret = __rbd_object_map_load(rbd_dev);
1902	if (ret)
1903		return ret;
1904
1905	ret = rbd_dev_v2_get_flags(rbd_dev);
1906	if (ret) {
1907		rbd_object_map_free(rbd_dev);
1908		return ret;
1909	}
1910
1911	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1912		rbd_warn(rbd_dev, "object map is invalid");
1913
1914	return 0;
1915}
1916
1917static int rbd_object_map_open(struct rbd_device *rbd_dev)
1918{
1919	int ret;
1920
1921	ret = rbd_object_map_lock(rbd_dev);
1922	if (ret)
1923		return ret;
1924
1925	ret = rbd_object_map_load(rbd_dev);
1926	if (ret) {
1927		rbd_object_map_unlock(rbd_dev);
1928		return ret;
1929	}
1930
1931	return 0;
1932}
1933
1934static void rbd_object_map_close(struct rbd_device *rbd_dev)
1935{
1936	rbd_object_map_free(rbd_dev);
1937	rbd_object_map_unlock(rbd_dev);
1938}
1939
1940/*
1941 * This function needs snap_id (or more precisely just something to
1942 * distinguish between HEAD and snapshot object maps), new_state and
1943 * current_state that were passed to rbd_object_map_update().
1944 *
1945 * To avoid allocating and stashing a context we piggyback on the OSD
1946 * request.  A HEAD update has two ops (the first being assert_locked),
1947 * a snapshot update only one.  For new_state and current_state we decode
1948 * our own object_map_update op, encoded in rbd_cls_object_map_update().
1949 */
1950static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
1951					struct ceph_osd_request *osd_req)
1952{
1953	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1954	struct ceph_osd_data *osd_data;
1955	u64 objno;
1956	u8 state, new_state, current_state;
1957	bool has_current_state;
1958	void *p;
1959
1960	if (osd_req->r_result)
1961		return osd_req->r_result;
1962
1963	/*
1964	 * Nothing to do for a snapshot object map.
1965	 */
1966	if (osd_req->r_num_ops == 1)
1967		return 0;
1968
1969	/*
1970	 * Update in-memory HEAD object map.
1971	 */
1972	rbd_assert(osd_req->r_num_ops == 2);
1973	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
1974	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
1975
1976	p = page_address(osd_data->pages[0]);
1977	objno = ceph_decode_64(&p);
1978	rbd_assert(objno == obj_req->ex.oe_objno);
1979	rbd_assert(ceph_decode_64(&p) == objno + 1);
1980	new_state = ceph_decode_8(&p);
1981	has_current_state = ceph_decode_8(&p);
1982	if (has_current_state)
1983		current_state = ceph_decode_8(&p);
1984
1985	spin_lock(&rbd_dev->object_map_lock);
1986	state = __rbd_object_map_get(rbd_dev, objno);
1987	if (!has_current_state || current_state == state ||
1988	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
1989		__rbd_object_map_set(rbd_dev, objno, new_state);
1990	spin_unlock(&rbd_dev->object_map_lock);
1991
1992	return 0;
1993}
1994
1995static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
1996{
1997	struct rbd_obj_request *obj_req = osd_req->r_priv;
1998	int result;
1999
2000	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
2001	     osd_req->r_result, obj_req);
2002
2003	result = rbd_object_map_update_finish(obj_req, osd_req);
2004	rbd_obj_handle_request(obj_req, result);
2005}
2006
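/*
 * Decide whether a HEAD object map update can be skipped.  Besides the
 * object already being in the requested state, marking a nonexistent
 * object PENDING (pre-deletion) and marking an object NONEXISTENT when
 * it isn't PENDING (post-deletion) are treated as no-ops.
 */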
2007static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2008{
2009	u8 state = rbd_object_map_get(rbd_dev, objno);
2010
2011	if (state == new_state ||
2012	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
2013	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
2014		return false;
2015
2016	return true;
2017}
2018
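/*
 * Add an "object_map_update" class method call to the OSD request.
 * The request data is a single page encoding the object range
 * [objno, objno + 1), the new state and an optional current state
 * that the update is conditional on.
 */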
2019static int rbd_cls_object_map_update(struct ceph_osd_request *req,
2020				     int which, u64 objno, u8 new_state,
2021				     const u8 *current_state)
2022{
2023	struct page **pages;
2024	void *p, *start;
2025	int ret;
2026
2027	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2028	if (ret)
2029		return ret;
2030
2031	pages = ceph_alloc_page_vector(1, GFP_NOIO);
2032	if (IS_ERR(pages))
2033		return PTR_ERR(pages);
2034
2035	p = start = page_address(pages[0]);
2036	ceph_encode_64(&p, objno);
2037	ceph_encode_64(&p, objno + 1);
2038	ceph_encode_8(&p, new_state);
2039	if (current_state) {
2040		ceph_encode_8(&p, 1);
2041		ceph_encode_8(&p, *current_state);
2042	} else {
2043		ceph_encode_8(&p, 0);
2044	}
2045
2046	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2047					  false, true);
2048	return 0;
2049}
2050
2051/*
2052 * Return:
2053 *   0 - object map update sent
2054 *   1 - object map update isn't needed
2055 *  <0 - error
2056 */
2057static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2058				 u8 new_state, const u8 *current_state)
2059{
2060	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2061	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2062	struct ceph_osd_request *req;
2063	int num_ops = 1;
2064	int which = 0;
2065	int ret;
2066
2067	if (snap_id == CEPH_NOSNAP) {
2068		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2069			return 1;
2070
2071		num_ops++; /* assert_locked */
2072	}
2073
2074	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2075	if (!req)
2076		return -ENOMEM;
2077
2078	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2079	req->r_callback = rbd_object_map_callback;
2080	req->r_priv = obj_req;
2081
2082	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2083	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2084	req->r_flags = CEPH_OSD_FLAG_WRITE;
2085	ktime_get_real_ts64(&req->r_mtime);
2086
2087	if (snap_id == CEPH_NOSNAP) {
2088		/*
2089		 * Protect against possible race conditions during lock
2090		 * ownership transitions.
2091		 */
2092		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2093					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2094		if (ret)
2095			return ret;
2096	}
2097
2098	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2099					new_state, current_state);
2100	if (ret)
2101		return ret;
2102
2103	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2104	if (ret)
2105		return ret;
2106
2107	ceph_osdc_start_request(osdc, req);
2108	return 0;
2109}
2110
2111static void prune_extents(struct ceph_file_extent *img_extents,
2112			  u32 *num_img_extents, u64 overlap)
2113{
2114	u32 cnt = *num_img_extents;
2115
2116	/* drop extents completely beyond the overlap */
2117	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2118		cnt--;
2119
2120	if (cnt) {
2121		struct ceph_file_extent *ex = &img_extents[cnt - 1];
2122
2123		/* trim final overlapping extent */
2124		if (ex->fe_off + ex->fe_len > overlap)
2125			ex->fe_len = overlap - ex->fe_off;
2126	}
2127
2128	*num_img_extents = cnt;
2129}
2130
2131/*
2132 * Determine the byte range(s) covered by either just the object extent
2133 * or the entire object in the parent image.
2134 */
2135static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2136				    bool entire)
2137{
2138	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2139	int ret;
2140
2141	if (!rbd_dev->parent_overlap)
2142		return 0;
2143
2144	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2145				  entire ? 0 : obj_req->ex.oe_off,
2146				  entire ? rbd_dev->layout.object_size :
2147							obj_req->ex.oe_len,
2148				  &obj_req->img_extents,
2149				  &obj_req->num_img_extents);
2150	if (ret)
2151		return ret;
2152
2153	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2154		      rbd_dev->parent_overlap);
2155	return 0;
2156}
2157
2158static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
2159{
2160	struct rbd_obj_request *obj_req = osd_req->r_priv;
2161
2162	switch (obj_req->img_request->data_type) {
2163	case OBJ_REQUEST_BIO:
2164		osd_req_op_extent_osd_data_bio(osd_req, which,
2165					       &obj_req->bio_pos,
2166					       obj_req->ex.oe_len);
2167		break;
2168	case OBJ_REQUEST_BVECS:
2169	case OBJ_REQUEST_OWN_BVECS:
2170		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2171							obj_req->ex.oe_len);
2172		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2173		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
2174						    &obj_req->bvec_pos);
2175		break;
2176	default:
2177		BUG();
2178	}
2179}
2180
2181static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
2182{
2183	struct page **pages;
2184
2185	/*
2186	 * The response data for a STAT call consists of:
2187	 *     le64 length;
2188	 *     struct {
2189	 *         le32 tv_sec;
2190	 *         le32 tv_nsec;
2191	 *     } mtime;
2192	 */
2193	pages = ceph_alloc_page_vector(1, GFP_NOIO);
2194	if (IS_ERR(pages))
2195		return PTR_ERR(pages);
2196
2197	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2198	osd_req_op_raw_data_in_pages(osd_req, which, pages,
2199				     8 + sizeof(struct ceph_timespec),
2200				     0, false, true);
2201	return 0;
2202}
2203
2204static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2205				u32 bytes)
2206{
2207	struct rbd_obj_request *obj_req = osd_req->r_priv;
2208	int ret;
2209
2210	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2211	if (ret)
2212		return ret;
2213
2214	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2215					  obj_req->copyup_bvec_count, bytes);
2216	return 0;
2217}
2218
2219static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2220{
2221	obj_req->read_state = RBD_OBJ_READ_START;
2222	return 0;
2223}
2224
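/*
 * Set up the ops for a plain write: an allocation hint sized to the
 * object size (skipped if the object map says the object may already
 * exist), followed by write or, for a full-object write, writefull.
 */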
2225static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2226				      int which)
2227{
2228	struct rbd_obj_request *obj_req = osd_req->r_priv;
2229	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2230	u16 opcode;
2231
2232	if (!use_object_map(rbd_dev) ||
2233	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2234		osd_req_op_alloc_hint_init(osd_req, which++,
2235					   rbd_dev->layout.object_size,
2236					   rbd_dev->layout.object_size,
2237					   rbd_dev->opts->alloc_hint_flags);
2238	}
2239
2240	if (rbd_obj_is_entire(obj_req))
2241		opcode = CEPH_OSD_OP_WRITEFULL;
2242	else
2243		opcode = CEPH_OSD_OP_WRITE;
2244
2245	osd_req_op_extent_init(osd_req, which, opcode,
2246			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2247	rbd_osd_setup_data(osd_req, which);
2248}
2249
2250static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2251{
2252	int ret;
2253
2254	/* reverse map the entire object onto the parent */
2255	ret = rbd_obj_calc_img_extents(obj_req, true);
2256	if (ret)
2257		return ret;
2258
2259	obj_req->write_state = RBD_OBJ_WRITE_START;
2260	return 0;
2261}
2262
2263static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2264{
2265	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2266					  CEPH_OSD_OP_ZERO;
2267}
2268
2269static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2270					int which)
2271{
2272	struct rbd_obj_request *obj_req = osd_req->r_priv;
2273
2274	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2275		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2276		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2277	} else {
2278		osd_req_op_extent_init(osd_req, which,
2279				       truncate_or_zero_opcode(obj_req),
2280				       obj_req->ex.oe_off, obj_req->ex.oe_len,
2281				       0, 0);
2282	}
2283}
2284
2285static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2286{
2287	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2288	u64 off, next_off;
2289	int ret;
2290
2291	/*
2292	 * Align the range to alloc_size boundary and punt on discards
2293	 * that are too small to free up any space.
2294	 *
2295	 * alloc_size == object_size && is_tail() is a special case for
2296	 * filestore with filestore_punch_hole = false, needed to allow
2297	 * truncate (in addition to delete).
2298	 */
2299	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2300	    !rbd_obj_is_tail(obj_req)) {
2301		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2302		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2303				      rbd_dev->opts->alloc_size);
2304		if (off >= next_off)
2305			return 1;
2306
2307		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2308		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2309		     off, next_off - off);
2310		obj_req->ex.oe_off = off;
2311		obj_req->ex.oe_len = next_off - off;
2312	}
2313
2314	/* reverse map the entire object onto the parent */
2315	ret = rbd_obj_calc_img_extents(obj_req, true);
2316	if (ret)
2317		return ret;
2318
2319	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2320	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2321		obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2322
2323	obj_req->write_state = RBD_OBJ_WRITE_START;
2324	return 0;
2325}
2326
2327static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2328					int which)
2329{
2330	struct rbd_obj_request *obj_req = osd_req->r_priv;
2331	u16 opcode;
2332
2333	if (rbd_obj_is_entire(obj_req)) {
2334		if (obj_req->num_img_extents) {
2335			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2336				osd_req_op_init(osd_req, which++,
2337						CEPH_OSD_OP_CREATE, 0);
2338			opcode = CEPH_OSD_OP_TRUNCATE;
2339		} else {
2340			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2341			osd_req_op_init(osd_req, which++,
2342					CEPH_OSD_OP_DELETE, 0);
2343			opcode = 0;
2344		}
2345	} else {
2346		opcode = truncate_or_zero_opcode(obj_req);
2347	}
2348
2349	if (opcode)
2350		osd_req_op_extent_init(osd_req, which, opcode,
2351				       obj_req->ex.oe_off, obj_req->ex.oe_len,
2352				       0, 0);
2353}
2354
2355static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2356{
2357	int ret;
2358
2359	/* reverse map the entire object onto the parent */
2360	ret = rbd_obj_calc_img_extents(obj_req, true);
2361	if (ret)
2362		return ret;
2363
2364	if (!obj_req->num_img_extents) {
2365		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2366		if (rbd_obj_is_entire(obj_req))
2367			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2368	}
2369
2370	obj_req->write_state = RBD_OBJ_WRITE_START;
2371	return 0;
2372}
2373
2374static int count_write_ops(struct rbd_obj_request *obj_req)
2375{
2376	struct rbd_img_request *img_req = obj_req->img_request;
2377
2378	switch (img_req->op_type) {
2379	case OBJ_OP_WRITE:
2380		if (!use_object_map(img_req->rbd_dev) ||
2381		    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2382			return 2; /* setallochint + write/writefull */
2383
2384		return 1; /* write/writefull */
2385	case OBJ_OP_DISCARD:
2386		return 1; /* delete/truncate/zero */
2387	case OBJ_OP_ZEROOUT:
2388		if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2389		    !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2390			return 2; /* create + truncate */
2391
2392		return 1; /* delete/truncate/zero */
2393	default:
2394		BUG();
2395	}
2396}
2397
2398static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2399				    int which)
2400{
2401	struct rbd_obj_request *obj_req = osd_req->r_priv;
2402
2403	switch (obj_req->img_request->op_type) {
2404	case OBJ_OP_WRITE:
2405		__rbd_osd_setup_write_ops(osd_req, which);
2406		break;
2407	case OBJ_OP_DISCARD:
2408		__rbd_osd_setup_discard_ops(osd_req, which);
2409		break;
2410	case OBJ_OP_ZEROOUT:
2411		__rbd_osd_setup_zeroout_ops(osd_req, which);
2412		break;
2413	default:
2414		BUG();
2415	}
2416}
2417
2418/*
2419 * Prune the list of object requests (adjust offset and/or length, drop
2420 * redundant requests).  Prepare object request state machines and image
2421 * request state machine for execution.
2422 */
2423static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2424{
2425	struct rbd_obj_request *obj_req, *next_obj_req;
2426	int ret;
2427
2428	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2429		switch (img_req->op_type) {
2430		case OBJ_OP_READ:
2431			ret = rbd_obj_init_read(obj_req);
2432			break;
2433		case OBJ_OP_WRITE:
2434			ret = rbd_obj_init_write(obj_req);
2435			break;
2436		case OBJ_OP_DISCARD:
2437			ret = rbd_obj_init_discard(obj_req);
2438			break;
2439		case OBJ_OP_ZEROOUT:
2440			ret = rbd_obj_init_zeroout(obj_req);
2441			break;
2442		default:
2443			BUG();
2444		}
2445		if (ret < 0)
2446			return ret;
2447		if (ret > 0) {
2448			rbd_img_obj_request_del(img_req, obj_req);
2449			continue;
2450		}
2451	}
2452
2453	img_req->state = RBD_IMG_START;
2454	return 0;
2455}
2456
2457union rbd_img_fill_iter {
2458	struct ceph_bio_iter	bio_iter;
2459	struct ceph_bvec_iter	bvec_iter;
2460};
2461
2462struct rbd_img_fill_ctx {
2463	enum obj_request_type	pos_type;
2464	union rbd_img_fill_iter	*pos;
2465	union rbd_img_fill_iter	iter;
2466	ceph_object_extent_fn_t	set_pos_fn;
2467	ceph_object_extent_fn_t	count_fn;
2468	ceph_object_extent_fn_t	copy_fn;
2469};
2470
2471static struct ceph_object_extent *alloc_object_extent(void *arg)
2472{
2473	struct rbd_img_request *img_req = arg;
2474	struct rbd_obj_request *obj_req;
2475
2476	obj_req = rbd_obj_request_create();
2477	if (!obj_req)
2478		return NULL;
2479
2480	rbd_img_obj_request_add(img_req, obj_req);
2481	return &obj_req->ex;
2482}
2483
2484/*
2485 * While su != os && sc == 1 is technically not fancy (it's the same
2486 * layout as su == os && sc == 1), we can't use the nocopy path for it
2487 * because ->set_pos_fn() should be called only once per object.
2488 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2489 * treat su != os && sc == 1 as fancy.
2490 */
2491static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2492{
2493	return l->stripe_unit != l->object_size;
2494}
2495
2496static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2497				       struct ceph_file_extent *img_extents,
2498				       u32 num_img_extents,
2499				       struct rbd_img_fill_ctx *fctx)
2500{
2501	u32 i;
2502	int ret;
2503
2504	img_req->data_type = fctx->pos_type;
2505
2506	/*
2507	 * Create object requests and set each object request's starting
2508	 * position in the provided bio (list) or bio_vec array.
2509	 */
2510	fctx->iter = *fctx->pos;
2511	for (i = 0; i < num_img_extents; i++) {
2512		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2513					   img_extents[i].fe_off,
2514					   img_extents[i].fe_len,
2515					   &img_req->object_extents,
2516					   alloc_object_extent, img_req,
2517					   fctx->set_pos_fn, &fctx->iter);
2518		if (ret)
2519			return ret;
2520	}
2521
2522	return __rbd_img_fill_request(img_req);
2523}
2524
2525/*
2526 * Map a list of image extents to a list of object extents, create the
2527 * corresponding object requests (normally each to a different object,
2528 * but not always) and add them to @img_req.  For each object request,
2529 * set up its data descriptor to point to the corresponding chunk(s) of
2530 * @fctx->pos data buffer.
2531 *
2532 * Because ceph_file_to_extents() will merge adjacent object extents
2533 * together, each object request's data descriptor may point to multiple
2534 * different chunks of @fctx->pos data buffer.
2535 *
2536 * @fctx->pos data buffer is assumed to be large enough.
2537 */
2538static int rbd_img_fill_request(struct rbd_img_request *img_req,
2539				struct ceph_file_extent *img_extents,
2540				u32 num_img_extents,
2541				struct rbd_img_fill_ctx *fctx)
2542{
2543	struct rbd_device *rbd_dev = img_req->rbd_dev;
2544	struct rbd_obj_request *obj_req;
2545	u32 i;
2546	int ret;
2547
2548	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2549	    !rbd_layout_is_fancy(&rbd_dev->layout))
2550		return rbd_img_fill_request_nocopy(img_req, img_extents,
2551						   num_img_extents, fctx);
2552
2553	img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2554
2555	/*
2556	 * Create object requests and determine ->bvec_count for each object
2557	 * request.  Note that ->bvec_count sum over all object requests may
2558	 * be greater than the number of bio_vecs in the provided bio (list)
2559	 * or bio_vec array because when mapped, those bio_vecs can straddle
2560	 * stripe unit boundaries.
2561	 */
2562	fctx->iter = *fctx->pos;
2563	for (i = 0; i < num_img_extents; i++) {
2564		ret = ceph_file_to_extents(&rbd_dev->layout,
2565					   img_extents[i].fe_off,
2566					   img_extents[i].fe_len,
2567					   &img_req->object_extents,
2568					   alloc_object_extent, img_req,
2569					   fctx->count_fn, &fctx->iter);
2570		if (ret)
2571			return ret;
2572	}
2573
2574	for_each_obj_request(img_req, obj_req) {
2575		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2576					      sizeof(*obj_req->bvec_pos.bvecs),
2577					      GFP_NOIO);
2578		if (!obj_req->bvec_pos.bvecs)
2579			return -ENOMEM;
2580	}
2581
2582	/*
2583	 * Fill in each object request's private bio_vec array, splitting and
2584	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2585	 */
2586	fctx->iter = *fctx->pos;
2587	for (i = 0; i < num_img_extents; i++) {
2588		ret = ceph_iterate_extents(&rbd_dev->layout,
2589					   img_extents[i].fe_off,
2590					   img_extents[i].fe_len,
2591					   &img_req->object_extents,
2592					   fctx->copy_fn, &fctx->iter);
2593		if (ret)
2594			return ret;
2595	}
2596
2597	return __rbd_img_fill_request(img_req);
2598}
2599
2600static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2601			       u64 off, u64 len)
2602{
2603	struct ceph_file_extent ex = { off, len };
2604	union rbd_img_fill_iter dummy = {};
2605	struct rbd_img_fill_ctx fctx = {
2606		.pos_type = OBJ_REQUEST_NODATA,
2607		.pos = &dummy,
2608	};
2609
2610	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2611}
2612
2613static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2614{
2615	struct rbd_obj_request *obj_req =
2616	    container_of(ex, struct rbd_obj_request, ex);
2617	struct ceph_bio_iter *it = arg;
2618
2619	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2620	obj_req->bio_pos = *it;
2621	ceph_bio_iter_advance(it, bytes);
2622}
2623
2624static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2625{
2626	struct rbd_obj_request *obj_req =
2627	    container_of(ex, struct rbd_obj_request, ex);
2628	struct ceph_bio_iter *it = arg;
2629
2630	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2631	ceph_bio_iter_advance_step(it, bytes, ({
2632		obj_req->bvec_count++;
2633	}));
2634
2635}
2636
2637static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2638{
2639	struct rbd_obj_request *obj_req =
2640	    container_of(ex, struct rbd_obj_request, ex);
2641	struct ceph_bio_iter *it = arg;
2642
2643	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2644	ceph_bio_iter_advance_step(it, bytes, ({
2645		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2646		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2647	}));
2648}
2649
2650static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2651				   struct ceph_file_extent *img_extents,
2652				   u32 num_img_extents,
2653				   struct ceph_bio_iter *bio_pos)
2654{
2655	struct rbd_img_fill_ctx fctx = {
2656		.pos_type = OBJ_REQUEST_BIO,
2657		.pos = (union rbd_img_fill_iter *)bio_pos,
2658		.set_pos_fn = set_bio_pos,
2659		.count_fn = count_bio_bvecs,
2660		.copy_fn = copy_bio_bvecs,
2661	};
2662
2663	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2664				    &fctx);
2665}
2666
2667static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2668				 u64 off, u64 len, struct bio *bio)
2669{
2670	struct ceph_file_extent ex = { off, len };
2671	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2672
2673	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2674}
2675
2676static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2677{
2678	struct rbd_obj_request *obj_req =
2679	    container_of(ex, struct rbd_obj_request, ex);
2680	struct ceph_bvec_iter *it = arg;
2681
2682	obj_req->bvec_pos = *it;
2683	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2684	ceph_bvec_iter_advance(it, bytes);
2685}
2686
2687static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2688{
2689	struct rbd_obj_request *obj_req =
2690	    container_of(ex, struct rbd_obj_request, ex);
2691	struct ceph_bvec_iter *it = arg;
2692
2693	ceph_bvec_iter_advance_step(it, bytes, ({
2694		obj_req->bvec_count++;
2695	}));
2696}
2697
2698static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2699{
2700	struct rbd_obj_request *obj_req =
2701	    container_of(ex, struct rbd_obj_request, ex);
2702	struct ceph_bvec_iter *it = arg;
2703
2704	ceph_bvec_iter_advance_step(it, bytes, ({
2705		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2706		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2707	}));
2708}
2709
2710static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2711				     struct ceph_file_extent *img_extents,
2712				     u32 num_img_extents,
2713				     struct ceph_bvec_iter *bvec_pos)
2714{
2715	struct rbd_img_fill_ctx fctx = {
2716		.pos_type = OBJ_REQUEST_BVECS,
2717		.pos = (union rbd_img_fill_iter *)bvec_pos,
2718		.set_pos_fn = set_bvec_pos,
2719		.count_fn = count_bvecs,
2720		.copy_fn = copy_bvecs,
2721	};
2722
2723	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2724				    &fctx);
2725}
2726
2727static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2728				   struct ceph_file_extent *img_extents,
2729				   u32 num_img_extents,
2730				   struct bio_vec *bvecs)
2731{
2732	struct ceph_bvec_iter it = {
2733		.bvecs = bvecs,
2734		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2735							     num_img_extents) },
2736	};
2737
2738	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2739					 &it);
2740}
2741
2742static void rbd_img_handle_request_work(struct work_struct *work)
2743{
2744	struct rbd_img_request *img_req =
2745	    container_of(work, struct rbd_img_request, work);
2746
2747	rbd_img_handle_request(img_req, img_req->work_result);
2748}
2749
2750static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2751{
2752	INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2753	img_req->work_result = result;
2754	queue_work(rbd_wq, &img_req->work);
2755}
2756
2757static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2758{
2759	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2760
2761	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2762		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2763		return true;
2764	}
2765
2766	dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2767	     obj_req->ex.oe_objno);
2768	return false;
2769}
2770
2771static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2772{
2773	struct ceph_osd_request *osd_req;
2774	int ret;
2775
2776	osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2777	if (IS_ERR(osd_req))
2778		return PTR_ERR(osd_req);
2779
2780	osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2781			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2782	rbd_osd_setup_data(osd_req, 0);
2783	rbd_osd_format_read(osd_req);
2784
2785	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2786	if (ret)
2787		return ret;
2788
2789	rbd_osd_submit(osd_req);
2790	return 0;
2791}
2792
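/*
 * Read the relevant range(s) from the parent image.  A child image
 * request is built from obj_req->img_extents: plain reads reuse the
 * original data buffer, writes (i.e. copyup reads) read into
 * obj_req->copyup_bvecs.  The child request is kicked off through the
 * workqueue to avoid recursing down long parent chains.
 */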
2793static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2794{
2795	struct rbd_img_request *img_req = obj_req->img_request;
2796	struct rbd_device *parent = img_req->rbd_dev->parent;
2797	struct rbd_img_request *child_img_req;
2798	int ret;
2799
2800	child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2801	if (!child_img_req)
2802		return -ENOMEM;
2803
2804	rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2805	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2806	child_img_req->obj_request = obj_req;
2807
2808	down_read(&parent->header_rwsem);
2809	rbd_img_capture_header(child_img_req);
2810	up_read(&parent->header_rwsem);
2811
2812	dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2813	     obj_req);
2814
2815	if (!rbd_img_is_write(img_req)) {
2816		switch (img_req->data_type) {
2817		case OBJ_REQUEST_BIO:
2818			ret = __rbd_img_fill_from_bio(child_img_req,
2819						      obj_req->img_extents,
2820						      obj_req->num_img_extents,
2821						      &obj_req->bio_pos);
2822			break;
2823		case OBJ_REQUEST_BVECS:
2824		case OBJ_REQUEST_OWN_BVECS:
2825			ret = __rbd_img_fill_from_bvecs(child_img_req,
2826						      obj_req->img_extents,
2827						      obj_req->num_img_extents,
2828						      &obj_req->bvec_pos);
2829			break;
2830		default:
2831			BUG();
2832		}
2833	} else {
2834		ret = rbd_img_fill_from_bvecs(child_img_req,
2835					      obj_req->img_extents,
2836					      obj_req->num_img_extents,
2837					      obj_req->copyup_bvecs);
2838	}
2839	if (ret) {
2840		rbd_img_request_destroy(child_img_req);
2841		return ret;
2842	}
2843
2844	/* avoid parent chain recursion */
2845	rbd_img_schedule(child_img_req, 0);
2846	return 0;
2847}
2848
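/*
 * Object read state machine.  START issues the object read (or fakes
 * -ENOENT if the object map says the object doesn't exist), OBJECT
 * falls back to reading from the parent on -ENOENT within the parent
 * overlap and zero-fills holes and short reads, PARENT zero-fills
 * whatever lies beyond the parent overlap.  Returns true when obj_req
 * is done.
 */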
2849static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2850{
2851	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2852	int ret;
2853
2854again:
2855	switch (obj_req->read_state) {
2856	case RBD_OBJ_READ_START:
2857		rbd_assert(!*result);
2858
2859		if (!rbd_obj_may_exist(obj_req)) {
2860			*result = -ENOENT;
2861			obj_req->read_state = RBD_OBJ_READ_OBJECT;
2862			goto again;
2863		}
2864
2865		ret = rbd_obj_read_object(obj_req);
2866		if (ret) {
2867			*result = ret;
2868			return true;
2869		}
2870		obj_req->read_state = RBD_OBJ_READ_OBJECT;
2871		return false;
2872	case RBD_OBJ_READ_OBJECT:
2873		if (*result == -ENOENT && rbd_dev->parent_overlap) {
2874			/* reverse map this object extent onto the parent */
2875			ret = rbd_obj_calc_img_extents(obj_req, false);
2876			if (ret) {
2877				*result = ret;
2878				return true;
2879			}
2880			if (obj_req->num_img_extents) {
2881				ret = rbd_obj_read_from_parent(obj_req);
2882				if (ret) {
2883					*result = ret;
2884					return true;
2885				}
2886				obj_req->read_state = RBD_OBJ_READ_PARENT;
2887				return false;
2888			}
2889		}
2890
2891		/*
2892		 * -ENOENT means a hole in the image -- zero-fill the entire
2893		 * length of the request.  A short read also implies zero-fill
2894		 * to the end of the request.
2895		 */
2896		if (*result == -ENOENT) {
2897			rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2898			*result = 0;
2899		} else if (*result >= 0) {
2900			if (*result < obj_req->ex.oe_len)
2901				rbd_obj_zero_range(obj_req, *result,
2902						obj_req->ex.oe_len - *result);
2903			else
2904				rbd_assert(*result == obj_req->ex.oe_len);
2905			*result = 0;
2906		}
2907		return true;
2908	case RBD_OBJ_READ_PARENT:
2909		/*
2910		 * The parent image is read only up to the overlap -- zero-fill
2911		 * from the overlap to the end of the request.
2912		 */
2913		if (!*result) {
2914			u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2915
2916			if (obj_overlap < obj_req->ex.oe_len)
2917				rbd_obj_zero_range(obj_req, obj_overlap,
2918					    obj_req->ex.oe_len - obj_overlap);
2919		}
2920		return true;
2921	default:
2922		BUG();
2923	}
2924}
2925
2926static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2927{
2928	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2929
2930	if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2931		obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2932
2933	if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2934	    (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2935		dout("%s %p noop for nonexistent\n", __func__, obj_req);
2936		return true;
2937	}
2938
2939	return false;
2940}
2941
2942/*
2943 * Return:
2944 *   0 - object map update sent
2945 *   1 - object map update isn't needed
2946 *  <0 - error
2947 */
2948static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
2949{
2950	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2951	u8 new_state;
2952
2953	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
2954		return 1;
2955
2956	if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
2957		new_state = OBJECT_PENDING;
2958	else
2959		new_state = OBJECT_EXISTS;
2960
2961	return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
2962}
2963
2964static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
2965{
2966	struct ceph_osd_request *osd_req;
2967	int num_ops = count_write_ops(obj_req);
2968	int which = 0;
2969	int ret;
2970
2971	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
2972		num_ops++; /* stat */
2973
2974	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
2975	if (IS_ERR(osd_req))
2976		return PTR_ERR(osd_req);
2977
2978	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
2979		ret = rbd_osd_setup_stat(osd_req, which++);
2980		if (ret)
2981			return ret;
2982	}
2983
2984	rbd_osd_setup_write_ops(osd_req, which);
2985	rbd_osd_format_write(osd_req);
2986
2987	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2988	if (ret)
2989		return ret;
2990
2991	rbd_osd_submit(osd_req);
2992	return 0;
2993}
2994
2995/*
2996 * copyup_bvecs pages are never highmem pages
2997 */
2998static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2999{
3000	struct ceph_bvec_iter it = {
3001		.bvecs = bvecs,
3002		.iter = { .bi_size = bytes },
3003	};
3004
3005	ceph_bvec_iter_advance_step(&it, bytes, ({
3006		if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
3007			return false;
3008	}));
3009	return true;
3010}
3011
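/*
 * Sentinel "bytes" value for the copyup helpers: send only the
 * modification ops, without attaching any copyup data.
 */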
3012#define MODS_ONLY	U32_MAX
3013
3014static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3015				      u32 bytes)
3016{
3017	struct ceph_osd_request *osd_req;
3018	int ret;
3019
3020	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3021	rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3022
3023	osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3024	if (IS_ERR(osd_req))
3025		return PTR_ERR(osd_req);
3026
3027	ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3028	if (ret)
3029		return ret;
3030
3031	rbd_osd_format_write(osd_req);
3032
3033	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3034	if (ret)
3035		return ret;
3036
3037	rbd_osd_submit(osd_req);
3038	return 0;
3039}
3040
3041static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3042					u32 bytes)
3043{
3044	struct ceph_osd_request *osd_req;
3045	int num_ops = count_write_ops(obj_req);
3046	int which = 0;
3047	int ret;
3048
3049	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3050
3051	if (bytes != MODS_ONLY)
3052		num_ops++; /* copyup */
3053
3054	osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3055	if (IS_ERR(osd_req))
3056		return PTR_ERR(osd_req);
3057
3058	if (bytes != MODS_ONLY) {
3059		ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3060		if (ret)
3061			return ret;
3062	}
3063
3064	rbd_osd_setup_write_ops(osd_req, which);
3065	rbd_osd_format_write(osd_req);
3066
3067	ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3068	if (ret)
3069		return ret;
3070
3071	rbd_osd_submit(osd_req);
3072	return 0;
3073}
3074
3075static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3076{
3077	u32 i;
3078
3079	rbd_assert(!obj_req->copyup_bvecs);
3080	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3081	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3082					sizeof(*obj_req->copyup_bvecs),
3083					GFP_NOIO);
3084	if (!obj_req->copyup_bvecs)
3085		return -ENOMEM;
3086
3087	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3088		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3089		struct page *page = alloc_page(GFP_NOIO);
3090
3091		if (!page)
3092			return -ENOMEM;
3093
3094		bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
3095		obj_overlap -= len;
3096	}
3097
3098	rbd_assert(!obj_overlap);
3099	return 0;
3100}
3101
3102/*
3103 * The target object doesn't exist.  Read the data for the entire
3104 * target object up to the overlap point (if any) from the parent,
3105 * so we can use it for a copyup.
3106 */
3107static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3108{
3109	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3110	int ret;
3111
3112	rbd_assert(obj_req->num_img_extents);
3113	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3114		      rbd_dev->parent_overlap);
3115	if (!obj_req->num_img_extents) {
3116		/*
3117		 * The overlap has become 0 (most likely because the
3118		 * image has been flattened).  Re-submit the original write
3119		 * request -- pass MODS_ONLY since the copyup isn't needed
3120		 * anymore.
3121		 */
3122		return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3123	}
3124
3125	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3126	if (ret)
3127		return ret;
3128
3129	return rbd_obj_read_from_parent(obj_req);
3130}
3131
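/*
 * After a copyup the object exists in every snapshot of the current
 * snapshot context, so send an object map update for each of them
 * (skipped entirely if the copyup data is all zeros).  With fast-diff
 * enabled, all but one of the snapshots are marked EXISTS_CLEAN rather
 * than EXISTS.  One pending update is counted per snapshot.
 */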
3132static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3133{
3134	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3135	struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3136	u8 new_state;
3137	u32 i;
3138	int ret;
3139
3140	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3141
3142	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3143		return;
3144
3145	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3146		return;
3147
3148	for (i = 0; i < snapc->num_snaps; i++) {
3149		if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3150		    i + 1 < snapc->num_snaps)
3151			new_state = OBJECT_EXISTS_CLEAN;
3152		else
3153			new_state = OBJECT_EXISTS;
3154
3155		ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3156					    new_state, NULL);
3157		if (ret < 0) {
3158			obj_req->pending.result = ret;
3159			return;
3160		}
3161
3162		rbd_assert(!ret);
3163		obj_req->pending.num_pending++;
3164	}
3165}
3166
3167static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3168{
3169	u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3170	int ret;
3171
3172	rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3173
3174	/*
3175	 * Only send non-zero copyup data to save some I/O and network
3176	 * bandwidth -- zero copyup data is equivalent to the object not
3177	 * existing.
3178	 */
3179	if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3180		bytes = 0;
3181
3182	if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3183		/*
3184		 * Send a copyup request with an empty snapshot context to
3185		 * deep-copyup the object through all existing snapshots.
3186		 * A second request with the current snapshot context will be
3187		 * sent for the actual modification.
3188		 */
3189		ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3190		if (ret) {
3191			obj_req->pending.result = ret;
3192			return;
3193		}
3194
3195		obj_req->pending.num_pending++;
3196		bytes = MODS_ONLY;
3197	}
3198
3199	ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3200	if (ret) {
3201		obj_req->pending.result = ret;
3202		return;
3203	}
3204
3205	obj_req->pending.num_pending++;
3206}
3207
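/*
 * Copyup state machine: read the overlapping range from the parent,
 * update the snapshot object maps, then write the object out -- as a
 * deep-copyup with an empty snapc followed by the actual modification
 * if snapshots exist, or as a single request otherwise.  The __ states
 * wait for the corresponding OSD requests to complete.
 */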
3208static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3209{
3210	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3211	int ret;
3212
3213again:
3214	switch (obj_req->copyup_state) {
3215	case RBD_OBJ_COPYUP_START:
3216		rbd_assert(!*result);
3217
3218		ret = rbd_obj_copyup_read_parent(obj_req);
3219		if (ret) {
3220			*result = ret;
3221			return true;
3222		}
3223		if (obj_req->num_img_extents)
3224			obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3225		else
3226			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3227		return false;
3228	case RBD_OBJ_COPYUP_READ_PARENT:
3229		if (*result)
3230			return true;
3231
3232		if (is_zero_bvecs(obj_req->copyup_bvecs,
3233				  rbd_obj_img_extents_bytes(obj_req))) {
3234			dout("%s %p detected zeros\n", __func__, obj_req);
3235			obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3236		}
3237
3238		rbd_obj_copyup_object_maps(obj_req);
3239		if (!obj_req->pending.num_pending) {
3240			*result = obj_req->pending.result;
3241			obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3242			goto again;
3243		}
3244		obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3245		return false;
3246	case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3247		if (!pending_result_dec(&obj_req->pending, result))
3248			return false;
3249		fallthrough;
3250	case RBD_OBJ_COPYUP_OBJECT_MAPS:
3251		if (*result) {
3252			rbd_warn(rbd_dev, "snap object map update failed: %d",
3253				 *result);
3254			return true;
3255		}
3256
3257		rbd_obj_copyup_write_object(obj_req);
3258		if (!obj_req->pending.num_pending) {
3259			*result = obj_req->pending.result;
3260			obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3261			goto again;
3262		}
3263		obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3264		return false;
3265	case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3266		if (!pending_result_dec(&obj_req->pending, result))
3267			return false;
3268		fallthrough;
3269	case RBD_OBJ_COPYUP_WRITE_OBJECT:
3270		return true;
3271	default:
3272		BUG();
3273	}
3274}
3275
3276/*
3277 * Return:
3278 *   0 - object map update sent
3279 *   1 - object map update isn't needed
3280 *  <0 - error
3281 */
3282static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3283{
3284	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3285	u8 current_state = OBJECT_PENDING;
3286
3287	if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3288		return 1;
3289
3290	if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3291		return 1;
3292
3293	return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3294				     &current_state);
3295}
3296
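/*
 * Object write state machine: an optional pre object map update
 * (EXISTS, or PENDING for deletions), the write itself, the copyup
 * state machine if the object turned out not to exist and copyup is
 * enabled, and an optional post object map update (NONEXISTENT) for
 * deletions.  Returns true when obj_req is done.
 */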
3297static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3298{
3299	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3300	int ret;
3301
3302again:
3303	switch (obj_req->write_state) {
3304	case RBD_OBJ_WRITE_START:
3305		rbd_assert(!*result);
3306
3307		rbd_obj_set_copyup_enabled(obj_req);
3308		if (rbd_obj_write_is_noop(obj_req))
3309			return true;
3310
3311		ret = rbd_obj_write_pre_object_map(obj_req);
3312		if (ret < 0) {
3313			*result = ret;
3314			return true;
3315		}
3316		obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3317		if (ret > 0)
3318			goto again;
3319		return false;
3320	case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3321		if (*result) {
3322			rbd_warn(rbd_dev, "pre object map update failed: %d",
3323				 *result);
3324			return true;
3325		}
3326		ret = rbd_obj_write_object(obj_req);
3327		if (ret) {
3328			*result = ret;
3329			return true;
3330		}
3331		obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3332		return false;
3333	case RBD_OBJ_WRITE_OBJECT:
3334		if (*result == -ENOENT) {
3335			if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3336				*result = 0;
3337				obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3338				obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3339				goto again;
3340			}
3341			/*
3342			 * On a non-existent object:
3343			 *   delete returns -ENOENT, truncate/zero returns 0
3344			 */
3345			if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3346				*result = 0;
3347		}
3348		if (*result)
3349			return true;
3350
3351		obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3352		goto again;
3353	case __RBD_OBJ_WRITE_COPYUP:
3354		if (!rbd_obj_advance_copyup(obj_req, result))
3355			return false;
3356		fallthrough;
3357	case RBD_OBJ_WRITE_COPYUP:
3358		if (*result) {
3359			rbd_warn(rbd_dev, "copyup failed: %d", *result);
3360			return true;
3361		}
3362		ret = rbd_obj_write_post_object_map(obj_req);
3363		if (ret < 0) {
3364			*result = ret;
3365			return true;
3366		}
3367		obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3368		if (ret > 0)
3369			goto again;
3370		return false;
3371	case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3372		if (*result)
3373			rbd_warn(rbd_dev, "post object map update failed: %d",
3374				 *result);
3375		return true;
3376	default:
3377		BUG();
3378	}
3379}
3380
3381/*
3382 * Return true if @obj_req is completed.
3383 */
3384static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3385				     int *result)
3386{
3387	struct rbd_img_request *img_req = obj_req->img_request;
3388	struct rbd_device *rbd_dev = img_req->rbd_dev;
3389	bool done;
3390
3391	mutex_lock(&obj_req->state_mutex);
3392	if (!rbd_img_is_write(img_req))
3393		done = rbd_obj_advance_read(obj_req, result);
3394	else
3395		done = rbd_obj_advance_write(obj_req, result);
3396	mutex_unlock(&obj_req->state_mutex);
3397
3398	if (done && *result) {
3399		rbd_assert(*result < 0);
3400		rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3401			 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3402			 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3403	}
3404	return done;
3405}
3406
3407/*
3408 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3409 * recursion.
3410 */
3411static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3412{
3413	if (__rbd_obj_handle_request(obj_req, &result))
3414		rbd_img_handle_request(obj_req->img_request, result);
3415}
3416
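/*
 * The exclusive lock is required for all writes and, if lock_on_read
 * or the object map feature is enabled, for reads as well.  Read-only
 * mappings and images without the exclusive-lock feature never take
 * it.
 */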
3417static bool need_exclusive_lock(struct rbd_img_request *img_req)
3418{
3419	struct rbd_device *rbd_dev = img_req->rbd_dev;
3420
3421	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3422		return false;
3423
3424	if (rbd_is_ro(rbd_dev))
3425		return false;
3426
3427	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3428	if (rbd_dev->opts->lock_on_read ||
3429	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3430		return true;
3431
3432	return rbd_img_is_write(img_req);
3433}
3434
3435static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3436{
3437	struct rbd_device *rbd_dev = img_req->rbd_dev;
3438	bool locked;
3439
3440	lockdep_assert_held(&rbd_dev->lock_rwsem);
3441	locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3442	spin_lock(&rbd_dev->lock_lists_lock);
3443	rbd_assert(list_empty(&img_req->lock_item));
3444	if (!locked)
3445		list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3446	else
3447		list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3448	spin_unlock(&rbd_dev->lock_lists_lock);
3449	return locked;
3450}
3451
3452static void rbd_lock_del_request(struct rbd_img_request *img_req)
3453{
3454	struct rbd_device *rbd_dev = img_req->rbd_dev;
3455	bool need_wakeup = false;
3456
3457	lockdep_assert_held(&rbd_dev->lock_rwsem);
3458	spin_lock(&rbd_dev->lock_lists_lock);
3459	if (!list_empty(&img_req->lock_item)) {
3460		rbd_assert(!list_empty(&rbd_dev->running_list));
3461		list_del_init(&img_req->lock_item);
3462		need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
3463			       list_empty(&rbd_dev->running_list));
3464	}
3465	spin_unlock(&rbd_dev->lock_lists_lock);
3466	if (need_wakeup)
3467		complete(&rbd_dev->quiescing_wait);
3468}
3469
3470static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3471{
3472	struct rbd_device *rbd_dev = img_req->rbd_dev;
3473
3474	if (!need_exclusive_lock(img_req))
3475		return 1;
3476
3477	if (rbd_lock_add_request(img_req))
3478		return 1;
3479
3480	/*
3481	 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3482	 * and cancel_delayed_work() in wake_lock_waiters().
3483	 */
3484	dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3485	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3486	return 0;
3487}
3488
3489static void rbd_img_object_requests(struct rbd_img_request *img_req)
3490{
3491	struct rbd_device *rbd_dev = img_req->rbd_dev;
3492	struct rbd_obj_request *obj_req;
3493
3494	rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3495	rbd_assert(!need_exclusive_lock(img_req) ||
3496		   __rbd_is_lock_owner(rbd_dev));
3497
3498	if (rbd_img_is_write(img_req)) {
3499		rbd_assert(!img_req->snapc);
3500		down_read(&rbd_dev->header_rwsem);
3501		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3502		up_read(&rbd_dev->header_rwsem);
3503	}
3504
3505	for_each_obj_request(img_req, obj_req) {
3506		int result = 0;
3507
3508		if (__rbd_obj_handle_request(obj_req, &result)) {
3509			if (result) {
3510				img_req->pending.result = result;
3511				return;
3512			}
3513		} else {
3514			img_req->pending.num_pending++;
3515		}
3516	}
3517}
3518
3519static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3520{
3521	int ret;
3522
3523again:
3524	switch (img_req->state) {
3525	case RBD_IMG_START:
3526		rbd_assert(!*result);
3527
3528		ret = rbd_img_exclusive_lock(img_req);
3529		if (ret < 0) {
3530			*result = ret;
3531			return true;
3532		}
3533		img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3534		if (ret > 0)
3535			goto again;
3536		return false;
3537	case RBD_IMG_EXCLUSIVE_LOCK:
3538		if (*result)
3539			return true;
3540
3541		rbd_img_object_requests(img_req);
3542		if (!img_req->pending.num_pending) {
3543			*result = img_req->pending.result;
3544			img_req->state = RBD_IMG_OBJECT_REQUESTS;
3545			goto again;
3546		}
3547		img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3548		return false;
3549	case __RBD_IMG_OBJECT_REQUESTS:
3550		if (!pending_result_dec(&img_req->pending, result))
3551			return false;
3552		fallthrough;
3553	case RBD_IMG_OBJECT_REQUESTS:
3554		return true;
3555	default:
3556		BUG();
3557	}
3558}
3559
3560/*
3561 * Return true if @img_req is completed.
3562 */
3563static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3564				     int *result)
3565{
3566	struct rbd_device *rbd_dev = img_req->rbd_dev;
3567	bool done;
3568
3569	if (need_exclusive_lock(img_req)) {
3570		down_read(&rbd_dev->lock_rwsem);
3571		mutex_lock(&img_req->state_mutex);
3572		done = rbd_img_advance(img_req, result);
3573		if (done)
3574			rbd_lock_del_request(img_req);
3575		mutex_unlock(&img_req->state_mutex);
3576		up_read(&rbd_dev->lock_rwsem);
3577	} else {
3578		mutex_lock(&img_req->state_mutex);
3579		done = rbd_img_advance(img_req, result);
3580		mutex_unlock(&img_req->state_mutex);
3581	}
3582
3583	if (done && *result) {
3584		rbd_assert(*result < 0);
3585		rbd_warn(rbd_dev, "%s%s result %d",
3586		      test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3587		      obj_op_name(img_req->op_type), *result);
3588	}
3589	return done;
3590}
3591
3592static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3593{
3594again:
3595	if (!__rbd_img_handle_request(img_req, &result))
3596		return;
3597
3598	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3599		struct rbd_obj_request *obj_req = img_req->obj_request;
3600
3601		rbd_img_request_destroy(img_req);
3602		if (__rbd_obj_handle_request(obj_req, &result)) {
3603			img_req = obj_req->img_request;
3604			goto again;
3605		}
3606	} else {
3607		struct request *rq = blk_mq_rq_from_pdu(img_req);
3608
3609		rbd_img_request_destroy(img_req);
3610		blk_mq_end_request(rq, errno_to_blk_status(result));
3611	}
3612}
3613
3614static const struct rbd_client_id rbd_empty_cid;
3615
3616static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3617			  const struct rbd_client_id *rhs)
3618{
3619	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3620}
3621
3622static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3623{
3624	struct rbd_client_id cid;
3625
3626	mutex_lock(&rbd_dev->watch_mutex);
3627	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3628	cid.handle = rbd_dev->watch_cookie;
3629	mutex_unlock(&rbd_dev->watch_mutex);
3630	return cid;
3631}
3632
3633/*
3634 * lock_rwsem must be held for write
3635 */
3636static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3637			      const struct rbd_client_id *cid)
3638{
3639	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3640	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3641	     cid->gid, cid->handle);
3642	rbd_dev->owner_cid = *cid; /* struct */
3643}
3644
3645static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3646{
3647	mutex_lock(&rbd_dev->watch_mutex);
3648	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3649	mutex_unlock(&rbd_dev->watch_mutex);
3650}
3651
3652static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3653{
3654	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3655
3656	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3657	strcpy(rbd_dev->lock_cookie, cookie);
3658	rbd_set_owner_cid(rbd_dev, &cid);
3659	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3660}
3661
3662/*
3663 * lock_rwsem must be held for write
3664 */
3665static int rbd_lock(struct rbd_device *rbd_dev)
3666{
3667	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3668	char cookie[32];
3669	int ret;
3670
3671	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3672		rbd_dev->lock_cookie[0] != '\0');
3673
3674	format_lock_cookie(rbd_dev, cookie);
3675	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3676			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3677			    RBD_LOCK_TAG, "", 0);
3678	if (ret && ret != -EEXIST)
3679		return ret;
3680
3681	__rbd_lock(rbd_dev, cookie);
3682	return 0;
3683}
3684
3685/*
3686 * lock_rwsem must be held for write
3687 */
3688static void rbd_unlock(struct rbd_device *rbd_dev)
3689{
3690	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3691	int ret;
3692
3693	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3694		rbd_dev->lock_cookie[0] == '\0');
3695
3696	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3697			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
3698	if (ret && ret != -ENOENT)
3699		rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3700
3701	/* treat errors as the image is unlocked */
3702	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3703	rbd_dev->lock_cookie[0] = '\0';
3704	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3705	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3706}
3707
3708static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3709				enum rbd_notify_op notify_op,
3710				struct page ***preply_pages,
3711				size_t *preply_len)
3712{
3713	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3714	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3715	char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3716	int buf_size = sizeof(buf);
3717	void *p = buf;
3718
3719	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3720
3721	/* encode *LockPayload NotifyMessage (op + ClientId) */
3722	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3723	ceph_encode_32(&p, notify_op);
3724	ceph_encode_64(&p, cid.gid);
3725	ceph_encode_64(&p, cid.handle);
3726
3727	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3728				&rbd_dev->header_oloc, buf, buf_size,
3729				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3730}
3731
3732static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3733			       enum rbd_notify_op notify_op)
3734{
3735	__rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3736}
3737
3738static void rbd_notify_acquired_lock(struct work_struct *work)
3739{
3740	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3741						  acquired_lock_work);
3742
3743	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3744}
3745
3746static void rbd_notify_released_lock(struct work_struct *work)
3747{
3748	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3749						  released_lock_work);
3750
3751	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3752}
3753
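/*
 * Ask the current lock owner to release the lock: send a REQUEST_LOCK
 * notify on the header object and decode the acknowledgments.  Exactly
 * one acker (the owner) is expected to reply with a non-empty
 * ResponseMessage; its result is returned.  -ETIMEDOUT means no owner
 * responded.
 */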
3754static int rbd_request_lock(struct rbd_device *rbd_dev)
3755{
3756	struct page **reply_pages;
3757	size_t reply_len;
3758	bool lock_owner_responded = false;
3759	int ret;
3760
3761	dout("%s rbd_dev %p\n", __func__, rbd_dev);
3762
3763	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3764				   &reply_pages, &reply_len);
3765	if (ret && ret != -ETIMEDOUT) {
3766		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3767		goto out;
3768	}
3769
3770	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3771		void *p = page_address(reply_pages[0]);
3772		void *const end = p + reply_len;
3773		u32 n;
3774
3775		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3776		while (n--) {
3777			u8 struct_v;
3778			u32 len;
3779
3780			ceph_decode_need(&p, end, 8 + 8, e_inval);
3781			p += 8 + 8; /* skip gid and cookie */
3782
3783			ceph_decode_32_safe(&p, end, len, e_inval);
3784			if (!len)
3785				continue;
3786
3787			if (lock_owner_responded) {
3788				rbd_warn(rbd_dev,
3789					 "duplicate lock owners detected");
3790				ret = -EIO;
3791				goto out;
3792			}
3793
3794			lock_owner_responded = true;
3795			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3796						  &struct_v, &len);
3797			if (ret) {
3798				rbd_warn(rbd_dev,
3799					 "failed to decode ResponseMessage: %d",
3800					 ret);
3801				goto e_inval;
3802			}
3803
3804			ret = ceph_decode_32(&p);
3805		}
3806	}
3807
3808	if (!lock_owner_responded) {
3809		rbd_warn(rbd_dev, "no lock owners detected");
3810		ret = -ETIMEDOUT;
3811	}
3812
3813out:
3814	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3815	return ret;
3816
3817e_inval:
3818	ret = -EINVAL;
3819	goto out;
3820}
3821
3822/*
3823 * Wake up exclusive lock waiters -- either image request state
3824 * machine(s) or rbd_add_acquire_lock() (i.e. "rbd map").
3825 */
3826static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3827{
3828	struct rbd_img_request *img_req;
3829
3830	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3831	lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3832
3833	cancel_delayed_work(&rbd_dev->lock_dwork);
3834	if (!completion_done(&rbd_dev->acquire_wait)) {
3835		rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3836			   list_empty(&rbd_dev->running_list));
3837		rbd_dev->acquire_err = result;
3838		complete_all(&rbd_dev->acquire_wait);
3839		return;
3840	}
3841
3842	while (!list_empty(&rbd_dev->acquiring_list)) {
3843		img_req = list_first_entry(&rbd_dev->acquiring_list,
3844					   struct rbd_img_request, lock_item);
3845		mutex_lock(&img_req->state_mutex);
3846		rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3847		if (!result)
3848			list_move_tail(&img_req->lock_item,
3849				       &rbd_dev->running_list);
3850		else
3851			list_del_init(&img_req->lock_item);
3852		rbd_img_schedule(img_req, result);
3853		mutex_unlock(&img_req->state_mutex);
3854	}
3855}
3856
3857static bool locker_equal(const struct ceph_locker *lhs,
3858			 const struct ceph_locker *rhs)
3859{
3860	return lhs->id.name.type == rhs->id.name.type &&
3861	       lhs->id.name.num == rhs->id.name.num &&
3862	       !strcmp(lhs->id.cookie, rhs->id.cookie) &&
3863	       ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
3864}
3865
3866static void free_locker(struct ceph_locker *locker)
3867{
3868	if (locker)
3869		ceph_free_lockers(locker, 1);
3870}
3871
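/*
 * Look up the current exclusive locker of the header object.  Returns
 * NULL if the header is not locked, ERR_PTR(-EBUSY) if it is locked by
 * an external mechanism or with an incompatible lock type, or a single
 * locker to be freed with free_locker().
 */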
3872static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3873{
3874	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3875	struct ceph_locker *lockers;
3876	u32 num_lockers;
3877	u8 lock_type;
3878	char *lock_tag;
3879	u64 handle;
3880	int ret;
3881
3882	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3883				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3884				 &lock_type, &lock_tag, &lockers, &num_lockers);
3885	if (ret) {
3886		rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3887		return ERR_PTR(ret);
3888	}
3889
3890	if (num_lockers == 0) {
3891		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3892		lockers = NULL;
3893		goto out;
3894	}
3895
3896	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3897		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3898			 lock_tag);
3899		goto err_busy;
3900	}
3901
3902	if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
3903		rbd_warn(rbd_dev, "incompatible lock type detected");
3904		goto err_busy;
3905	}
3906
3907	WARN_ON(num_lockers != 1);
3908	ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
3909		     &handle);
3910	if (ret != 1) {
3911		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3912			 lockers[0].id.cookie);
3913		goto err_busy;
3914	}
3915	if (ceph_addr_is_blank(&lockers[0].info.addr)) {
3916		rbd_warn(rbd_dev, "locker has a blank address");
3917		goto err_busy;
3918	}
3919
3920	dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
3921	     __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
3922	     &lockers[0].info.addr.in_addr,
3923	     le32_to_cpu(lockers[0].info.addr.nonce), handle);
3924
3925out:
3926	kfree(lock_tag);
3927	return lockers;
3928
3929err_busy:
3930	kfree(lock_tag);
3931	ceph_free_lockers(lockers, num_lockers);
3932	return ERR_PTR(-EBUSY);
3933}
3934
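/*
 * Check whether @locker still has a watch established on the header
 * object (i.e. whether it is still alive).  Returns 1 and records the
 * owner's client id if a matching watcher is found, 0 if not, or a
 * negative error code.
 */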
3935static int find_watcher(struct rbd_device *rbd_dev,
3936			const struct ceph_locker *locker)
3937{
3938	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3939	struct ceph_watch_item *watchers;
3940	u32 num_watchers;
3941	u64 cookie;
3942	int i;
3943	int ret;
3944
3945	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3946				      &rbd_dev->header_oloc, &watchers,
3947				      &num_watchers);
3948	if (ret) {
3949		rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
3950		return ret;
3951	}
3952
3953	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3954	for (i = 0; i < num_watchers; i++) {
3955		/*
3956		 * Ignore addr->type while comparing.  This mimics
3957		 * entity_addr_t::get_legacy_str() + strcmp().
3958		 */
3959		if (ceph_addr_equal_no_type(&watchers[i].addr,
3960					    &locker->info.addr) &&
3961		    watchers[i].cookie == cookie) {
3962			struct rbd_client_id cid = {
3963				.gid = le64_to_cpu(watchers[i].name.num),
3964				.handle = cookie,
3965			};
3966
3967			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3968			     rbd_dev, cid.gid, cid.handle);
3969			rbd_set_owner_cid(rbd_dev, &cid);
3970			ret = 1;
3971			goto out;
3972		}
3973	}
3974
3975	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3976	ret = 0;
3977out:
3978	kfree(watchers);
3979	return ret;
3980}
3981
3982/*
3983 * lock_rwsem must be held for write
3984 */
3985static int rbd_try_lock(struct rbd_device *rbd_dev)
3986{
3987	struct ceph_client *client = rbd_dev->rbd_client->client;
3988	struct ceph_locker *locker, *refreshed_locker;
3989	int ret;
3990
3991	for (;;) {
3992		locker = refreshed_locker = NULL;
3993
3994		ret = rbd_lock(rbd_dev);
3995		if (!ret)
3996			goto out;
3997		if (ret != -EBUSY) {
3998			rbd_warn(rbd_dev, "failed to lock header: %d", ret);
3999			goto out;
4000		}
4001
4002		/* determine if the current lock holder is still alive */
4003		locker = get_lock_owner_info(rbd_dev);
4004		if (IS_ERR(locker)) {
4005			ret = PTR_ERR(locker);
4006			locker = NULL;
4007			goto out;
4008		}
4009		if (!locker)
4010			goto again;
4011
4012		ret = find_watcher(rbd_dev, locker);
4013		if (ret)
4014			goto out; /* request lock or error */
4015
4016		refreshed_locker = get_lock_owner_info(rbd_dev);
4017		if (IS_ERR(refreshed_locker)) {
4018			ret = PTR_ERR(refreshed_locker);
4019			refreshed_locker = NULL;
4020			goto out;
4021		}
4022		if (!refreshed_locker ||
4023		    !locker_equal(locker, refreshed_locker))
4024			goto again;
4025
4026		rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4027			 ENTITY_NAME(locker->id.name));
4028
4029		ret = ceph_monc_blocklist_add(&client->monc,
4030					      &locker->info.addr);
4031		if (ret) {
4032			rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4033				 ENTITY_NAME(locker->id.name), ret);
4034			goto out;
4035		}
4036
4037		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4038					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
4039					  locker->id.cookie, &locker->id.name);
4040		if (ret && ret != -ENOENT) {
4041			rbd_warn(rbd_dev, "failed to break header lock: %d",
4042				 ret);
4043			goto out;
4044		}
4045
4046again:
4047		free_locker(refreshed_locker);
4048		free_locker(locker);
4049	}
4050
4051out:
4052	free_locker(refreshed_locker);
4053	free_locker(locker);
4054	return ret;
4055}
4056
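/*
 * Refresh the header and, if the object map feature is enabled, load
 * the object map after the exclusive lock has been acquired.
 */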
4057static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4058{
4059	int ret;
4060
4061	ret = rbd_dev_refresh(rbd_dev);
4062	if (ret)
4063		return ret;
4064
4065	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4066		ret = rbd_object_map_open(rbd_dev);
4067		if (ret)
4068			return ret;
4069	}
4070
4071	return 0;
4072}
4073
4074/*
4075 * Return:
4076 *   0 - lock acquired
4077 *   1 - caller should call rbd_request_lock()
4078 *  <0 - error
4079 */
4080static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4081{
4082	int ret;
4083
4084	down_read(&rbd_dev->lock_rwsem);
4085	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4086	     rbd_dev->lock_state);
4087	if (__rbd_is_lock_owner(rbd_dev)) {
4088		up_read(&rbd_dev->lock_rwsem);
4089		return 0;
4090	}
4091
4092	up_read(&rbd_dev->lock_rwsem);
4093	down_write(&rbd_dev->lock_rwsem);
4094	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4095	     rbd_dev->lock_state);
4096	if (__rbd_is_lock_owner(rbd_dev)) {
4097		up_write(&rbd_dev->lock_rwsem);
4098		return 0;
4099	}
4100
4101	ret = rbd_try_lock(rbd_dev);
4102	if (ret < 0) {
4103		rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4104		goto out;
4105	}
4106	if (ret > 0) {
4107		up_write(&rbd_dev->lock_rwsem);
4108		return ret;
4109	}
4110
4111	rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4112	rbd_assert(list_empty(&rbd_dev->running_list));
4113
4114	ret = rbd_post_acquire_action(rbd_dev);
4115	if (ret) {
4116		rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4117		/*
4118		 * Can't stay in RBD_LOCK_STATE_LOCKED because
4119		 * rbd_lock_add_request() would let the request through,
4120		 * assuming that e.g. object map is locked and loaded.
4121		 */
4122		rbd_unlock(rbd_dev);
4123	}
4124
4125out:
4126	wake_lock_waiters(rbd_dev, ret);
4127	up_write(&rbd_dev->lock_rwsem);
4128	return ret;
4129}
4130
4131static void rbd_acquire_lock(struct work_struct *work)
4132{
4133	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4134					    struct rbd_device, lock_dwork);
4135	int ret;
4136
4137	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4138again:
4139	ret = rbd_try_acquire_lock(rbd_dev);
4140	if (ret <= 0) {
4141		dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4142		return;
4143	}
4144
4145	ret = rbd_request_lock(rbd_dev);
4146	if (ret == -ETIMEDOUT) {
4147		goto again; /* treat this as a dead client */
4148	} else if (ret == -EROFS) {
4149		rbd_warn(rbd_dev, "peer will not release lock");
4150		down_write(&rbd_dev->lock_rwsem);
4151		wake_lock_waiters(rbd_dev, ret);
4152		up_write(&rbd_dev->lock_rwsem);
4153	} else if (ret < 0) {
4154		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4155		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4156				 RBD_RETRY_DELAY);
4157	} else {
4158		/*
4159		 * lock owner acked, but resend if we don't see them
4160		 * release the lock
4161		 */
4162		dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4163		     rbd_dev);
4164		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4165		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4166	}
4167}
4168
4169static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4170{
4171	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4172	lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4173
4174	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4175		return false;
4176
4177	/*
4178	 * Ensure that all in-flight IO is flushed.
4179	 */
4180	rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING;
4181	rbd_assert(!completion_done(&rbd_dev->quiescing_wait));
4182	if (list_empty(&rbd_dev->running_list))
4183		return true;
4184
4185	up_write(&rbd_dev->lock_rwsem);
4186	wait_for_completion(&rbd_dev->quiescing_wait);
4187
4188	down_write(&rbd_dev->lock_rwsem);
4189	if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING)
4190		return false;
4191
4192	rbd_assert(list_empty(&rbd_dev->running_list));
4193	return true;
4194}
4195
4196static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4197{
4198	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4199		rbd_object_map_close(rbd_dev);
4200}
4201
4202static void __rbd_release_lock(struct rbd_device *rbd_dev)
4203{
4204	rbd_assert(list_empty(&rbd_dev->running_list));
4205
4206	rbd_pre_release_action(rbd_dev);
4207	rbd_unlock(rbd_dev);
4208}
4209
4210/*
4211 * lock_rwsem must be held for write
4212 */
4213static void rbd_release_lock(struct rbd_device *rbd_dev)
4214{
4215	if (!rbd_quiesce_lock(rbd_dev))
4216		return;
4217
4218	__rbd_release_lock(rbd_dev);
4219
4220	/*
4221	 * Give others a chance to grab the lock - we would re-acquire
4222	 * almost immediately if we got new IO while draining the running
4223	 * list otherwise.  We need to ack our own notifications, so this
4224	 * lock_dwork will be requeued from rbd_handle_released_lock() by
4225	 * way of maybe_kick_acquire().
4226	 */
4227	cancel_delayed_work(&rbd_dev->lock_dwork);
4228}
4229
4230static void rbd_release_lock_work(struct work_struct *work)
4231{
4232	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4233						  unlock_work);
4234
4235	down_write(&rbd_dev->lock_rwsem);
4236	rbd_release_lock(rbd_dev);
4237	up_write(&rbd_dev->lock_rwsem);
4238}
4239
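/*
 * If we are not the lock owner and there are requests waiting for the
 * lock (or an acquire retry is already scheduled), kick lock_dwork to
 * try to acquire it.
 */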
4240static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4241{
4242	bool have_requests;
4243
4244	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4245	if (__rbd_is_lock_owner(rbd_dev))
4246		return;
4247
4248	spin_lock(&rbd_dev->lock_lists_lock);
4249	have_requests = !list_empty(&rbd_dev->acquiring_list);
4250	spin_unlock(&rbd_dev->lock_lists_lock);
4251	if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4252		dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4253		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4254	}
4255}
4256
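/*
 * Handle an ACQUIRED_LOCK notification: record the new owner's client
 * id (if one was included) and kick lock acquisition if we still have
 * waiters.
 */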
4257static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4258				     void **p)
4259{
4260	struct rbd_client_id cid = { 0 };
4261
4262	if (struct_v >= 2) {
4263		cid.gid = ceph_decode_64(p);
4264		cid.handle = ceph_decode_64(p);
4265	}
4266
4267	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4268	     cid.handle);
4269	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4270		down_write(&rbd_dev->lock_rwsem);
4271		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4272			dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4273			     __func__, rbd_dev, cid.gid, cid.handle);
4274		} else {
4275			rbd_set_owner_cid(rbd_dev, &cid);
4276		}
4277		downgrade_write(&rbd_dev->lock_rwsem);
4278	} else {
4279		down_read(&rbd_dev->lock_rwsem);
4280	}
4281
4282	maybe_kick_acquire(rbd_dev);
4283	up_read(&rbd_dev->lock_rwsem);
4284}
4285
4286static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4287				     void **p)
4288{
4289	struct rbd_client_id cid = { 0 };
4290
4291	if (struct_v >= 2) {
4292		cid.gid = ceph_decode_64(p);
4293		cid.handle = ceph_decode_64(p);
4294	}
4295
4296	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4297	     cid.handle);
4298	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4299		down_write(&rbd_dev->lock_rwsem);
4300		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4301			dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4302			     __func__, rbd_dev, cid.gid, cid.handle,
4303			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4304		} else {
4305			rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4306		}
4307		downgrade_write(&rbd_dev->lock_rwsem);
4308	} else {
4309		down_read(&rbd_dev->lock_rwsem);
4310	}
4311
4312	maybe_kick_acquire(rbd_dev);
4313	up_read(&rbd_dev->lock_rwsem);
4314}
4315
4316/*
4317 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4318 * ResponseMessage is needed.
4319 */
4320static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4321				   void **p)
4322{
4323	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4324	struct rbd_client_id cid = { 0 };
4325	int result = 1;
4326
4327	if (struct_v >= 2) {
4328		cid.gid = ceph_decode_64(p);
4329		cid.handle = ceph_decode_64(p);
4330	}
4331
4332	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4333	     cid.handle);
4334	if (rbd_cid_equal(&cid, &my_cid))
4335		return result;
4336
4337	down_read(&rbd_dev->lock_rwsem);
4338	if (__rbd_is_lock_owner(rbd_dev)) {
4339		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4340		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4341			goto out_unlock;
4342
4343		/*
4344		 * encode ResponseMessage(0) so the peer can detect
4345		 * a missing owner
4346		 */
4347		result = 0;
4348
4349		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4350			if (!rbd_dev->opts->exclusive) {
4351				dout("%s rbd_dev %p queueing unlock_work\n",
4352				     __func__, rbd_dev);
4353				queue_work(rbd_dev->task_wq,
4354					   &rbd_dev->unlock_work);
4355			} else {
4356				/* refuse to release the lock */
4357				result = -EROFS;
4358			}
4359		}
4360	}
4361
4362out_unlock:
4363	up_read(&rbd_dev->lock_rwsem);
4364	return result;
4365}
4366
4367static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4368				     u64 notify_id, u64 cookie, s32 *result)
4369{
4370	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4371	char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4372	int buf_size = sizeof(buf);
4373	int ret;
4374
4375	if (result) {
4376		void *p = buf;
4377
4378		/* encode ResponseMessage */
4379		ceph_start_encoding(&p, 1, 1,
4380				    buf_size - CEPH_ENCODING_START_BLK_LEN);
4381		ceph_encode_32(&p, *result);
4382	} else {
4383		buf_size = 0;
4384	}
4385
4386	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4387				   &rbd_dev->header_oloc, notify_id, cookie,
4388				   buf, buf_size);
4389	if (ret)
4390		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4391}
4392
4393static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4394				   u64 cookie)
4395{
4396	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4397	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4398}
4399
4400static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4401					  u64 notify_id, u64 cookie, s32 result)
4402{
4403	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4404	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4405}
4406
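/*
 * watch/notify callback: decode the NotifyMessage and dispatch on the
 * notify op.  An empty payload is a legacy header update notification.
 */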
4407static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4408			 u64 notifier_id, void *data, size_t data_len)
4409{
4410	struct rbd_device *rbd_dev = arg;
4411	void *p = data;
4412	void *const end = p + data_len;
4413	u8 struct_v = 0;
4414	u32 len;
4415	u32 notify_op;
4416	int ret;
4417
4418	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4419	     __func__, rbd_dev, cookie, notify_id, data_len);
4420	if (data_len) {
4421		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4422					  &struct_v, &len);
4423		if (ret) {
4424			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4425				 ret);
4426			return;
4427		}
4428
4429		notify_op = ceph_decode_32(&p);
4430	} else {
4431		/* legacy notification for header updates */
4432		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4433		len = 0;
4434	}
4435
4436	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4437	switch (notify_op) {
4438	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4439		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4440		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4441		break;
4442	case RBD_NOTIFY_OP_RELEASED_LOCK:
4443		rbd_handle_released_lock(rbd_dev, struct_v, &p);
4444		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4445		break;
4446	case RBD_NOTIFY_OP_REQUEST_LOCK:
4447		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4448		if (ret <= 0)
4449			rbd_acknowledge_notify_result(rbd_dev, notify_id,
4450						      cookie, ret);
4451		else
4452			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4453		break;
4454	case RBD_NOTIFY_OP_HEADER_UPDATE:
4455		ret = rbd_dev_refresh(rbd_dev);
4456		if (ret)
4457			rbd_warn(rbd_dev, "refresh failed: %d", ret);
4458
4459		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4460		break;
4461	default:
4462		if (rbd_is_lock_owner(rbd_dev))
4463			rbd_acknowledge_notify_result(rbd_dev, notify_id,
4464						      cookie, -EOPNOTSUPP);
4465		else
4466			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4467		break;
4468	}
4469}
4470
4471static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4472
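/*
 * Watch error callback: forget the lock owner, tear down the watch and
 * schedule re-registration via watch_dwork.
 */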
4473static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4474{
4475	struct rbd_device *rbd_dev = arg;
4476
4477	rbd_warn(rbd_dev, "encountered watch error: %d", err);
4478
4479	down_write(&rbd_dev->lock_rwsem);
4480	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4481	up_write(&rbd_dev->lock_rwsem);
4482
4483	mutex_lock(&rbd_dev->watch_mutex);
4484	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4485		__rbd_unregister_watch(rbd_dev);
4486		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4487
4488		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4489	}
4490	mutex_unlock(&rbd_dev->watch_mutex);
4491}
4492
4493/*
4494 * watch_mutex must be locked
4495 */
4496static int __rbd_register_watch(struct rbd_device *rbd_dev)
4497{
4498	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4499	struct ceph_osd_linger_request *handle;
4500
4501	rbd_assert(!rbd_dev->watch_handle);
4502	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4503
4504	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4505				 &rbd_dev->header_oloc, rbd_watch_cb,
4506				 rbd_watch_errcb, rbd_dev);
4507	if (IS_ERR(handle))
4508		return PTR_ERR(handle);
4509
4510	rbd_dev->watch_handle = handle;
4511	return 0;
4512}
4513
4514/*
4515 * watch_mutex must be locked
4516 */
4517static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4518{
4519	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4520	int ret;
4521
4522	rbd_assert(rbd_dev->watch_handle);
4523	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4524
4525	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4526	if (ret)
4527		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4528
4529	rbd_dev->watch_handle = NULL;
4530}
4531
4532static int rbd_register_watch(struct rbd_device *rbd_dev)
4533{
4534	int ret;
4535
4536	mutex_lock(&rbd_dev->watch_mutex);
4537	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4538	ret = __rbd_register_watch(rbd_dev);
4539	if (ret)
4540		goto out;
4541
4542	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4543	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4544
4545out:
4546	mutex_unlock(&rbd_dev->watch_mutex);
4547	return ret;
4548}
4549
4550static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4551{
4552	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4553
4554	cancel_work_sync(&rbd_dev->acquired_lock_work);
4555	cancel_work_sync(&rbd_dev->released_lock_work);
4556	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4557	cancel_work_sync(&rbd_dev->unlock_work);
4558}
4559
4560/*
4561 * header_rwsem must not be held to avoid a deadlock with
4562 * rbd_dev_refresh() when flushing notifies.
4563 */
4564static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4565{
4566	cancel_tasks_sync(rbd_dev);
4567
4568	mutex_lock(&rbd_dev->watch_mutex);
4569	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4570		__rbd_unregister_watch(rbd_dev);
4571	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4572	mutex_unlock(&rbd_dev->watch_mutex);
4573
4574	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4575	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4576}
4577
4578/*
4579 * lock_rwsem must be held for write
4580 */
4581static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4582{
4583	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4584	char cookie[32];
4585	int ret;
4586
4587	if (!rbd_quiesce_lock(rbd_dev))
4588		return;
4589
4590	format_lock_cookie(rbd_dev, cookie);
4591	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4592				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
4593				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4594				  RBD_LOCK_TAG, cookie);
4595	if (ret) {
4596		if (ret != -EOPNOTSUPP)
4597			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4598				 ret);
4599
4600		if (rbd_dev->opts->exclusive)
4601			rbd_warn(rbd_dev,
4602			     "temporarily releasing lock on exclusive mapping");
4603
4604		/*
4605		 * Lock cookie cannot be updated on older OSDs, so do
4606		 * a manual release and queue an acquire.
4607		 */
4608		__rbd_release_lock(rbd_dev);
4609		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4610	} else {
4611		__rbd_lock(rbd_dev, cookie);
4612		wake_lock_waiters(rbd_dev, 0);
4613	}
4614}
4615
4616static void rbd_reregister_watch(struct work_struct *work)
4617{
4618	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4619					    struct rbd_device, watch_dwork);
4620	int ret;
4621
4622	dout("%s rbd_dev %p\n", __func__, rbd_dev);
4623
4624	mutex_lock(&rbd_dev->watch_mutex);
4625	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4626		mutex_unlock(&rbd_dev->watch_mutex);
4627		return;
4628	}
4629
4630	ret = __rbd_register_watch(rbd_dev);
4631	if (ret) {
4632		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4633		if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4634			queue_delayed_work(rbd_dev->task_wq,
4635					   &rbd_dev->watch_dwork,
4636					   RBD_RETRY_DELAY);
4637			mutex_unlock(&rbd_dev->watch_mutex);
4638			return;
4639		}
4640
4641		mutex_unlock(&rbd_dev->watch_mutex);
4642		down_write(&rbd_dev->lock_rwsem);
4643		wake_lock_waiters(rbd_dev, ret);
4644		up_write(&rbd_dev->lock_rwsem);
4645		return;
4646	}
4647
4648	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4649	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4650	mutex_unlock(&rbd_dev->watch_mutex);
4651
4652	down_write(&rbd_dev->lock_rwsem);
4653	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4654		rbd_reacquire_lock(rbd_dev);
4655	up_write(&rbd_dev->lock_rwsem);
4656
4657	ret = rbd_dev_refresh(rbd_dev);
4658	if (ret)
4659		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4660}
4661
4662/*
4663 * Synchronous osd object method call.  Returns the number of bytes
4664 * returned in the inbound (reply) buffer, or a negative error code.
4665 */
4666static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4667			     struct ceph_object_id *oid,
4668			     struct ceph_object_locator *oloc,
4669			     const char *method_name,
4670			     const void *outbound,
4671			     size_t outbound_size,
4672			     void *inbound,
4673			     size_t inbound_size)
4674{
4675	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4676	struct page *req_page = NULL;
4677	struct page *reply_page;
4678	int ret;
4679
4680	/*
4681	 * Method calls are ultimately read operations.  The result
4682	 * should be placed into the inbound buffer provided.  They
4683	 * also supply outbound data--parameters for the object
4684	 * method.  Currently, when outbound data is present, it is
4685	 * typically a snapshot id.
4686	 */
4687	if (outbound) {
4688		if (outbound_size > PAGE_SIZE)
4689			return -E2BIG;
4690
4691		req_page = alloc_page(GFP_KERNEL);
4692		if (!req_page)
4693			return -ENOMEM;
4694
4695		memcpy(page_address(req_page), outbound, outbound_size);
4696	}
4697
4698	reply_page = alloc_page(GFP_KERNEL);
4699	if (!reply_page) {
4700		if (req_page)
4701			__free_page(req_page);
4702		return -ENOMEM;
4703	}
4704
4705	ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4706			     CEPH_OSD_FLAG_READ, req_page, outbound_size,
4707			     &reply_page, &inbound_size);
4708	if (!ret) {
4709		memcpy(inbound, page_address(reply_page), inbound_size);
4710		ret = inbound_size;
4711	}
4712
4713	if (req_page)
4714		__free_page(req_page);
4715	__free_page(reply_page);
4716	return ret;
4717}
4718
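/*
 * Per-request worker: capture the header, check that the request fits
 * within the mapping, set up the object requests from the bio (or with
 * no data for discard/zeroout) and kick the image request state
 * machine.
 */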
4719static void rbd_queue_workfn(struct work_struct *work)
4720{
4721	struct rbd_img_request *img_request =
4722	    container_of(work, struct rbd_img_request, work);
4723	struct rbd_device *rbd_dev = img_request->rbd_dev;
4724	enum obj_operation_type op_type = img_request->op_type;
4725	struct request *rq = blk_mq_rq_from_pdu(img_request);
4726	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4727	u64 length = blk_rq_bytes(rq);
4728	u64 mapping_size;
4729	int result;
4730
4731	/* Ignore/skip any zero-length requests */
4732	if (!length) {
4733		dout("%s: zero-length request\n", __func__);
4734		result = 0;
4735		goto err_img_request;
4736	}
4737
4738	blk_mq_start_request(rq);
4739
4740	down_read(&rbd_dev->header_rwsem);
4741	mapping_size = rbd_dev->mapping.size;
4742	rbd_img_capture_header(img_request);
4743	up_read(&rbd_dev->header_rwsem);
4744
4745	if (offset + length > mapping_size) {
4746		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4747			 length, mapping_size);
4748		result = -EIO;
4749		goto err_img_request;
4750	}
4751
4752	dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4753	     img_request, obj_op_name(op_type), offset, length);
4754
4755	if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4756		result = rbd_img_fill_nodata(img_request, offset, length);
4757	else
4758		result = rbd_img_fill_from_bio(img_request, offset, length,
4759					       rq->bio);
4760	if (result)
4761		goto err_img_request;
4762
4763	rbd_img_handle_request(img_request, 0);
4764	return;
4765
4766err_img_request:
4767	rbd_img_request_destroy(img_request);
4768	if (result)
4769		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4770			 obj_op_name(op_type), length, offset, result);
4771	blk_mq_end_request(rq, errno_to_blk_status(result));
4772}
4773
4774static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4775		const struct blk_mq_queue_data *bd)
4776{
4777	struct rbd_device *rbd_dev = hctx->queue->queuedata;
4778	struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4779	enum obj_operation_type op_type;
4780
4781	switch (req_op(bd->rq)) {
4782	case REQ_OP_DISCARD:
4783		op_type = OBJ_OP_DISCARD;
4784		break;
4785	case REQ_OP_WRITE_ZEROES:
4786		op_type = OBJ_OP_ZEROOUT;
4787		break;
4788	case REQ_OP_WRITE:
4789		op_type = OBJ_OP_WRITE;
4790		break;
4791	case REQ_OP_READ:
4792		op_type = OBJ_OP_READ;
4793		break;
4794	default:
4795		rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4796		return BLK_STS_IOERR;
4797	}
4798
4799	rbd_img_request_init(img_req, rbd_dev, op_type);
4800
4801	if (rbd_img_is_write(img_req)) {
4802		if (rbd_is_ro(rbd_dev)) {
4803			rbd_warn(rbd_dev, "%s on read-only mapping",
4804				 obj_op_name(img_req->op_type));
4805			return BLK_STS_IOERR;
4806		}
4807		rbd_assert(!rbd_is_snap(rbd_dev));
4808	}
4809
4810	INIT_WORK(&img_req->work, rbd_queue_workfn);
4811	queue_work(rbd_wq, &img_req->work);
4812	return BLK_STS_OK;
4813}
4814
4815static void rbd_free_disk(struct rbd_device *rbd_dev)
4816{
4817	put_disk(rbd_dev->disk);
4818	blk_mq_free_tag_set(&rbd_dev->tag_set);
4819	rbd_dev->disk = NULL;
4820}
4821
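/*
 * Synchronously read up to @buf_len bytes from the given object into
 * @buf.  Returns the number of bytes read or a negative error code.
 */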
4822static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4823			     struct ceph_object_id *oid,
4824			     struct ceph_object_locator *oloc,
4825			     void *buf, int buf_len)
4826
4827{
4828	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4829	struct ceph_osd_request *req;
4830	struct page **pages;
4831	int num_pages = calc_pages_for(0, buf_len);
4832	int ret;
4833
4834	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4835	if (!req)
4836		return -ENOMEM;
4837
4838	ceph_oid_copy(&req->r_base_oid, oid);
4839	ceph_oloc_copy(&req->r_base_oloc, oloc);
4840	req->r_flags = CEPH_OSD_FLAG_READ;
4841
4842	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4843	if (IS_ERR(pages)) {
4844		ret = PTR_ERR(pages);
4845		goto out_req;
4846	}
4847
4848	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4849	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4850					 true);
4851
4852	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4853	if (ret)
4854		goto out_req;
4855
4856	ceph_osdc_start_request(osdc, req);
4857	ret = ceph_osdc_wait_request(osdc, req);
4858	if (ret >= 0)
4859		ceph_copy_from_page_vector(pages, buf, 0, ret);
4860
4861out_req:
4862	ceph_osdc_put_request(req);
4863	return ret;
4864}
4865
4866/*
4867 * Read the complete header for the given rbd device.  On successful
4868 * return, the rbd_dev->header field will contain up-to-date
4869 * information about the image.
4870 */
4871static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
4872				  struct rbd_image_header *header,
4873				  bool first_time)
4874{
4875	struct rbd_image_header_ondisk *ondisk = NULL;
4876	u32 snap_count = 0;
4877	u64 names_size = 0;
4878	u32 want_count;
4879	int ret;
4880
4881	/*
4882	 * The complete header will include an array of its 64-bit
4883	 * snapshot ids, followed by the names of those snapshots as
4884	 * a contiguous block of NUL-terminated strings.  Note that
4885	 * the number of snapshots could change by the time we read
4886	 * it in, in which case we re-read it.
4887	 */
4888	do {
4889		size_t size;
4890
4891		kfree(ondisk);
4892
4893		size = sizeof (*ondisk);
4894		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4895		size += names_size;
4896		ondisk = kmalloc(size, GFP_KERNEL);
4897		if (!ondisk)
4898			return -ENOMEM;
4899
4900		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4901					&rbd_dev->header_oloc, ondisk, size);
4902		if (ret < 0)
4903			goto out;
4904		if ((size_t)ret < size) {
4905			ret = -ENXIO;
4906			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4907				size, ret);
4908			goto out;
4909		}
4910		if (!rbd_dev_ondisk_valid(ondisk)) {
4911			ret = -ENXIO;
4912			rbd_warn(rbd_dev, "invalid header");
4913			goto out;
4914		}
4915
4916		names_size = le64_to_cpu(ondisk->snap_names_len);
4917		want_count = snap_count;
4918		snap_count = le32_to_cpu(ondisk->snap_count);
4919	} while (snap_count != want_count);
4920
4921	ret = rbd_header_from_disk(header, ondisk, first_time);
4922out:
4923	kfree(ondisk);
4924
4925	return ret;
4926}
4927
4928static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4929{
4930	sector_t size;
4931
4932	/*
4933	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4934	 * try to update its size.  If REMOVING is set, updating size
4935	 * is just useless work since the device can't be opened.
4936	 */
4937	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4938	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4939		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4940		dout("setting size to %llu sectors", (unsigned long long)size);
4941		set_capacity_and_notify(rbd_dev->disk, size);
4942	}
4943}
4944
4945static const struct blk_mq_ops rbd_mq_ops = {
4946	.queue_rq	= rbd_queue_rq,
4947};
4948
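/*
 * Set up the blk-mq tag set and allocate the gendisk for this mapping,
 * with queue limits derived from the object set size and the mapping
 * options (alloc_size, trim, etc).
 */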
4949static int rbd_init_disk(struct rbd_device *rbd_dev)
4950{
4951	struct gendisk *disk;
4952	unsigned int objset_bytes =
4953	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
4954	struct queue_limits lim = {
4955		.max_hw_sectors		= objset_bytes >> SECTOR_SHIFT,
4956		.io_opt			= objset_bytes,
4957		.io_min			= rbd_dev->opts->alloc_size,
4958		.max_segments		= USHRT_MAX,
4959		.max_segment_size	= UINT_MAX,
4960	};
4961	int err;
4962
4963	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4964	rbd_dev->tag_set.ops = &rbd_mq_ops;
4965	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4966	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4967	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4968	rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4969	rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4970
4971	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4972	if (err)
4973		return err;
4974
4975	if (rbd_dev->opts->trim) {
4976		lim.discard_granularity = rbd_dev->opts->alloc_size;
4977		lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT;
4978		lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
4979	}
4980
4981	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4982		lim.features |= BLK_FEAT_STABLE_WRITES;
4983
4984	disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
4985	if (IS_ERR(disk)) {
4986		err = PTR_ERR(disk);
4987		goto out_tag_set;
4988	}
4989
4990	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4991		 rbd_dev->dev_id);
4992	disk->major = rbd_dev->major;
4993	disk->first_minor = rbd_dev->minor;
4994	if (single_major)
4995		disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
4996	else
4997		disk->minors = RBD_MINORS_PER_MAJOR;
4998	disk->fops = &rbd_bd_ops;
4999	disk->private_data = rbd_dev;
5000	rbd_dev->disk = disk;
5001
5002	return 0;
5003out_tag_set:
5004	blk_mq_free_tag_set(&rbd_dev->tag_set);
5005	return err;
5006}
5007
5008/*
5009  sysfs
5010*/
5011
5012static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5013{
5014	return container_of(dev, struct rbd_device, dev);
5015}
5016
5017static ssize_t rbd_size_show(struct device *dev,
5018			     struct device_attribute *attr, char *buf)
5019{
5020	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5021
5022	return sprintf(buf, "%llu\n",
5023		(unsigned long long)rbd_dev->mapping.size);
5024}
5025
5026static ssize_t rbd_features_show(struct device *dev,
5027			     struct device_attribute *attr, char *buf)
5028{
5029	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5030
5031	return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5032}
5033
5034static ssize_t rbd_major_show(struct device *dev,
5035			      struct device_attribute *attr, char *buf)
5036{
5037	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5038
5039	if (rbd_dev->major)
5040		return sprintf(buf, "%d\n", rbd_dev->major);
5041
5042	return sprintf(buf, "(none)\n");
5043}
5044
5045static ssize_t rbd_minor_show(struct device *dev,
5046			      struct device_attribute *attr, char *buf)
5047{
5048	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5049
5050	return sprintf(buf, "%d\n", rbd_dev->minor);
5051}
5052
5053static ssize_t rbd_client_addr_show(struct device *dev,
5054				    struct device_attribute *attr, char *buf)
5055{
5056	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5057	struct ceph_entity_addr *client_addr =
5058	    ceph_client_addr(rbd_dev->rbd_client->client);
5059
5060	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5061		       le32_to_cpu(client_addr->nonce));
5062}
5063
5064static ssize_t rbd_client_id_show(struct device *dev,
5065				  struct device_attribute *attr, char *buf)
5066{
5067	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5068
5069	return sprintf(buf, "client%lld\n",
5070		       ceph_client_gid(rbd_dev->rbd_client->client));
5071}
5072
5073static ssize_t rbd_cluster_fsid_show(struct device *dev,
5074				     struct device_attribute *attr, char *buf)
5075{
5076	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5077
5078	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5079}
5080
5081static ssize_t rbd_config_info_show(struct device *dev,
5082				    struct device_attribute *attr, char *buf)
5083{
5084	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5085
5086	if (!capable(CAP_SYS_ADMIN))
5087		return -EPERM;
5088
5089	return sprintf(buf, "%s\n", rbd_dev->config_info);
5090}
5091
5092static ssize_t rbd_pool_show(struct device *dev,
5093			     struct device_attribute *attr, char *buf)
5094{
5095	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5096
5097	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5098}
5099
5100static ssize_t rbd_pool_id_show(struct device *dev,
5101			     struct device_attribute *attr, char *buf)
5102{
5103	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5104
5105	return sprintf(buf, "%llu\n",
5106			(unsigned long long) rbd_dev->spec->pool_id);
5107}
5108
5109static ssize_t rbd_pool_ns_show(struct device *dev,
5110				struct device_attribute *attr, char *buf)
5111{
5112	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5113
5114	return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5115}
5116
5117static ssize_t rbd_name_show(struct device *dev,
5118			     struct device_attribute *attr, char *buf)
5119{
5120	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5121
5122	if (rbd_dev->spec->image_name)
5123		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5124
5125	return sprintf(buf, "(unknown)\n");
5126}
5127
5128static ssize_t rbd_image_id_show(struct device *dev,
5129			     struct device_attribute *attr, char *buf)
5130{
5131	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5132
5133	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5134}
5135
5136/*
5137 * Shows the name of the currently-mapped snapshot (or
5138 * RBD_SNAP_HEAD_NAME for the base image).
5139 */
5140static ssize_t rbd_snap_show(struct device *dev,
5141			     struct device_attribute *attr,
5142			     char *buf)
5143{
5144	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5145
5146	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5147}
5148
5149static ssize_t rbd_snap_id_show(struct device *dev,
5150				struct device_attribute *attr, char *buf)
5151{
5152	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5153
5154	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5155}
5156
5157/*
5158 * For a v2 image, shows the chain of parent images, separated by empty
5159 * lines.  For v1 images or if there is no parent, shows "(no parent
5160 * image)".
5161 */
5162static ssize_t rbd_parent_show(struct device *dev,
5163			       struct device_attribute *attr,
5164			       char *buf)
5165{
5166	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5167	ssize_t count = 0;
5168
5169	if (!rbd_dev->parent)
5170		return sprintf(buf, "(no parent image)\n");
5171
5172	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5173		struct rbd_spec *spec = rbd_dev->parent_spec;
5174
5175		count += sprintf(&buf[count], "%s"
5176			    "pool_id %llu\npool_name %s\n"
5177			    "pool_ns %s\n"
5178			    "image_id %s\nimage_name %s\n"
5179			    "snap_id %llu\nsnap_name %s\n"
5180			    "overlap %llu\n",
5181			    !count ? "" : "\n", /* first? */
5182			    spec->pool_id, spec->pool_name,
5183			    spec->pool_ns ?: "",
5184			    spec->image_id, spec->image_name ?: "(unknown)",
5185			    spec->snap_id, spec->snap_name,
5186			    rbd_dev->parent_overlap);
5187	}
5188
5189	return count;
5190}
5191
5192static ssize_t rbd_image_refresh(struct device *dev,
5193				 struct device_attribute *attr,
5194				 const char *buf,
5195				 size_t size)
5196{
5197	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5198	int ret;
5199
5200	if (!capable(CAP_SYS_ADMIN))
5201		return -EPERM;
5202
5203	ret = rbd_dev_refresh(rbd_dev);
5204	if (ret)
5205		return ret;
5206
5207	return size;
5208}
5209
5210static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5211static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5212static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5213static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5214static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5215static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5216static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5217static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5218static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5219static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5220static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5221static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5222static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5223static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5224static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5225static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5226static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5227
5228static struct attribute *rbd_attrs[] = {
5229	&dev_attr_size.attr,
5230	&dev_attr_features.attr,
5231	&dev_attr_major.attr,
5232	&dev_attr_minor.attr,
5233	&dev_attr_client_addr.attr,
5234	&dev_attr_client_id.attr,
5235	&dev_attr_cluster_fsid.attr,
5236	&dev_attr_config_info.attr,
5237	&dev_attr_pool.attr,
5238	&dev_attr_pool_id.attr,
5239	&dev_attr_pool_ns.attr,
5240	&dev_attr_name.attr,
5241	&dev_attr_image_id.attr,
5242	&dev_attr_current_snap.attr,
5243	&dev_attr_snap_id.attr,
5244	&dev_attr_parent.attr,
5245	&dev_attr_refresh.attr,
5246	NULL
5247};
5248
5249static struct attribute_group rbd_attr_group = {
5250	.attrs = rbd_attrs,
5251};
5252
5253static const struct attribute_group *rbd_attr_groups[] = {
5254	&rbd_attr_group,
5255	NULL
5256};
5257
5258static void rbd_dev_release(struct device *dev);
5259
5260static const struct device_type rbd_device_type = {
5261	.name		= "rbd",
5262	.groups		= rbd_attr_groups,
5263	.release	= rbd_dev_release,
5264};
5265
5266static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5267{
5268	kref_get(&spec->kref);
5269
5270	return spec;
5271}
5272
5273static void rbd_spec_free(struct kref *kref);
5274static void rbd_spec_put(struct rbd_spec *spec)
5275{
5276	if (spec)
5277		kref_put(&spec->kref, rbd_spec_free);
5278}
5279
5280static struct rbd_spec *rbd_spec_alloc(void)
5281{
5282	struct rbd_spec *spec;
5283
5284	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5285	if (!spec)
5286		return NULL;
5287
5288	spec->pool_id = CEPH_NOPOOL;
5289	spec->snap_id = CEPH_NOSNAP;
5290	kref_init(&spec->kref);
5291
5292	return spec;
5293}
5294
5295static void rbd_spec_free(struct kref *kref)
5296{
5297	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5298
5299	kfree(spec->pool_name);
5300	kfree(spec->pool_ns);
5301	kfree(spec->image_id);
5302	kfree(spec->image_name);
5303	kfree(spec->snap_name);
5304	kfree(spec);
5305}
5306
5307static void rbd_dev_free(struct rbd_device *rbd_dev)
5308{
5309	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5310	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5311
5312	ceph_oid_destroy(&rbd_dev->header_oid);
5313	ceph_oloc_destroy(&rbd_dev->header_oloc);
5314	kfree(rbd_dev->config_info);
5315
5316	rbd_put_client(rbd_dev->rbd_client);
5317	rbd_spec_put(rbd_dev->spec);
5318	kfree(rbd_dev->opts);
5319	kfree(rbd_dev);
5320}
5321
5322static void rbd_dev_release(struct device *dev)
5323{
5324	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5325	bool need_put = !!rbd_dev->opts;
5326
5327	if (need_put) {
5328		destroy_workqueue(rbd_dev->task_wq);
5329		ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
5330	}
5331
5332	rbd_dev_free(rbd_dev);
5333
5334	 * This is racy, but way better than dropping the module reference
5335	 * outside of the release callback.  The race window is pretty small, so
5336	 * the release callback.  The race window is pretty small, so
5337	 * doing something similar to dm (dm-builtin.c) is overkill.
5338	 */
5339	if (need_put)
5340		module_put(THIS_MODULE);
5341}
5342
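/*
 * Allocate and initialize an rbd_device: locks, work items, lists and
 * the embedded struct device.  Does not assign a device id or create
 * the task workqueue; see rbd_dev_create() for that.
 */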
5343static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
5344{
5345	struct rbd_device *rbd_dev;
5346
5347	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5348	if (!rbd_dev)
5349		return NULL;
5350
5351	spin_lock_init(&rbd_dev->lock);
5352	INIT_LIST_HEAD(&rbd_dev->node);
5353	init_rwsem(&rbd_dev->header_rwsem);
5354
5355	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5356	ceph_oid_init(&rbd_dev->header_oid);
5357	rbd_dev->header_oloc.pool = spec->pool_id;
5358	if (spec->pool_ns) {
5359		WARN_ON(!*spec->pool_ns);
5360		rbd_dev->header_oloc.pool_ns =
5361		    ceph_find_or_create_string(spec->pool_ns,
5362					       strlen(spec->pool_ns));
5363	}
5364
5365	mutex_init(&rbd_dev->watch_mutex);
5366	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5367	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5368
5369	init_rwsem(&rbd_dev->lock_rwsem);
5370	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5371	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5372	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5373	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5374	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5375	spin_lock_init(&rbd_dev->lock_lists_lock);
5376	INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5377	INIT_LIST_HEAD(&rbd_dev->running_list);
5378	init_completion(&rbd_dev->acquire_wait);
5379	init_completion(&rbd_dev->quiescing_wait);
5380
5381	spin_lock_init(&rbd_dev->object_map_lock);
5382
5383	rbd_dev->dev.bus = &rbd_bus_type;
5384	rbd_dev->dev.type = &rbd_device_type;
5385	rbd_dev->dev.parent = &rbd_root_dev;
5386	device_initialize(&rbd_dev->dev);
5387
5388	return rbd_dev;
5389}
5390
5391/*
5392 * Create a mapping rbd_dev.
5393 */
5394static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5395					 struct rbd_spec *spec,
5396					 struct rbd_options *opts)
5397{
5398	struct rbd_device *rbd_dev;
5399
5400	rbd_dev = __rbd_dev_create(spec);
5401	if (!rbd_dev)
5402		return NULL;
5403
5404	/* get an id and fill in device name */
5405	rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
5406					minor_to_rbd_dev_id(1 << MINORBITS) - 1,
5407					GFP_KERNEL);
5408	if (rbd_dev->dev_id < 0)
5409		goto fail_rbd_dev;
5410
5411	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5412	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5413						   rbd_dev->name);
5414	if (!rbd_dev->task_wq)
5415		goto fail_dev_id;
5416
5417	/* we have a ref from do_rbd_add() */
5418	__module_get(THIS_MODULE);
5419
5420	rbd_dev->rbd_client = rbdc;
5421	rbd_dev->spec = spec;
5422	rbd_dev->opts = opts;
5423
5424	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5425	return rbd_dev;
5426
5427fail_dev_id:
5428	ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
5429fail_rbd_dev:
5430	rbd_dev_free(rbd_dev);
5431	return NULL;
5432}
5433
5434static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5435{
5436	if (rbd_dev)
5437		put_device(&rbd_dev->dev);
5438}
5439
5440/*
5441 * Get the size and object order for an image snapshot, or if
5442 * snap_id is CEPH_NOSNAP, gets this information for the base
5443 * image.
5444 */
5445static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5446				u8 *order, u64 *snap_size)
5447{
5448	__le64 snapid = cpu_to_le64(snap_id);
5449	int ret;
5450	struct {
5451		u8 order;
5452		__le64 size;
5453	} __attribute__ ((packed)) size_buf = { 0 };
5454
5455	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5456				  &rbd_dev->header_oloc, "get_size",
5457				  &snapid, sizeof(snapid),
5458				  &size_buf, sizeof(size_buf));
5459	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5460	if (ret < 0)
5461		return ret;
5462	if (ret < sizeof (size_buf))
5463		return -ERANGE;
5464
5465	if (order) {
5466		*order = size_buf.order;
5467		dout("  order %u", (unsigned int)*order);
5468	}
5469	*snap_size = le64_to_cpu(size_buf.size);
5470
5471	dout("  snap_id 0x%016llx snap_size = %llu\n",
5472		(unsigned long long)snap_id,
5473		(unsigned long long)*snap_size);
5474
5475	return 0;
5476}
5477
5478static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
5479				    char **pobject_prefix)
5480{
5481	size_t size;
5482	void *reply_buf;
5483	char *object_prefix;
5484	int ret;
5485	void *p;
5486
5487	/* Response will be an encoded string, which includes a length */
5488	size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5489	reply_buf = kzalloc(size, GFP_KERNEL);
5490	if (!reply_buf)
5491		return -ENOMEM;
5492
5493	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5494				  &rbd_dev->header_oloc, "get_object_prefix",
5495				  NULL, 0, reply_buf, size);
5496	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5497	if (ret < 0)
5498		goto out;
5499
5500	p = reply_buf;
5501	object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
5502						    GFP_NOIO);
5503	if (IS_ERR(object_prefix)) {
5504		ret = PTR_ERR(object_prefix);
5505		goto out;
5506	}
5507	ret = 0;
5508
5509	*pobject_prefix = object_prefix;
5510	dout("  object_prefix = %s\n", object_prefix);
5511out:
5512	kfree(reply_buf);
5513
5514	return ret;
5515}
5516
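/*
 * Get the features for an image snapshot (or for the base image if
 * snap_id is CEPH_NOSNAP).  Fails with -ENXIO if the image uses
 * features this driver does not support.
 */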
5517static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5518				     bool read_only, u64 *snap_features)
5519{
5520	struct {
5521		__le64 snap_id;
5522		u8 read_only;
5523	} features_in;
5524	struct {
5525		__le64 features;
5526		__le64 incompat;
5527	} __attribute__ ((packed)) features_buf = { 0 };
5528	u64 unsup;
5529	int ret;
5530
5531	features_in.snap_id = cpu_to_le64(snap_id);
5532	features_in.read_only = read_only;
5533
5534	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5535				  &rbd_dev->header_oloc, "get_features",
5536				  &features_in, sizeof(features_in),
5537				  &features_buf, sizeof(features_buf));
5538	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5539	if (ret < 0)
5540		return ret;
5541	if (ret < sizeof (features_buf))
5542		return -ERANGE;
5543
5544	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5545	if (unsup) {
5546		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5547			 unsup);
5548		return -ENXIO;
5549	}
5550
5551	*snap_features = le64_to_cpu(features_buf.features);
5552
5553	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5554		(unsigned long long)snap_id,
5555		(unsigned long long)*snap_features,
5556		(unsigned long long)le64_to_cpu(features_buf.incompat));
5557
5558	return 0;
5559}
5560
5561/*
5562 * These are generic image flags, but since they are used only for
5563 * object map, store them in rbd_dev->object_map_flags.
5564 *
5565 * For the same reason, this function is called only on object map
5566 * (re)load and not on header refresh.
5567 */
5568static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5569{
5570	__le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5571	__le64 flags;
5572	int ret;
5573
5574	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5575				  &rbd_dev->header_oloc, "get_flags",
5576				  &snapid, sizeof(snapid),
5577				  &flags, sizeof(flags));
5578	if (ret < 0)
5579		return ret;
5580	if (ret < sizeof(flags))
5581		return -EBADMSG;
5582
5583	rbd_dev->object_map_flags = le64_to_cpu(flags);
5584	return 0;
5585}
5586
5587struct parent_image_info {
5588	u64		pool_id;
5589	const char	*pool_ns;
5590	const char	*image_id;
5591	u64		snap_id;
5592
5593	bool		has_overlap;
5594	u64		overlap;
5595};
5596
5597static void rbd_parent_info_cleanup(struct parent_image_info *pii)
5598{
5599	kfree(pii->pool_ns);
5600	kfree(pii->image_id);
5601
5602	memset(pii, 0, sizeof(*pii));
5603}
5604
5605/*
5606 * The caller is responsible for @pii.
5607 */
5608static int decode_parent_image_spec(void **p, void *end,
5609				    struct parent_image_info *pii)
5610{
5611	u8 struct_v;
5612	u32 struct_len;
5613	int ret;
5614
5615	ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5616				  &struct_v, &struct_len);
5617	if (ret)
5618		return ret;
5619
5620	ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5621	pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5622	if (IS_ERR(pii->pool_ns)) {
5623		ret = PTR_ERR(pii->pool_ns);
5624		pii->pool_ns = NULL;
5625		return ret;
5626	}
5627	pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5628	if (IS_ERR(pii->image_id)) {
5629		ret = PTR_ERR(pii->image_id);
5630		pii->image_id = NULL;
5631		return ret;
5632	}
5633	ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5634	return 0;
5635
5636e_inval:
5637	return -EINVAL;
5638}
5639
5640static int __get_parent_info(struct rbd_device *rbd_dev,
5641			     struct page *req_page,
5642			     struct page *reply_page,
5643			     struct parent_image_info *pii)
5644{
5645	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5646	size_t reply_len = PAGE_SIZE;
5647	void *p, *end;
5648	int ret;
5649
5650	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5651			     "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5652			     req_page, sizeof(u64), &reply_page, &reply_len);
5653	if (ret)
5654		return ret == -EOPNOTSUPP ? 1 : ret;
5655
5656	p = page_address(reply_page);
5657	end = p + reply_len;
5658	ret = decode_parent_image_spec(&p, end, pii);
5659	if (ret)
5660		return ret;
5661
5662	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5663			     "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5664			     req_page, sizeof(u64), &reply_page, &reply_len);
5665	if (ret)
5666		return ret;
5667
5668	p = page_address(reply_page);
5669	end = p + reply_len;
5670	ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5671	if (pii->has_overlap)
5672		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5673
5674	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5675	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5676	     pii->has_overlap, pii->overlap);
5677	return 0;
5678
5679e_inval:
5680	return -EINVAL;
5681}
5682
5683/*
5684 * The caller is responsible for @pii.
5685 */
5686static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5687				    struct page *req_page,
5688				    struct page *reply_page,
5689				    struct parent_image_info *pii)
5690{
5691	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5692	size_t reply_len = PAGE_SIZE;
5693	void *p, *end;
5694	int ret;
5695
5696	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5697			     "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5698			     req_page, sizeof(u64), &reply_page, &reply_len);
5699	if (ret)
5700		return ret;
5701
5702	p = page_address(reply_page);
5703	end = p + reply_len;
5704	ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5705	pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5706	if (IS_ERR(pii->image_id)) {
5707		ret = PTR_ERR(pii->image_id);
5708		pii->image_id = NULL;
5709		return ret;
5710	}
5711	ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5712	pii->has_overlap = true;
5713	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5714
5715	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5716	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5717	     pii->has_overlap, pii->overlap);
5718	return 0;
5719
5720e_inval:
5721	return -EINVAL;
5722}
5723
5724static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
5725				  struct parent_image_info *pii)
5726{
5727	struct page *req_page, *reply_page;
5728	void *p;
5729	int ret;
5730
5731	req_page = alloc_page(GFP_KERNEL);
5732	if (!req_page)
5733		return -ENOMEM;
5734
5735	reply_page = alloc_page(GFP_KERNEL);
5736	if (!reply_page) {
5737		__free_page(req_page);
5738		return -ENOMEM;
5739	}
5740
5741	p = page_address(req_page);
5742	ceph_encode_64(&p, rbd_dev->spec->snap_id);
5743	ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5744	if (ret > 0)
5745		ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5746					       pii);
5747
5748	__free_page(req_page);
5749	__free_page(reply_page);
5750	return ret;
5751}
5752
5753static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
5754{
5755	struct rbd_spec *parent_spec;
5756	struct parent_image_info pii = { 0 };
5757	int ret;
5758
5759	parent_spec = rbd_spec_alloc();
5760	if (!parent_spec)
5761		return -ENOMEM;
5762
5763	ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
5764	if (ret)
5765		goto out_err;
5766
5767	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
5768		goto out;	/* No parent?  No problem. */
5769
5770	/* The ceph file layout needs to fit pool id in 32 bits */
5771
5772	ret = -EIO;
5773	if (pii.pool_id > (u64)U32_MAX) {
5774		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5775			(unsigned long long)pii.pool_id, U32_MAX);
5776		goto out_err;
5777	}
5778
5779	/*
5780	 * The parent won't change except when the clone is flattened,
5781	 * so we only need to record the parent image spec once.
5782	 */
5783	parent_spec->pool_id = pii.pool_id;
5784	if (pii.pool_ns && *pii.pool_ns) {
5785		parent_spec->pool_ns = pii.pool_ns;
5786		pii.pool_ns = NULL;
5787	}
5788	parent_spec->image_id = pii.image_id;
5789	pii.image_id = NULL;
5790	parent_spec->snap_id = pii.snap_id;
5791
5792	rbd_assert(!rbd_dev->parent_spec);
5793	rbd_dev->parent_spec = parent_spec;
5794	parent_spec = NULL;	/* rbd_dev now owns this */
5795
5796	/*
5797	 * Record the parent overlap.  If it's zero, issue a warning as
5798	 * we will proceed as if there is no parent.
5799	 */
5800	if (!pii.overlap)
5801		rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5802	rbd_dev->parent_overlap = pii.overlap;
5803
5804out:
5805	ret = 0;
5806out_err:
5807	rbd_parent_info_cleanup(&pii);
5808	rbd_spec_put(parent_spec);
5809	return ret;
5810}
5811
5812static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
5813				    u64 *stripe_unit, u64 *stripe_count)
5814{
5815	struct {
5816		__le64 stripe_unit;
5817		__le64 stripe_count;
5818	} __attribute__ ((packed)) striping_info_buf = { 0 };
5819	size_t size = sizeof (striping_info_buf);
5820	int ret;
5821
5822	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5823				&rbd_dev->header_oloc, "get_stripe_unit_count",
5824				NULL, 0, &striping_info_buf, size);
5825	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5826	if (ret < 0)
5827		return ret;
5828	if (ret < size)
5829		return -ERANGE;
5830
5831	*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
5832	*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
5833	dout("  stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
5834	     *stripe_count);
5835
5836	return 0;
5837}
5838
5839static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
5840{
5841	__le64 data_pool_buf;
5842	int ret;
5843
5844	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5845				  &rbd_dev->header_oloc, "get_data_pool",
5846				  NULL, 0, &data_pool_buf,
5847				  sizeof(data_pool_buf));
5848	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5849	if (ret < 0)
5850		return ret;
5851	if (ret < sizeof(data_pool_buf))
5852		return -EBADMSG;
5853
5854	*data_pool_id = le64_to_cpu(data_pool_buf);
5855	dout("  data_pool_id = %lld\n", *data_pool_id);
5856	WARN_ON(*data_pool_id == CEPH_NOPOOL);
5857
5858	return 0;
5859}
5860
5861static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5862{
5863	CEPH_DEFINE_OID_ONSTACK(oid);
5864	size_t image_id_size;
5865	char *image_id;
5866	void *p;
5867	void *end;
5868	size_t size;
5869	void *reply_buf = NULL;
5870	size_t len = 0;
5871	char *image_name = NULL;
5872	int ret;
5873
5874	rbd_assert(!rbd_dev->spec->image_name);
5875
5876	len = strlen(rbd_dev->spec->image_id);
5877	image_id_size = sizeof (__le32) + len;
5878	image_id = kmalloc(image_id_size, GFP_KERNEL);
5879	if (!image_id)
5880		return NULL;
5881
5882	p = image_id;
5883	end = image_id + image_id_size;
5884	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5885
5886	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5887	reply_buf = kmalloc(size, GFP_KERNEL);
5888	if (!reply_buf)
5889		goto out;
5890
5891	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5892	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5893				  "dir_get_name", image_id, image_id_size,
5894				  reply_buf, size);
5895	if (ret < 0)
5896		goto out;
5897	p = reply_buf;
5898	end = reply_buf + ret;
5899
5900	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5901	if (IS_ERR(image_name))
5902		image_name = NULL;
5903	else
5904		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5905out:
5906	kfree(reply_buf);
5907	kfree(image_id);
5908
5909	return image_name;
5910}
5911
5912static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5913{
5914	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5915	const char *snap_name;
5916	u32 which = 0;
5917
5918	/* Skip over names until we find the one we are looking for */
5919
5920	snap_name = rbd_dev->header.snap_names;
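	/*
	 * Format 1 snapshot names are stored back to back as NUL-terminated
	 * strings, hence the strlen() + 1 stride below.
	 */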
5921	while (which < snapc->num_snaps) {
5922		if (!strcmp(name, snap_name))
5923			return snapc->snaps[which];
5924		snap_name += strlen(snap_name) + 1;
5925		which++;
5926	}
5927	return CEPH_NOSNAP;
5928}
5929
5930static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5931{
5932	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5933	u32 which;
5934	bool found = false;
5935	u64 snap_id;
5936
5937	for (which = 0; !found && which < snapc->num_snaps; which++) {
5938		const char *snap_name;
5939
5940		snap_id = snapc->snaps[which];
5941		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5942		if (IS_ERR(snap_name)) {
5943			/* ignore no-longer existing snapshots */
5944			if (PTR_ERR(snap_name) == -ENOENT)
5945				continue;
5946			else
5947				break;
5948		}
5949		found = !strcmp(name, snap_name);
5950		kfree(snap_name);
5951	}
5952	return found ? snap_id : CEPH_NOSNAP;
5953}
5954
5955/*
5956 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5957 * no snapshot by that name is found, or if an error occurs.
5958 */
5959static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5960{
5961	if (rbd_dev->image_format == 1)
5962		return rbd_v1_snap_id_by_name(rbd_dev, name);
5963
5964	return rbd_v2_snap_id_by_name(rbd_dev, name);
5965}
5966
5967/*
5968 * An image being mapped will have everything but the snap id.
5969 */
5970static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5971{
5972	struct rbd_spec *spec = rbd_dev->spec;
5973
5974	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5975	rbd_assert(spec->image_id && spec->image_name);
5976	rbd_assert(spec->snap_name);
5977
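	/* anything other than RBD_SNAP_HEAD_NAME refers to an actual snapshot */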
5978	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5979		u64 snap_id;
5980
5981		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5982		if (snap_id == CEPH_NOSNAP)
5983			return -ENOENT;
5984
5985		spec->snap_id = snap_id;
5986	} else {
5987		spec->snap_id = CEPH_NOSNAP;
5988	}
5989
5990	return 0;
5991}
5992
5993/*
5994 * A parent image will have all ids but none of the names.
5995 *
5996 * All names in an rbd spec are dynamically allocated.  It's OK if we
5997 * can't figure out the name for an image id.
5998 */
5999static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6000{
6001	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6002	struct rbd_spec *spec = rbd_dev->spec;
6003	const char *pool_name;
6004	const char *image_name;
6005	const char *snap_name;
6006	int ret;
6007
6008	rbd_assert(spec->pool_id != CEPH_NOPOOL);
6009	rbd_assert(spec->image_id);
6010	rbd_assert(spec->snap_id != CEPH_NOSNAP);
6011
6012	/* Get the pool name; we have to make our own copy of this */
6013
6014	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6015	if (!pool_name) {
6016		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6017		return -EIO;
6018	}
6019	pool_name = kstrdup(pool_name, GFP_KERNEL);
6020	if (!pool_name)
6021		return -ENOMEM;
6022
6023	/* Fetch the image name; tolerate failure here */
6024
6025	image_name = rbd_dev_image_name(rbd_dev);
6026	if (!image_name)
6027		rbd_warn(rbd_dev, "unable to get image name");
6028
6029	/* Fetch the snapshot name */
6030
6031	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6032	if (IS_ERR(snap_name)) {
6033		ret = PTR_ERR(snap_name);
6034		goto out_err;
6035	}
6036
6037	spec->pool_name = pool_name;
6038	spec->image_name = image_name;
6039	spec->snap_name = snap_name;
6040
6041	return 0;
6042
6043out_err:
6044	kfree(image_name);
6045	kfree(pool_name);
6046	return ret;
6047}
6048
6049static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
6050				   struct ceph_snap_context **psnapc)
6051{
6052	size_t size;
6053	int ret;
6054	void *reply_buf;
6055	void *p;
6056	void *end;
6057	u64 seq;
6058	u32 snap_count;
6059	struct ceph_snap_context *snapc;
6060	u32 i;
6061
6062	/*
6063	 * We'll need room for the seq value (maximum snapshot id),
6064	 * snapshot count, and array of that many snapshot ids.
6065	 * For now we have a fixed upper limit on the number we're
6066	 * prepared to receive.
6067	 */
6068	size = sizeof (__le64) + sizeof (__le32) +
6069			RBD_MAX_SNAP_COUNT * sizeof (__le64);
6070	reply_buf = kzalloc(size, GFP_KERNEL);
6071	if (!reply_buf)
6072		return -ENOMEM;
6073
6074	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6075				  &rbd_dev->header_oloc, "get_snapcontext",
6076				  NULL, 0, reply_buf, size);
6077	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6078	if (ret < 0)
6079		goto out;
6080
6081	p = reply_buf;
6082	end = reply_buf + ret;
6083	ret = -ERANGE;
6084	ceph_decode_64_safe(&p, end, seq, out);
6085	ceph_decode_32_safe(&p, end, snap_count, out);
6086
6087	/*
6088	 * Make sure the reported number of snapshot ids wouldn't go
6089	 * beyond the end of our buffer.  But before checking that,
6090	 * make sure the computed size of the snapshot context we
6091	 * allocate is representable in a size_t.
6092	 */
6093	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6094				 / sizeof (u64)) {
6095		ret = -EINVAL;
6096		goto out;
6097	}
6098	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6099		goto out;
6100	ret = 0;
6101
6102	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6103	if (!snapc) {
6104		ret = -ENOMEM;
6105		goto out;
6106	}
6107	snapc->seq = seq;
6108	for (i = 0; i < snap_count; i++)
6109		snapc->snaps[i] = ceph_decode_64(&p);
6110
6111	*psnapc = snapc;
6112	dout("  snap context seq = %llu, snap_count = %u\n",
6113		(unsigned long long)seq, (unsigned int)snap_count);
6114out:
6115	kfree(reply_buf);
6116
6117	return ret;
6118}
6119
6120static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6121					u64 snap_id)
6122{
6123	size_t size;
6124	void *reply_buf;
6125	__le64 snapid;
6126	int ret;
6127	void *p;
6128	void *end;
6129	char *snap_name;
6130
6131	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6132	reply_buf = kmalloc(size, GFP_KERNEL);
6133	if (!reply_buf)
6134		return ERR_PTR(-ENOMEM);
6135
6136	snapid = cpu_to_le64(snap_id);
6137	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6138				  &rbd_dev->header_oloc, "get_snapshot_name",
6139				  &snapid, sizeof(snapid), reply_buf, size);
6140	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6141	if (ret < 0) {
6142		snap_name = ERR_PTR(ret);
6143		goto out;
6144	}
6145
6146	p = reply_buf;
6147	end = reply_buf + ret;
6148	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6149	if (IS_ERR(snap_name))
6150		goto out;
6151
6152	dout("  snap_id 0x%016llx snap_name = %s\n",
6153		(unsigned long long)snap_id, snap_name);
6154out:
6155	kfree(reply_buf);
6156
6157	return snap_name;
6158}
6159
6160static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
6161				  struct rbd_image_header *header,
6162				  bool first_time)
6163{
6164	int ret;
6165
6166	ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
6167				    first_time ? &header->obj_order : NULL,
6168				    &header->image_size);
6169	if (ret)
6170		return ret;
6171
6172	if (first_time) {
6173		ret = rbd_dev_v2_header_onetime(rbd_dev, header);
6174		if (ret)
6175			return ret;
6176	}
6177
6178	ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
6179	if (ret)
6180		return ret;
6181
6182	return 0;
6183}
6184
6185static int rbd_dev_header_info(struct rbd_device *rbd_dev,
6186			       struct rbd_image_header *header,
6187			       bool first_time)
6188{
6189	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6190	rbd_assert(!header->object_prefix && !header->snapc);
6191
6192	if (rbd_dev->image_format == 1)
6193		return rbd_dev_v1_header_info(rbd_dev, header, first_time);
6194
6195	return rbd_dev_v2_header_info(rbd_dev, header, first_time);
6196}
6197
6198/*
6199 * Skips over white space at *buf, and updates *buf to point to the
6200 * first found non-space character (if any). Returns the length of
6201 * the token (string of non-white space characters) found.  Note
6202 * that *buf must be terminated with '\0'.
6203 */
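/*
 * For example (illustrative only): with *buf pointing at "  pool img",
 * next_token() advances *buf past the two leading spaces and returns 4,
 * the length of "pool".
 */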
6204static inline size_t next_token(const char **buf)
6205{
6206	/*
6207	 * These are the characters that produce nonzero for
6208	 * isspace() in the "C" and "POSIX" locales.
6209	 */
6210	static const char spaces[] = " \f\n\r\t\v";
6211
6212	*buf += strspn(*buf, spaces);	/* Find start of token */
6213
6214	return strcspn(*buf, spaces);	/* Return token length */
6215}
6216
6217/*
6218 * Finds the next token in *buf, dynamically allocates a buffer big
6219 * enough to hold a copy of it, and copies the token into the new
6220 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
6221 * that a duplicate buffer is created even for a zero-length token.
6222 *
6223 * Returns a pointer to the newly-allocated duplicate, or a null
6224 * pointer if memory for the duplicate was not available.  If
6225 * the lenp argument is a non-null pointer, the length of the token
6226 * (not including the '\0') is returned in *lenp.
6227 *
6228 * If successful, the *buf pointer will be updated to point beyond
6229 * the end of the found token.
6230 *
6231 * Note: uses GFP_KERNEL for allocation.
6232 */
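/*
 * For example (illustrative only): with *buf pointing at "rbd myimage",
 * dup_token() returns a newly allocated copy of "rbd", stores 3 in *lenp
 * (if lenp is non-NULL) and leaves *buf pointing at " myimage".
 */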
6233static inline char *dup_token(const char **buf, size_t *lenp)
6234{
6235	char *dup;
6236	size_t len;
6237
6238	len = next_token(buf);
6239	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6240	if (!dup)
6241		return NULL;
6242	*(dup + len) = '\0';
6243	*buf += len;
6244
6245	if (lenp)
6246		*lenp = len;
6247
6248	return dup;
6249}
6250
6251static int rbd_parse_param(struct fs_parameter *param,
6252			    struct rbd_parse_opts_ctx *pctx)
6253{
6254	struct rbd_options *opt = pctx->opts;
6255	struct fs_parse_result result;
6256	struct p_log log = {.prefix = "rbd"};
6257	int token, ret;
6258
6259	ret = ceph_parse_param(param, pctx->copts, NULL);
6260	if (ret != -ENOPARAM)
6261		return ret;
6262
6263	token = __fs_parse(&log, rbd_parameters, param, &result);
6264	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6265	if (token < 0) {
6266		if (token == -ENOPARAM)
6267			return inval_plog(&log, "Unknown parameter '%s'",
6268					  param->key);
6269		return token;
6270	}
6271
6272	switch (token) {
6273	case Opt_queue_depth:
6274		if (result.uint_32 < 1)
6275			goto out_of_range;
6276		opt->queue_depth = result.uint_32;
6277		break;
6278	case Opt_alloc_size:
6279		if (result.uint_32 < SECTOR_SIZE)
6280			goto out_of_range;
6281		if (!is_power_of_2(result.uint_32))
6282			return inval_plog(&log, "alloc_size must be a power of 2");
6283		opt->alloc_size = result.uint_32;
6284		break;
6285	case Opt_lock_timeout:
6286		/* 0 is "wait forever" (i.e. infinite timeout) */
6287		if (result.uint_32 > INT_MAX / 1000)
6288			goto out_of_range;
6289		opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6290		break;
6291	case Opt_pool_ns:
6292		kfree(pctx->spec->pool_ns);
6293		pctx->spec->pool_ns = param->string;
6294		param->string = NULL;
6295		break;
6296	case Opt_compression_hint:
6297		switch (result.uint_32) {
6298		case Opt_compression_hint_none:
6299			opt->alloc_hint_flags &=
6300			    ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6301			      CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6302			break;
6303		case Opt_compression_hint_compressible:
6304			opt->alloc_hint_flags |=
6305			    CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6306			opt->alloc_hint_flags &=
6307			    ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6308			break;
6309		case Opt_compression_hint_incompressible:
6310			opt->alloc_hint_flags |=
6311			    CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6312			opt->alloc_hint_flags &=
6313			    ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6314			break;
6315		default:
6316			BUG();
6317		}
6318		break;
6319	case Opt_read_only:
6320		opt->read_only = true;
6321		break;
6322	case Opt_read_write:
6323		opt->read_only = false;
6324		break;
6325	case Opt_lock_on_read:
6326		opt->lock_on_read = true;
6327		break;
6328	case Opt_exclusive:
6329		opt->exclusive = true;
6330		break;
6331	case Opt_notrim:
6332		opt->trim = false;
6333		break;
6334	default:
6335		BUG();
6336	}
6337
6338	return 0;
6339
6340out_of_range:
6341	return inval_plog(&log, "%s out of range", param->key);
6342}
6343
6344/*
6345 * This duplicates most of generic_parse_monolithic(), untying it from
6346 * fs_context and skipping standard superblock and security options.
6347 */
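/*
 * For example (illustrative only), an options string such as
 * "read_only,queue_depth=128" is split on commas and each token is
 * passed to rbd_parse_param() as a bare key or a key=value pair.
 */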
6348static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6349{
6350	char *key;
6351	int ret = 0;
6352
6353	dout("%s '%s'\n", __func__, options);
6354	while ((key = strsep(&options, ",")) != NULL) {
6355		if (*key) {
6356			struct fs_parameter param = {
6357				.key	= key,
6358				.type	= fs_value_is_flag,
6359			};
6360			char *value = strchr(key, '=');
6361			size_t v_len = 0;
6362
6363			if (value) {
6364				if (value == key)
6365					continue;
6366				*value++ = 0;
6367				v_len = strlen(value);
6368				param.string = kmemdup_nul(value, v_len,
6369							   GFP_KERNEL);
6370				if (!param.string)
6371					return -ENOMEM;
6372				param.type = fs_value_is_string;
6373			}
6374			param.size = v_len;
6375
6376			ret = rbd_parse_param(&param, pctx);
6377			kfree(param.string);
6378			if (ret)
6379				break;
6380		}
6381	}
6382
6383	return ret;
6384}
6385
6386/*
6387 * Parse the options provided for an "rbd add" (i.e., rbd image
6388 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
6389 * and the data written is passed here via a NUL-terminated buffer.
6390 * Returns 0 if successful or an error code otherwise.
6391 *
6392 * The information extracted from these options is recorded in
6393 * the other parameters which return dynamically-allocated
6394 * structures:
6395 *  ceph_opts
6396 *      The address of a pointer that will refer to a ceph options
6397 *      structure.  Caller must release the returned pointer using
6398 *      ceph_destroy_options() when it is no longer needed.
6399 *  rbd_opts
6400 *	Address of an rbd options pointer.  Fully initialized by
6401 *	this function; caller must release with kfree().
6402 *  spec
6403 *	Address of an rbd image specification pointer.  Fully
6404 *	initialized by this function based on parsed options.
6405 *	Caller must release with rbd_spec_put().
6406 *
6407 * The options passed take this form:
6408 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
6409 * where:
6410 *  <mon_addrs>
6411 *      A comma-separated list of one or more monitor addresses.
6412 *      A monitor address is an ip address, optionally followed
6413 *      by a port number (separated by a colon).
6414 *        I.e.:  ip1[:port1][,ip2[:port2]...]
6415 *  <options>
6416 *      A comma-separated list of ceph and/or rbd options.
6417 *  <pool_name>
6418 *      The name of the rados pool containing the rbd image.
6419 *  <image_name>
6420 *      The name of the image in that pool to map.
6421 *  <snap_name>
6422 *      An optional snapshot name.  If provided, the mapping will
6423 *      present data from the image at the time that snapshot was
6424 *      created.  The image head is used if no snapshot name is
6425 *      provided.  Snapshot mappings are always read-only.
6426 */
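/*
 * For example (illustrative only), a request written to
 * /sys/bus/rbd/add might look like:
 *
 *   1.2.3.4:6789 name=admin,secret=<key> mypool myimage mysnap
 *
 * which maps snapshot "mysnap" of image "myimage" in pool "mypool",
 * using the monitor at 1.2.3.4:6789.
 */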
6427static int rbd_add_parse_args(const char *buf,
6428				struct ceph_options **ceph_opts,
6429				struct rbd_options **opts,
6430				struct rbd_spec **rbd_spec)
6431{
6432	size_t len;
6433	char *options;
6434	const char *mon_addrs;
6435	char *snap_name;
6436	size_t mon_addrs_size;
6437	struct rbd_parse_opts_ctx pctx = { 0 };
6438	int ret;
6439
6440	/* The first four tokens are required */
6441
6442	len = next_token(&buf);
6443	if (!len) {
6444		rbd_warn(NULL, "no monitor address(es) provided");
6445		return -EINVAL;
6446	}
6447	mon_addrs = buf;
6448	mon_addrs_size = len;
6449	buf += len;
6450
6451	ret = -EINVAL;
6452	options = dup_token(&buf, NULL);
6453	if (!options)
6454		return -ENOMEM;
6455	if (!*options) {
6456		rbd_warn(NULL, "no options provided");
6457		goto out_err;
6458	}
6459
6460	pctx.spec = rbd_spec_alloc();
6461	if (!pctx.spec)
6462		goto out_mem;
6463
6464	pctx.spec->pool_name = dup_token(&buf, NULL);
6465	if (!pctx.spec->pool_name)
6466		goto out_mem;
6467	if (!*pctx.spec->pool_name) {
6468		rbd_warn(NULL, "no pool name provided");
6469		goto out_err;
6470	}
6471
6472	pctx.spec->image_name = dup_token(&buf, NULL);
6473	if (!pctx.spec->image_name)
6474		goto out_mem;
6475	if (!*pctx.spec->image_name) {
6476		rbd_warn(NULL, "no image name provided");
6477		goto out_err;
6478	}
6479
6480	/*
6481	 * Snapshot name is optional; default is to use "-"
6482	 * (indicating the head/no snapshot).
6483	 */
6484	len = next_token(&buf);
6485	if (!len) {
6486		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6487		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6488	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
6489		ret = -ENAMETOOLONG;
6490		goto out_err;
6491	}
6492	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6493	if (!snap_name)
6494		goto out_mem;
6495	*(snap_name + len) = '\0';
6496	pctx.spec->snap_name = snap_name;
6497
6498	pctx.copts = ceph_alloc_options();
6499	if (!pctx.copts)
6500		goto out_mem;
6501
6502	/* Initialize all rbd options to the defaults */
6503
6504	pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6505	if (!pctx.opts)
6506		goto out_mem;
6507
6508	pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6509	pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6510	pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6511	pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6512	pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6513	pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6514	pctx.opts->trim = RBD_TRIM_DEFAULT;
6515
6516	ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
6517				 ',');
6518	if (ret)
6519		goto out_err;
6520
6521	ret = rbd_parse_options(options, &pctx);
6522	if (ret)
6523		goto out_err;
6524
6525	*ceph_opts = pctx.copts;
6526	*opts = pctx.opts;
6527	*rbd_spec = pctx.spec;
6528	kfree(options);
6529	return 0;
6530
6531out_mem:
6532	ret = -ENOMEM;
6533out_err:
6534	kfree(pctx.opts);
6535	ceph_destroy_options(pctx.copts);
6536	rbd_spec_put(pctx.spec);
6537	kfree(options);
6538	return ret;
6539}
6540
6541static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6542{
6543	down_write(&rbd_dev->lock_rwsem);
6544	if (__rbd_is_lock_owner(rbd_dev))
6545		__rbd_release_lock(rbd_dev);
6546	up_write(&rbd_dev->lock_rwsem);
6547}
6548
6549/*
6550 * If the wait is interrupted, an error is returned even if the lock
6551 * was successfully acquired.  rbd_dev_image_unlock() will release it
6552 * if needed.
6553 */
6554static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6555{
6556	long ret;
6557
6558	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6559		if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6560			return 0;
6561
6562		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6563		return -EINVAL;
6564	}
6565
6566	if (rbd_is_ro(rbd_dev))
6567		return 0;
6568
6569	rbd_assert(!rbd_is_lock_owner(rbd_dev));
6570	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
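	/*
	 * wait_for_completion_killable_timeout() returns the remaining
	 * jiffies (> 0) on completion, 0 on timeout and a negative errno
	 * if interrupted by a fatal signal.
	 */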
6571	ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6572			    ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6573	if (ret > 0) {
6574		ret = rbd_dev->acquire_err;
6575	} else {
6576		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6577		if (!ret)
6578			ret = -ETIMEDOUT;
6579
6580		rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6581	}
6582	if (ret)
6583		return ret;
6584
6585	return 0;
6586}
6587
6588/*
6589 * An rbd format 2 image has a unique identifier, distinct from the
6590 * name given to it by the user.  Internally, that identifier is
6591 * what's used to specify the names of objects related to the image.
6592 *
6593 * A special "rbd id" object is used to map an rbd image name to its
6594 * id.  If that object doesn't exist, then there is no v2 rbd image
6595 * with the supplied name.
6596 *
6597 * This function will record the given rbd_dev's image_id field if
6598 * it can be determined, and in that case will return 0.  If any
6599 * errors occur a negative errno will be returned and the rbd_dev's
6600 * image_id field will be unchanged (and should be NULL).
6601 */
6602static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6603{
6604	int ret;
6605	size_t size;
6606	CEPH_DEFINE_OID_ONSTACK(oid);
6607	void *response;
6608	char *image_id;
6609
6610	/*
6611	 * When probing a parent image, the image id is already
6612	 * known (and the image name likely is not).  There's no
6613	 * need to fetch the image id again in this case.  We
6614	 * do still need to set the image format though.
6615	 */
6616	if (rbd_dev->spec->image_id) {
6617		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6618
6619		return 0;
6620	}
6621
6622	/*
6623	 * First, see if the format 2 image id file exists, and if
6624	 * so, get the image's persistent id from it.
6625	 */
6626	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6627			       rbd_dev->spec->image_name);
6628	if (ret)
6629		return ret;
6630
6631	dout("rbd id object name is %s\n", oid.name);
6632
6633	/* Response will be an encoded string, which includes a length */
6634	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6635	response = kzalloc(size, GFP_NOIO);
6636	if (!response) {
6637		ret = -ENOMEM;
6638		goto out;
6639	}
6640
6641	/* If it doesn't exist we'll assume it's a format 1 image */
6642
6643	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6644				  "get_id", NULL, 0,
6645				  response, size);
6646	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6647	if (ret == -ENOENT) {
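		/* an empty image id doubles as the format 1 marker */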
6648		image_id = kstrdup("", GFP_KERNEL);
6649		ret = image_id ? 0 : -ENOMEM;
6650		if (!ret)
6651			rbd_dev->image_format = 1;
6652	} else if (ret >= 0) {
6653		void *p = response;
6654
6655		image_id = ceph_extract_encoded_string(&p, p + ret,
6656						NULL, GFP_NOIO);
6657		ret = PTR_ERR_OR_ZERO(image_id);
6658		if (!ret)
6659			rbd_dev->image_format = 2;
6660	}
6661
6662	if (!ret) {
6663		rbd_dev->spec->image_id = image_id;
6664		dout("image_id is %s\n", image_id);
6665	}
6666out:
6667	kfree(response);
6668	ceph_oid_destroy(&oid);
6669	return ret;
6670}
6671
6672/*
6673 * Undo whatever state changes are made by v1 or v2 header info
6674 * call.
6675 */
6676static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6677{
6678	rbd_dev_parent_put(rbd_dev);
6679	rbd_object_map_free(rbd_dev);
6680	rbd_dev_mapping_clear(rbd_dev);
6681
6682	/* Free dynamic fields from the header, then zero it out */
6683
6684	rbd_image_header_cleanup(&rbd_dev->header);
6685}
6686
6687static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
6688				     struct rbd_image_header *header)
6689{
6690	int ret;
6691
6692	ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
6693	if (ret)
6694		return ret;
6695
6696	/*
6697	 * Get and check the features for the image.  Currently the
6698	 * features are assumed to never change.
6699	 */
6700	ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
6701					rbd_is_ro(rbd_dev), &header->features);
6702	if (ret)
6703		return ret;
6704
6705	/* If the image supports fancy striping, get its parameters */
6706
6707	if (header->features & RBD_FEATURE_STRIPINGV2) {
6708		ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
6709					       &header->stripe_count);
6710		if (ret)
6711			return ret;
6712	}
6713
6714	if (header->features & RBD_FEATURE_DATA_POOL) {
6715		ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
6716		if (ret)
6717			return ret;
6718	}
6719
6720	return 0;
6721}
6722
6723/*
6724 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6725 * rbd_dev_image_probe() recursion depth, which means it's also the
6726 * length of the already discovered part of the parent chain.
6727 */
6728static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6729{
6730	struct rbd_device *parent = NULL;
6731	int ret;
6732
6733	if (!rbd_dev->parent_spec)
6734		return 0;
6735
6736	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6737		pr_info("parent chain is too long (%d)\n", depth);
6738		ret = -EINVAL;
6739		goto out_err;
6740	}
6741
6742	parent = __rbd_dev_create(rbd_dev->parent_spec);
6743	if (!parent) {
6744		ret = -ENOMEM;
6745		goto out_err;
6746	}
6747
6748	/*
6749	 * Images related by parent/child relationships always share
6750	 * rbd_client and spec/parent_spec, so bump their refcounts.
6751	 */
6752	parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6753	parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6754
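	/* a parent image is only ever read from, so map it read-only */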
6755	__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6756
6757	ret = rbd_dev_image_probe(parent, depth);
6758	if (ret < 0)
6759		goto out_err;
6760
6761	rbd_dev->parent = parent;
6762	atomic_set(&rbd_dev->parent_ref, 1);
6763	return 0;
6764
6765out_err:
6766	rbd_dev_unparent(rbd_dev);
6767	rbd_dev_destroy(parent);
6768	return ret;
6769}
6770
6771static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6772{
6773	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6774	rbd_free_disk(rbd_dev);
6775	if (!single_major)
6776		unregister_blkdev(rbd_dev->major, rbd_dev->name);
6777}
6778
6779/*
6780 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6781 * upon return.
6782 */
6783static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6784{
6785	int ret;
6786
6787	/* Record our major and minor device numbers. */
6788
6789	if (!single_major) {
6790		ret = register_blkdev(0, rbd_dev->name);
6791		if (ret < 0)
6792			goto err_out_unlock;
6793
6794		rbd_dev->major = ret;
6795		rbd_dev->minor = 0;
6796	} else {
6797		rbd_dev->major = rbd_major;
6798		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6799	}
6800
6801	/* Set up the blkdev mapping. */
6802
6803	ret = rbd_init_disk(rbd_dev);
6804	if (ret)
6805		goto err_out_blkdev;
6806
6807	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6808	set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6809
6810	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6811	if (ret)
6812		goto err_out_disk;
6813
6814	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6815	up_write(&rbd_dev->header_rwsem);
6816	return 0;
6817
6818err_out_disk:
6819	rbd_free_disk(rbd_dev);
6820err_out_blkdev:
6821	if (!single_major)
6822		unregister_blkdev(rbd_dev->major, rbd_dev->name);
6823err_out_unlock:
6824	up_write(&rbd_dev->header_rwsem);
6825	return ret;
6826}
6827
6828static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6829{
6830	struct rbd_spec *spec = rbd_dev->spec;
6831	int ret;
6832
6833	/* Record the header object name for this rbd image. */
6834
6835	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6836	if (rbd_dev->image_format == 1)
6837		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6838				       spec->image_name, RBD_SUFFIX);
6839	else
6840		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6841				       RBD_HEADER_PREFIX, spec->image_id);
6842
6843	return ret;
6844}
6845
6846static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6847{
6848	if (!is_snap) {
6849		pr_info("image %s/%s%s%s does not exist\n",
6850			rbd_dev->spec->pool_name,
6851			rbd_dev->spec->pool_ns ?: "",
6852			rbd_dev->spec->pool_ns ? "/" : "",
6853			rbd_dev->spec->image_name);
6854	} else {
6855		pr_info("snap %s/%s%s%s@%s does not exist\n",
6856			rbd_dev->spec->pool_name,
6857			rbd_dev->spec->pool_ns ?: "",
6858			rbd_dev->spec->pool_ns ? "/" : "",
6859			rbd_dev->spec->image_name,
6860			rbd_dev->spec->snap_name);
6861	}
6862}
6863
6864static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6865{
6866	if (!rbd_is_ro(rbd_dev))
6867		rbd_unregister_watch(rbd_dev);
6868
6869	rbd_dev_unprobe(rbd_dev);
6870	rbd_dev->image_format = 0;
6871	kfree(rbd_dev->spec->image_id);
6872	rbd_dev->spec->image_id = NULL;
6873}
6874
6875/*
6876 * Probe for the existence of the header object for the given rbd
6877 * device.  If this image is the one being mapped (i.e., not a
6878 * parent), initiate a watch on its header object before using that
6879 * object to get detailed information about the rbd image.
6880 *
6881 * On success, returns with header_rwsem held for write if called
6882 * with @depth == 0.
6883 */
6884static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6885{
6886	bool need_watch = !rbd_is_ro(rbd_dev);
6887	int ret;
6888
6889	/*
6890	 * Get the id from the image id object.  Unless there's an
6891	 * error, rbd_dev->spec->image_id will be filled in with
6892	 * a dynamically-allocated string, and rbd_dev->image_format
6893	 * will be set to either 1 or 2.
6894	 */
6895	ret = rbd_dev_image_id(rbd_dev);
6896	if (ret)
6897		return ret;
6898
6899	ret = rbd_dev_header_name(rbd_dev);
6900	if (ret)
6901		goto err_out_format;
6902
6903	if (need_watch) {
6904		ret = rbd_register_watch(rbd_dev);
6905		if (ret) {
6906			if (ret == -ENOENT)
6907				rbd_print_dne(rbd_dev, false);
6908			goto err_out_format;
6909		}
6910	}
6911
6912	if (!depth)
6913		down_write(&rbd_dev->header_rwsem);
6914
6915	ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
6916	if (ret) {
6917		if (ret == -ENOENT && !need_watch)
6918			rbd_print_dne(rbd_dev, false);
6919		goto err_out_probe;
6920	}
6921
6922	rbd_init_layout(rbd_dev);
6923
6924	/*
6925	 * If this image is the one being mapped, we have pool name and
6926	 * id, image name and id, and snap name - need to fill snap id.
6927	 * Otherwise this is a parent image, identified by pool, image
6928	 * and snap ids - need to fill in names for those ids.
6929	 */
6930	if (!depth)
6931		ret = rbd_spec_fill_snap_id(rbd_dev);
6932	else
6933		ret = rbd_spec_fill_names(rbd_dev);
6934	if (ret) {
6935		if (ret == -ENOENT)
6936			rbd_print_dne(rbd_dev, true);
6937		goto err_out_probe;
6938	}
6939
6940	ret = rbd_dev_mapping_set(rbd_dev);
6941	if (ret)
6942		goto err_out_probe;
6943
6944	if (rbd_is_snap(rbd_dev) &&
6945	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
6946		ret = rbd_object_map_load(rbd_dev);
6947		if (ret)
6948			goto err_out_probe;
6949	}
6950
6951	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6952		ret = rbd_dev_setup_parent(rbd_dev);
6953		if (ret)
6954			goto err_out_probe;
6955	}
6956
6957	ret = rbd_dev_probe_parent(rbd_dev, depth);
6958	if (ret)
6959		goto err_out_probe;
6960
6961	dout("discovered format %u image, header name is %s\n",
6962		rbd_dev->image_format, rbd_dev->header_oid.name);
6963	return 0;
6964
6965err_out_probe:
6966	if (!depth)
6967		up_write(&rbd_dev->header_rwsem);
6968	if (need_watch)
6969		rbd_unregister_watch(rbd_dev);
6970	rbd_dev_unprobe(rbd_dev);
6971err_out_format:
6972	rbd_dev->image_format = 0;
6973	kfree(rbd_dev->spec->image_id);
6974	rbd_dev->spec->image_id = NULL;
6975	return ret;
6976}
6977
6978static void rbd_dev_update_header(struct rbd_device *rbd_dev,
6979				  struct rbd_image_header *header)
6980{
6981	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6982	rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
6983
6984	if (rbd_dev->header.image_size != header->image_size) {
6985		rbd_dev->header.image_size = header->image_size;
6986
6987		if (!rbd_is_snap(rbd_dev)) {
6988			rbd_dev->mapping.size = header->image_size;
6989			rbd_dev_update_size(rbd_dev);
6990		}
6991	}
6992
6993	ceph_put_snap_context(rbd_dev->header.snapc);
6994	rbd_dev->header.snapc = header->snapc;
6995	header->snapc = NULL;
6996
6997	if (rbd_dev->image_format == 1) {
6998		kfree(rbd_dev->header.snap_names);
6999		rbd_dev->header.snap_names = header->snap_names;
7000		header->snap_names = NULL;
7001
7002		kfree(rbd_dev->header.snap_sizes);
7003		rbd_dev->header.snap_sizes = header->snap_sizes;
7004		header->snap_sizes = NULL;
7005	}
7006}
7007
7008static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
7009				  struct parent_image_info *pii)
7010{
7011	if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
7012		/*
7013		 * Either the parent never existed, or we have
7014		 * record of it but the image got flattened so it no
7015		 * longer has a parent.  When the parent of a
7016		 * layered image disappears we immediately set the
7017		 * overlap to 0.  The effect of this is that all new
7018		 * requests will be treated as if the image had no
7019		 * parent.
7020		 *
7021		 * If !pii.has_overlap, the parent image spec is not
7022		 * applicable.  It's there to avoid duplication in each
7023		 * snapshot record.
7024		 */
7025		if (rbd_dev->parent_overlap) {
7026			rbd_dev->parent_overlap = 0;
7027			rbd_dev_parent_put(rbd_dev);
7028			pr_info("%s: clone has been flattened\n",
7029				rbd_dev->disk->disk_name);
7030		}
7031	} else {
7032		rbd_assert(rbd_dev->parent_spec);
7033
7034		/*
7035		 * Update the parent overlap.  If it became zero, issue
7036		 * a warning as we will proceed as if there is no parent.
7037		 */
7038		if (!pii->overlap && rbd_dev->parent_overlap)
7039			rbd_warn(rbd_dev,
7040				 "clone has become standalone (overlap 0)");
7041		rbd_dev->parent_overlap = pii->overlap;
7042	}
7043}
7044
7045static int rbd_dev_refresh(struct rbd_device *rbd_dev)
7046{
7047	struct rbd_image_header	header = { 0 };
7048	struct parent_image_info pii = { 0 };
7049	int ret;
7050
7051	dout("%s rbd_dev %p\n", __func__, rbd_dev);
7052
7053	ret = rbd_dev_header_info(rbd_dev, &header, false);
7054	if (ret)
7055		goto out;
7056
7057	/*
7058	 * If there is a parent, see if it has disappeared due to the
7059	 * mapped image getting flattened.
7060	 */
7061	if (rbd_dev->parent) {
7062		ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
7063		if (ret)
7064			goto out;
7065	}
7066
7067	down_write(&rbd_dev->header_rwsem);
7068	rbd_dev_update_header(rbd_dev, &header);
7069	if (rbd_dev->parent)
7070		rbd_dev_update_parent(rbd_dev, &pii);
7071	up_write(&rbd_dev->header_rwsem);
7072
7073out:
7074	rbd_parent_info_cleanup(&pii);
7075	rbd_image_header_cleanup(&header);
7076	return ret;
7077}
7078
7079static ssize_t do_rbd_add(const char *buf, size_t count)
7080{
7081	struct rbd_device *rbd_dev = NULL;
7082	struct ceph_options *ceph_opts = NULL;
7083	struct rbd_options *rbd_opts = NULL;
7084	struct rbd_spec *spec = NULL;
7085	struct rbd_client *rbdc;
7086	int rc;
7087
7088	if (!capable(CAP_SYS_ADMIN))
7089		return -EPERM;
7090
7091	if (!try_module_get(THIS_MODULE))
7092		return -ENODEV;
7093
7094	/* parse add command */
7095	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7096	if (rc < 0)
7097		goto out;
7098
7099	rbdc = rbd_get_client(ceph_opts);
7100	if (IS_ERR(rbdc)) {
7101		rc = PTR_ERR(rbdc);
7102		goto err_out_args;
7103	}
7104
7105	/* pick the pool */
7106	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7107	if (rc < 0) {
7108		if (rc == -ENOENT)
7109			pr_info("pool %s does not exist\n", spec->pool_name);
7110		goto err_out_client;
7111	}
7112	spec->pool_id = (u64)rc;
7113
7114	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7115	if (!rbd_dev) {
7116		rc = -ENOMEM;
7117		goto err_out_client;
7118	}
7119	rbdc = NULL;		/* rbd_dev now owns this */
7120	spec = NULL;		/* rbd_dev now owns this */
7121	rbd_opts = NULL;	/* rbd_dev now owns this */
7122
7123	/* if we are mapping a snapshot it will be a read-only mapping */
7124	if (rbd_dev->opts->read_only ||
7125	    strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7126		__set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7127
7128	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7129	if (!rbd_dev->config_info) {
7130		rc = -ENOMEM;
7131		goto err_out_rbd_dev;
7132	}
7133
7134	rc = rbd_dev_image_probe(rbd_dev, 0);
7135	if (rc < 0)
7136		goto err_out_rbd_dev;
7137
7138	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7139		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7140			 rbd_dev->layout.object_size);
7141		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7142	}
7143
7144	rc = rbd_dev_device_setup(rbd_dev);
7145	if (rc)
7146		goto err_out_image_probe;
7147
7148	rc = rbd_add_acquire_lock(rbd_dev);
7149	if (rc)
7150		goto err_out_image_lock;
7151
7152	/* Everything's ready.  Announce the disk to the world. */
7153
7154	rc = device_add(&rbd_dev->dev);
7155	if (rc)
7156		goto err_out_image_lock;
7157
7158	rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7159	if (rc)
7160		goto err_out_cleanup_disk;
7161
7162	spin_lock(&rbd_dev_list_lock);
7163	list_add_tail(&rbd_dev->node, &rbd_dev_list);
7164	spin_unlock(&rbd_dev_list_lock);
7165
7166	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7167		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7168		rbd_dev->header.features);
7169	rc = count;
7170out:
7171	module_put(THIS_MODULE);
7172	return rc;
7173
7174err_out_cleanup_disk:
7175	rbd_free_disk(rbd_dev);
7176err_out_image_lock:
7177	rbd_dev_image_unlock(rbd_dev);
7178	rbd_dev_device_release(rbd_dev);
7179err_out_image_probe:
7180	rbd_dev_image_release(rbd_dev);
7181err_out_rbd_dev:
7182	rbd_dev_destroy(rbd_dev);
7183err_out_client:
7184	rbd_put_client(rbdc);
7185err_out_args:
7186	rbd_spec_put(spec);
7187	kfree(rbd_opts);
7188	goto out;
7189}
7190
7191static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
7192{
7193	if (single_major)
7194		return -EINVAL;
7195
7196	return do_rbd_add(buf, count);
7197}
7198
7199static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
7200				      size_t count)
7201{
7202	return do_rbd_add(buf, count);
7203}
7204
7205static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7206{
7207	while (rbd_dev->parent) {
7208		struct rbd_device *first = rbd_dev;
7209		struct rbd_device *second = first->parent;
7210		struct rbd_device *third;
7211
7212		/*
7213		 * Follow to the parent with no grandparent and
7214		 * remove it.
7215		 */
7216		while (second && (third = second->parent)) {
7217			first = second;
7218			second = third;
7219		}
7220		rbd_assert(second);
7221		rbd_dev_image_release(second);
7222		rbd_dev_destroy(second);
7223		first->parent = NULL;
7224		first->parent_overlap = 0;
7225
7226		rbd_assert(first->parent_spec);
7227		rbd_spec_put(first->parent_spec);
7228		first->parent_spec = NULL;
7229	}
7230}
7231
7232static ssize_t do_rbd_remove(const char *buf, size_t count)
7233{
7234	struct rbd_device *rbd_dev = NULL;
7235	int dev_id;
7236	char opt_buf[6];
7237	bool force = false;
7238	int ret;
7239
7240	if (!capable(CAP_SYS_ADMIN))
7241		return -EPERM;
7242
7243	dev_id = -1;
7244	opt_buf[0] = '\0';
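	/* the expected input is "<dev_id>", optionally followed by " force" */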
7245	sscanf(buf, "%d %5s", &dev_id, opt_buf);
7246	if (dev_id < 0) {
7247		pr_err("dev_id out of range\n");
7248		return -EINVAL;
7249	}
7250	if (opt_buf[0] != '\0') {
7251		if (!strcmp(opt_buf, "force")) {
7252			force = true;
7253		} else {
7254			pr_err("bad remove option at '%s'\n", opt_buf);
7255			return -EINVAL;
7256		}
7257	}
7258
7259	ret = -ENOENT;
7260	spin_lock(&rbd_dev_list_lock);
7261	list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
7262		if (rbd_dev->dev_id == dev_id) {
7263			ret = 0;
7264			break;
7265		}
7266	}
7267	if (!ret) {
7268		spin_lock_irq(&rbd_dev->lock);
7269		if (rbd_dev->open_count && !force)
7270			ret = -EBUSY;
7271		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7272					  &rbd_dev->flags))
7273			ret = -EINPROGRESS;
7274		spin_unlock_irq(&rbd_dev->lock);
7275	}
7276	spin_unlock(&rbd_dev_list_lock);
7277	if (ret)
7278		return ret;
7279
7280	if (force) {
7281		/*
7282		 * Prevent new IO from being queued and wait for existing
7283		 * IO to complete/fail.
7284		 */
7285		blk_mq_freeze_queue(rbd_dev->disk->queue);
7286		blk_mark_disk_dead(rbd_dev->disk);
7287		blk_mq_unfreeze_queue(rbd_dev->disk->queue);
7288	}
7289
7290	del_gendisk(rbd_dev->disk);
7291	spin_lock(&rbd_dev_list_lock);
7292	list_del_init(&rbd_dev->node);
7293	spin_unlock(&rbd_dev_list_lock);
7294	device_del(&rbd_dev->dev);
7295
7296	rbd_dev_image_unlock(rbd_dev);
7297	rbd_dev_device_release(rbd_dev);
7298	rbd_dev_image_release(rbd_dev);
7299	rbd_dev_destroy(rbd_dev);
7300	return count;
7301}
7302
7303static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
7304{
7305	if (single_major)
7306		return -EINVAL;
7307
7308	return do_rbd_remove(buf, count);
7309}
7310
7311static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
7312					 size_t count)
7313{
7314	return do_rbd_remove(buf, count);
7315}
7316
7317/*
7318 * create control files in sysfs
7319 * /sys/bus/rbd/...
7320 */
7321static int __init rbd_sysfs_init(void)
7322{
7323	int ret;
7324
7325	ret = device_register(&rbd_root_dev);
7326	if (ret < 0) {
7327		put_device(&rbd_root_dev);
7328		return ret;
7329	}
7330
7331	ret = bus_register(&rbd_bus_type);
7332	if (ret < 0)
7333		device_unregister(&rbd_root_dev);
7334
7335	return ret;
7336}
7337
7338static void __exit rbd_sysfs_cleanup(void)
7339{
7340	bus_unregister(&rbd_bus_type);
7341	device_unregister(&rbd_root_dev);
7342}
7343
7344static int __init rbd_slab_init(void)
7345{
7346	rbd_assert(!rbd_img_request_cache);
7347	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7348	if (!rbd_img_request_cache)
7349		return -ENOMEM;
7350
7351	rbd_assert(!rbd_obj_request_cache);
7352	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7353	if (!rbd_obj_request_cache)
7354		goto out_err;
7355
7356	return 0;
7357
7358out_err:
7359	kmem_cache_destroy(rbd_img_request_cache);
7360	rbd_img_request_cache = NULL;
7361	return -ENOMEM;
7362}
7363
7364static void rbd_slab_exit(void)
7365{
7366	rbd_assert(rbd_obj_request_cache);
7367	kmem_cache_destroy(rbd_obj_request_cache);
7368	rbd_obj_request_cache = NULL;
7369
7370	rbd_assert(rbd_img_request_cache);
7371	kmem_cache_destroy(rbd_img_request_cache);
7372	rbd_img_request_cache = NULL;
7373}
7374
7375static int __init rbd_init(void)
7376{
7377	int rc;
7378
7379	if (!libceph_compatible(NULL)) {
7380		rbd_warn(NULL, "libceph incompatibility (quitting)");
7381		return -EINVAL;
7382	}
7383
7384	rc = rbd_slab_init();
7385	if (rc)
7386		return rc;
7387
7388	/*
7389	 * The number of active work items is limited by the number of
7390	 * rbd devices * queue depth, so leave @max_active at default.
7391	 */
7392	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7393	if (!rbd_wq) {
7394		rc = -ENOMEM;
7395		goto err_out_slab;
7396	}
7397
7398	if (single_major) {
7399		rbd_major = register_blkdev(0, RBD_DRV_NAME);
7400		if (rbd_major < 0) {
7401			rc = rbd_major;
7402			goto err_out_wq;
7403		}
7404	}
7405
7406	rc = rbd_sysfs_init();
7407	if (rc)
7408		goto err_out_blkdev;
7409
7410	if (single_major)
7411		pr_info("loaded (major %d)\n", rbd_major);
7412	else
7413		pr_info("loaded\n");
7414
7415	return 0;
7416
7417err_out_blkdev:
7418	if (single_major)
7419		unregister_blkdev(rbd_major, RBD_DRV_NAME);
7420err_out_wq:
7421	destroy_workqueue(rbd_wq);
7422err_out_slab:
7423	rbd_slab_exit();
7424	return rc;
7425}
7426
7427static void __exit rbd_exit(void)
7428{
7429	ida_destroy(&rbd_dev_id_ida);
7430	rbd_sysfs_cleanup();
7431	if (single_major)
7432		unregister_blkdev(rbd_major, RBD_DRV_NAME);
7433	destroy_workqueue(rbd_wq);
7434	rbd_slab_exit();
7435}
7436
7437module_init(rbd_init);
7438module_exit(rbd_exit);
7439
7440MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7441MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7442MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7443/* following authorship retained from original osdblk.c */
7444MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7445
7446MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7447MODULE_LICENSE("GPL");