v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/ceph/ceph_debug.h>
  3
  4#include <linux/sort.h>
  5#include <linux/slab.h>
  6
  7#include "super.h"
  8#include "mds_client.h"
  9
 10#include <linux/ceph/decode.h>
 11
 12/*
 13 * Snapshots in ceph are driven in large part by cooperation from the
 14 * client.  In contrast to local file systems or file servers that
 15 * implement snapshots at a single point in the system, ceph's
 16 * distributed access to storage requires clients to help decide
 17 * whether a write logically occurs before or after a recently created
 18 * snapshot.
 19 *
 20 * This provides a perfect instantaneous client-wide snapshot.  Between
 21 * clients, however, snapshots may appear to be applied at slightly
 22 * different points in time, depending on delays in delivering the
 23 * snapshot notification.
 24 *
 25 * Snapshots are _not_ file system-wide.  Instead, each snapshot
 26 * applies to the subdirectory nested beneath some directory.  This
 27 * effectively divides the hierarchy into multiple "realms," where all
 28 * of the files contained by each realm share the same set of
 29 * snapshots.  An individual realm's snap set contains snapshots
 30 * explicitly created on that realm, as well as any snaps in its
 31 * parent's snap set _after_ the point at which the parent became its
 32 * parent (due to, say, a rename).  Similarly, snaps from prior parents
 33 * during the intervals in which they were the parent are included.
 34 *
 35 * The client is spared most of this detail, fortunately... it need only
 36 * maintain a hierarchy of realms reflecting the current parent/child
 37 * realm relationship, and, for each realm, an explicit list of snaps
 38 * inherited from prior parents.
 39 *
 40 * A snap_realm struct is maintained for realms containing every inode
 41 * with an open cap in the system.  (The needed snap realm information is
 42 * provided by the MDS whenever a cap is issued, i.e., on open.)  A 'seq'
 43 * version number is used to ensure that as realm parameters change (new
 44 * snapshot, new parent, etc.) the client's realm hierarchy is updated.
 45 *
 46 * The realm hierarchy drives the generation of a 'snap context' for each
 47 * realm, which simply lists the resulting set of snaps for the realm.  This
 48 * is attached to any writes sent to OSDs.
 49 */
 50/*
 51 * Unfortunately error handling is a bit mixed here.  If we get a snap
 52 * update, but don't have enough memory to update our realm hierarchy,
 53 * it's not clear what we can do about it (besides complaining to the
 54 * console).
 55 */
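
/*
 * For reference, a minimal sketch (not a verbatim copy) of the snap
 * context this file builds and attaches to OSD writes.  The real
 * declaration lives in include/linux/ceph/libceph.h; these are the
 * fields used throughout this file.
 */
struct ceph_snap_context_sketch {
	refcount_t nref;	/* refcount; see ceph_put_snap_context() */
	u64 seq;		/* highest snap seq covered by this context */
	u32 num_snaps;		/* length of snaps[] */
	u64 snaps[];		/* snapids, in descending order */
};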
 56
 57
 58/*
 59 * increase ref count for the realm
 60 *
 61 * caller must hold snap_rwsem for write.
 62 */
 63void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 64			 struct ceph_snap_realm *realm)
 65{
 66	dout("get_realm %p %d -> %d\n", realm,
 67	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
 68	/*
 69	 * since we _only_ increment realm refs or empty the empty
 70	 * list with snap_rwsem held, adjusting the empty list here is
 71	 * safe.  we do need to protect against concurrent empty list
 72	 * additions, however.
 73	 */
 74	if (atomic_inc_return(&realm->nref) == 1) {
 75		spin_lock(&mdsc->snap_empty_lock);
 76		list_del_init(&realm->empty_item);
 77		spin_unlock(&mdsc->snap_empty_lock);
 78	}
 79}
 80
 81static void __insert_snap_realm(struct rb_root *root,
 82				struct ceph_snap_realm *new)
 83{
 84	struct rb_node **p = &root->rb_node;
 85	struct rb_node *parent = NULL;
 86	struct ceph_snap_realm *r = NULL;
 87
 88	while (*p) {
 89		parent = *p;
 90		r = rb_entry(parent, struct ceph_snap_realm, node);
 91		if (new->ino < r->ino)
 92			p = &(*p)->rb_left;
 93		else if (new->ino > r->ino)
 94			p = &(*p)->rb_right;
 95		else
 96			BUG();
 97	}
 98
 99	rb_link_node(&new->node, parent, p);
100	rb_insert_color(&new->node, root);
101}
102
103/*
104 * create and get the realm rooted at @ino and bump its ref count.
105 *
106 * caller must hold snap_rwsem for write.
107 */
108static struct ceph_snap_realm *ceph_create_snap_realm(
109	struct ceph_mds_client *mdsc,
110	u64 ino)
111{
112	struct ceph_snap_realm *realm;
113
114	realm = kzalloc(sizeof(*realm), GFP_NOFS);
115	if (!realm)
116		return ERR_PTR(-ENOMEM);
117
118	atomic_set(&realm->nref, 1);    /* for caller */
119	realm->ino = ino;
120	INIT_LIST_HEAD(&realm->children);
121	INIT_LIST_HEAD(&realm->child_item);
122	INIT_LIST_HEAD(&realm->empty_item);
123	INIT_LIST_HEAD(&realm->dirty_item);
124	INIT_LIST_HEAD(&realm->inodes_with_caps);
125	spin_lock_init(&realm->inodes_with_caps_lock);
126	__insert_snap_realm(&mdsc->snap_realms, realm);
127	dout("create_snap_realm %llx %p\n", realm->ino, realm);
128	return realm;
129}
130
131/*
132 * lookup the realm rooted at @ino.
133 *
134 * caller must hold snap_rwsem for write.
135 */
136static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
137						   u64 ino)
138{
139	struct rb_node *n = mdsc->snap_realms.rb_node;
140	struct ceph_snap_realm *r;
141
142	while (n) {
143		r = rb_entry(n, struct ceph_snap_realm, node);
144		if (ino < r->ino)
145			n = n->rb_left;
146		else if (ino > r->ino)
147			n = n->rb_right;
148		else {
149			dout("lookup_snap_realm %llx %p\n", r->ino, r);
150			return r;
151		}
152	}
153	return NULL;
154}
155
156struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
157					       u64 ino)
158{
159	struct ceph_snap_realm *r;
160	r = __lookup_snap_realm(mdsc, ino);
161	if (r)
162		ceph_get_snap_realm(mdsc, r);
163	return r;
164}
165
166static void __put_snap_realm(struct ceph_mds_client *mdsc,
167			     struct ceph_snap_realm *realm);
168
169/*
170 * called with snap_rwsem (write)
171 */
172static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
173				 struct ceph_snap_realm *realm)
174{
175	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
176
177	rb_erase(&realm->node, &mdsc->snap_realms);
178
179	if (realm->parent) {
180		list_del_init(&realm->child_item);
181		__put_snap_realm(mdsc, realm->parent);
182	}
183
184	kfree(realm->prior_parent_snaps);
185	kfree(realm->snaps);
186	ceph_put_snap_context(realm->cached_context);
187	kfree(realm);
188}
189
190/*
191 * caller holds snap_rwsem (write)
192 */
193static void __put_snap_realm(struct ceph_mds_client *mdsc,
194			     struct ceph_snap_realm *realm)
195{
196	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
197	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
198	if (atomic_dec_and_test(&realm->nref))
199		__destroy_snap_realm(mdsc, realm);
200}
201
202/*
203 * caller needn't hold any locks
204 */
205void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
206			 struct ceph_snap_realm *realm)
207{
208	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
209	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
210	if (!atomic_dec_and_test(&realm->nref))
211		return;
212
213	if (down_write_trylock(&mdsc->snap_rwsem)) {
214		__destroy_snap_realm(mdsc, realm);
215		up_write(&mdsc->snap_rwsem);
216	} else {
217		spin_lock(&mdsc->snap_empty_lock);
218		list_add(&realm->empty_item, &mdsc->snap_empty);
219		spin_unlock(&mdsc->snap_empty_lock);
220	}
221}
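
/*
 * Note the pattern above: destroying a realm requires snap_rwsem for
 * write, but this put path runs with no locks held and may not be able
 * to take the rwsem.  When the trylock fails, the dead realm is parked
 * on mdsc->snap_empty and reaped later by __cleanup_empty_realms(),
 * which always runs under snap_rwsem.
 */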
222
223/*
224 * Clean up any realms whose ref counts have dropped to zero.  Note
 225 * that this does not include realms that were created but not yet
226 * used.
227 *
228 * Called under snap_rwsem (write)
229 */
230static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
231{
232	struct ceph_snap_realm *realm;
233
234	spin_lock(&mdsc->snap_empty_lock);
235	while (!list_empty(&mdsc->snap_empty)) {
236		realm = list_first_entry(&mdsc->snap_empty,
237				   struct ceph_snap_realm, empty_item);
238		list_del(&realm->empty_item);
239		spin_unlock(&mdsc->snap_empty_lock);
240		__destroy_snap_realm(mdsc, realm);
241		spin_lock(&mdsc->snap_empty_lock);
242	}
243	spin_unlock(&mdsc->snap_empty_lock);
244}
245
246void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
247{
248	down_write(&mdsc->snap_rwsem);
249	__cleanup_empty_realms(mdsc);
250	up_write(&mdsc->snap_rwsem);
251}
252
253/*
 254 * adjust the parent realm of a given @realm.  adjust the child list and
 255 * parent pointers, and ref counts appropriately.
 256 *
 257 * return 1 if the parent was changed, 0 if unchanged, <0 on error.
258 *
259 * caller must hold snap_rwsem for write.
260 */
261static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
262				    struct ceph_snap_realm *realm,
263				    u64 parentino)
264{
265	struct ceph_snap_realm *parent;
266
267	if (realm->parent_ino == parentino)
268		return 0;
269
270	parent = ceph_lookup_snap_realm(mdsc, parentino);
271	if (!parent) {
272		parent = ceph_create_snap_realm(mdsc, parentino);
273		if (IS_ERR(parent))
274			return PTR_ERR(parent);
275	}
276	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
277	     realm->ino, realm, realm->parent_ino, realm->parent,
278	     parentino, parent);
279	if (realm->parent) {
280		list_del_init(&realm->child_item);
281		ceph_put_snap_realm(mdsc, realm->parent);
282	}
283	realm->parent_ino = parentino;
284	realm->parent = parent;
285	list_add(&realm->child_item, &parent->children);
286	return 1;
287}
288
289
290static int cmpu64_rev(const void *a, const void *b)
291{
292	if (*(u64 *)a < *(u64 *)b)
293		return 1;
294	if (*(u64 *)a > *(u64 *)b)
295		return -1;
296	return 0;
297}
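
/*
 * cmpu64_rev() deliberately inverts the usual ordering so that sort()
 * in build_snap_context() below yields a descending snap vector; e.g.
 * {2, 9, 5} sorts to {9, 5, 2}.
 */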
298
299
300/*
301 * build the snap context for a given realm.
302 */
303static int build_snap_context(struct ceph_snap_realm *realm,
304			      struct list_head* dirty_realms)
305{
306	struct ceph_snap_realm *parent = realm->parent;
307	struct ceph_snap_context *snapc;
308	int err = 0;
309	u32 num = realm->num_prior_parent_snaps + realm->num_snaps;
310
311	/*
312	 * build parent context, if it hasn't been built.
313	 * conservatively estimate that all parent snaps might be
314	 * included by us.
315	 */
316	if (parent) {
317		if (!parent->cached_context) {
318			err = build_snap_context(parent, dirty_realms);
319			if (err)
320				goto fail;
321		}
322		num += parent->cached_context->num_snaps;
323	}
324
325	/* do i actually need to update?  not if my context seq
326	   matches realm seq, and my parent's does too.  (this works
327	   because rebuild_snap_realms() works _downward_ in the
328	   hierarchy after each update.) */
329	if (realm->cached_context &&
330	    realm->cached_context->seq == realm->seq &&
331	    (!parent ||
332	     realm->cached_context->seq >= parent->cached_context->seq)) {
333		dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
334		     " (unchanged)\n",
335		     realm->ino, realm, realm->cached_context,
336		     realm->cached_context->seq,
337		     (unsigned int)realm->cached_context->num_snaps);
338		return 0;
339	}
340
341	/* alloc new snap context */
342	err = -ENOMEM;
343	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
344		goto fail;
345	snapc = ceph_create_snap_context(num, GFP_NOFS);
346	if (!snapc)
347		goto fail;
348
349	/* build (reverse sorted) snap vector */
350	num = 0;
351	snapc->seq = realm->seq;
352	if (parent) {
353		u32 i;
354
355		/* include any of parent's snaps occurring _after_ my
356		   parent became my parent */
357		for (i = 0; i < parent->cached_context->num_snaps; i++)
358			if (parent->cached_context->snaps[i] >=
359			    realm->parent_since)
360				snapc->snaps[num++] =
361					parent->cached_context->snaps[i];
362		if (parent->cached_context->seq > snapc->seq)
363			snapc->seq = parent->cached_context->seq;
364	}
365	memcpy(snapc->snaps + num, realm->snaps,
366	       sizeof(u64)*realm->num_snaps);
367	num += realm->num_snaps;
368	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
369	       sizeof(u64)*realm->num_prior_parent_snaps);
370	num += realm->num_prior_parent_snaps;
371
372	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
373	snapc->num_snaps = num;
374	dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
375	     realm->ino, realm, snapc, snapc->seq,
376	     (unsigned int) snapc->num_snaps);
377
378	ceph_put_snap_context(realm->cached_context);
379	realm->cached_context = snapc;
380	/* queue realm for cap_snap creation */
381	list_add_tail(&realm->dirty_item, dirty_realms);
382	return 0;
383
384fail:
385	/*
386	 * if we fail, clear old (incorrect) cached_context... hopefully
387	 * we'll have better luck building it later
388	 */
389	if (realm->cached_context) {
390		ceph_put_snap_context(realm->cached_context);
391		realm->cached_context = NULL;
392	}
393	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
394	       realm, err);
395	return err;
396}
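
/*
 * A worked example of the merge above, with made-up snapids: if the
 * parent's cached context is {8, 5, 2} and realm->parent_since == 4,
 * only parent snaps 8 and 5 are inherited.  With realm->snaps == {6, 3}
 * and realm->prior_parent_snaps == {1}, the reverse-sorted result is
 * {8, 6, 5, 3, 1}, and snapc->seq is the larger of the realm's and the
 * parent's seq.
 */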
397
398/*
399 * rebuild snap context for the given realm and all of its children.
400 */
401static void rebuild_snap_realms(struct ceph_snap_realm *realm,
402				struct list_head *dirty_realms)
403{
404	struct ceph_snap_realm *child;
405
406	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
407	build_snap_context(realm, dirty_realms);
408
409	list_for_each_entry(child, &realm->children, child_item)
410		rebuild_snap_realms(child, dirty_realms);
411}
412
413
414/*
415 * helper to allocate and decode an array of snapids.  free prior
416 * instance, if any.
417 */
418static int dup_array(u64 **dst, __le64 *src, u32 num)
419{
420	u32 i;
421
422	kfree(*dst);
423	if (num) {
424		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
425		if (!*dst)
426			return -ENOMEM;
427		for (i = 0; i < num; i++)
428			(*dst)[i] = get_unaligned_le64(src + i);
429	} else {
430		*dst = NULL;
431	}
432	return 0;
433}
434
435static bool has_new_snaps(struct ceph_snap_context *o,
436			  struct ceph_snap_context *n)
437{
438	if (n->num_snaps == 0)
439		return false;
440	/* snaps are in descending order */
441	return n->snaps[0] > o->seq;
442}
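
/*
 * Example: with o->seq == 10 and n->snaps == {12, 9, 4} (descending),
 * has_new_snaps() returns true, because snapid 12 was created after
 * the old context was stamped.
 */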
443
444/*
445 * When a snapshot is applied, the size/mtime inode metadata is queued
446 * in a ceph_cap_snap (one for each snapshot) until writeback
447 * completes and the metadata can be flushed back to the MDS.
448 *
449 * However, if a (sync) write is currently in-progress when we apply
450 * the snapshot, we have to wait until the write succeeds or fails
451 * (and a final size/mtime is known).  In this case the
452 * cap_snap->writing = 1, and is said to be "pending."  When the write
453 * finishes, we __ceph_finish_cap_snap().
454 *
455 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
456 * change).
457 */
458void ceph_queue_cap_snap(struct ceph_inode_info *ci)
459{
460	struct inode *inode = &ci->vfs_inode;
461	struct ceph_cap_snap *capsnap;
462	struct ceph_snap_context *old_snapc, *new_snapc;
463	int used, dirty;
464
465	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
466	if (!capsnap) {
467		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
468		return;
469	}
470
471	spin_lock(&ci->i_ceph_lock);
472	used = __ceph_caps_used(ci);
473	dirty = __ceph_caps_dirty(ci);
474
475	old_snapc = ci->i_head_snapc;
476	new_snapc = ci->i_snap_realm->cached_context;
477
478	/*
479	 * If there is a write in progress, treat that as a dirty Fw,
480	 * even though it hasn't completed yet; by the time we finish
481	 * up this capsnap it will be.
482	 */
483	if (used & CEPH_CAP_FILE_WR)
484		dirty |= CEPH_CAP_FILE_WR;
485
486	if (__ceph_have_pending_cap_snap(ci)) {
487		/* there is no point in queuing multiple "pending" cap_snaps,
488		   as no new writes are allowed to start when pending, so any
489		   writes in progress now were started before the previous
490		   cap_snap.  lucky us. */
491		dout("queue_cap_snap %p already pending\n", inode);
492		goto update_snapc;
493	}
494	if (ci->i_wrbuffer_ref_head == 0 &&
495	    !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
496		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
497		goto update_snapc;
498	}
499
500	BUG_ON(!old_snapc);
501
502	/*
503	 * There is no need to send a FLUSHSNAP message to the MDS if there
504	 * is no new snapshot. But when there are dirty pages or on-going
505	 * writes, we still need to create a cap_snap. The cap_snap is needed
506	 * by the write path and page writeback path.
507	 *
508	 * also see ceph_try_drop_cap_snap()
509	 */
510	if (has_new_snaps(old_snapc, new_snapc)) {
511		if (dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))
512			capsnap->need_flush = true;
513	} else {
514		if (!(used & CEPH_CAP_FILE_WR) &&
515		    ci->i_wrbuffer_ref_head == 0) {
516			dout("queue_cap_snap %p "
517			     "no new_snap|dirty_page|writing\n", inode);
518			goto update_snapc;
519		}
520	}
521
522	dout("queue_cap_snap %p cap_snap %p queuing under %p %s %s\n",
523	     inode, capsnap, old_snapc, ceph_cap_string(dirty),
524	     capsnap->need_flush ? "" : "no_flush");
525	ihold(inode);
526
527	refcount_set(&capsnap->nref, 1);
528	INIT_LIST_HEAD(&capsnap->ci_item);
529
530	capsnap->follows = old_snapc->seq;
531	capsnap->issued = __ceph_caps_issued(ci, NULL);
532	capsnap->dirty = dirty;
533
534	capsnap->mode = inode->i_mode;
535	capsnap->uid = inode->i_uid;
536	capsnap->gid = inode->i_gid;
537
538	if (dirty & CEPH_CAP_XATTR_EXCL) {
539		__ceph_build_xattrs_blob(ci);
540		capsnap->xattr_blob =
541			ceph_buffer_get(ci->i_xattrs.blob);
542		capsnap->xattr_version = ci->i_xattrs.version;
543	} else {
544		capsnap->xattr_blob = NULL;
545		capsnap->xattr_version = 0;
546	}
547
548	capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
549
550	/* dirty page count moved from _head to this cap_snap;
551	   all subsequent page dirties occur _after_ this
552	   snapshot. */
553	capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
554	ci->i_wrbuffer_ref_head = 0;
555	capsnap->context = old_snapc;
556	list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
557
558	if (used & CEPH_CAP_FILE_WR) {
559		dout("queue_cap_snap %p cap_snap %p snapc %p"
560		     " seq %llu used WR, now pending\n", inode,
561		     capsnap, old_snapc, old_snapc->seq);
562		capsnap->writing = 1;
563	} else {
564		/* note mtime, size NOW. */
565		__ceph_finish_cap_snap(ci, capsnap);
566	}
567	capsnap = NULL;
568	old_snapc = NULL;
569
570update_snapc:
571	if (ci->i_head_snapc) {
572		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
573		dout(" new snapc is %p\n", new_snapc);
574	}
575	spin_unlock(&ci->i_ceph_lock);
576
577	kfree(capsnap);
578	ceph_put_snap_context(old_snapc);
579}
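
/*
 * A rough summary of the cases ceph_queue_cap_snap() handles:
 *
 *	capsnap already pending            -> only update i_head_snapc
 *	nothing dirty and nothing writing  -> only update i_head_snapc
 *	new snaps since old_snapc->seq     -> queue capsnap, need_flush
 *	                                      if any EXCL/Fw caps dirty
 *	no new snaps, but dirty pages or
 *	a write in flight                  -> queue capsnap, no flush
 */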
580
581/*
 582 * Finalize the size, mtime for a cap_snap... that is, settle on final values
583 * to be used for the snapshot, to be flushed back to the mds.
584 *
585 * If capsnap can now be flushed, add to snap_flush list, and return 1.
586 *
587 * Caller must hold i_ceph_lock.
588 */
589int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
590			    struct ceph_cap_snap *capsnap)
591{
592	struct inode *inode = &ci->vfs_inode;
593	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
594
595	BUG_ON(capsnap->writing);
596	capsnap->size = inode->i_size;
597	capsnap->mtime = inode->i_mtime;
598	capsnap->atime = inode->i_atime;
599	capsnap->ctime = inode->i_ctime;
600	capsnap->time_warp_seq = ci->i_time_warp_seq;
601	capsnap->truncate_size = ci->i_truncate_size;
602	capsnap->truncate_seq = ci->i_truncate_seq;
603	if (capsnap->dirty_pages) {
604		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
605		     "still has %d dirty pages\n", inode, capsnap,
606		     capsnap->context, capsnap->context->seq,
607		     ceph_cap_string(capsnap->dirty), capsnap->size,
608		     capsnap->dirty_pages);
609		return 0;
610	}
611
612	ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
613	dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
614	     inode, capsnap, capsnap->context,
615	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
616	     capsnap->size);
617
618	spin_lock(&mdsc->snap_flush_lock);
619	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
620	spin_unlock(&mdsc->snap_flush_lock);
621	return 1;  /* caller may want to ceph_flush_snaps */
622}
623
624/*
625 * Queue cap_snaps for snap writeback for this realm and its children.
626 * Called under snap_rwsem, so realm topology won't change.
627 */
628static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
629{
630	struct ceph_inode_info *ci;
631	struct inode *lastinode = NULL;
632
633	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
634
635	spin_lock(&realm->inodes_with_caps_lock);
636	list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
637		struct inode *inode = igrab(&ci->vfs_inode);
638		if (!inode)
639			continue;
640		spin_unlock(&realm->inodes_with_caps_lock);
641		iput(lastinode);
642		lastinode = inode;
643		ceph_queue_cap_snap(ci);
644		spin_lock(&realm->inodes_with_caps_lock);
645	}
646	spin_unlock(&realm->inodes_with_caps_lock);
647	iput(lastinode);
648
649	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
650}
651
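/*
 * A minimal sketch of the snap trace layout that
 * ceph_update_snap_trace() below walks.  Each element is one encoded
 * struct ceph_mds_snap_realm (the fields referenced here: ino, seq,
 * created, parent, parent_since, num_snaps, num_prior_parent_snaps)
 * followed immediately by its two snapid vectors:
 *
 *	struct ceph_mds_snap_realm ri;
 *	__le64 snaps[ri.num_snaps];
 *	__le64 prior_parent_snaps[ri.num_prior_parent_snaps];
 *	... repeated for each ancestor realm, up to the root ...
 */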
652/*
653 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
654 * the snap realm parameters from a given realm and all of its ancestors,
655 * up to the root.
656 *
657 * Caller must hold snap_rwsem for write.
658 */
659int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
660			   void *p, void *e, bool deletion,
661			   struct ceph_snap_realm **realm_ret)
662{
663	struct ceph_mds_snap_realm *ri;    /* encoded */
664	__le64 *snaps;                     /* encoded */
665	__le64 *prior_parent_snaps;        /* encoded */
666	struct ceph_snap_realm *realm = NULL;
667	struct ceph_snap_realm *first_realm = NULL;
668	int invalidate = 0;
669	int err = -ENOMEM;
670	LIST_HEAD(dirty_realms);
671
672	dout("update_snap_trace deletion=%d\n", deletion);
673more:
674	ceph_decode_need(&p, e, sizeof(*ri), bad);
675	ri = p;
676	p += sizeof(*ri);
677	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
678			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
679	snaps = p;
680	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
681	prior_parent_snaps = p;
682	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);
683
684	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
685	if (!realm) {
686		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
687		if (IS_ERR(realm)) {
688			err = PTR_ERR(realm);
689			goto fail;
690		}
691	}
692
693	/* ensure the parent is correct */
694	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
695	if (err < 0)
696		goto fail;
697	invalidate += err;
698
699	if (le64_to_cpu(ri->seq) > realm->seq) {
700		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
701		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
702		/* update realm parameters, snap lists */
703		realm->seq = le64_to_cpu(ri->seq);
704		realm->created = le64_to_cpu(ri->created);
705		realm->parent_since = le64_to_cpu(ri->parent_since);
706
707		realm->num_snaps = le32_to_cpu(ri->num_snaps);
708		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
709		if (err < 0)
710			goto fail;
711
712		realm->num_prior_parent_snaps =
713			le32_to_cpu(ri->num_prior_parent_snaps);
714		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
715				realm->num_prior_parent_snaps);
716		if (err < 0)
717			goto fail;
718
719		if (realm->seq > mdsc->last_snap_seq)
720			mdsc->last_snap_seq = realm->seq;
721
722		invalidate = 1;
723	} else if (!realm->cached_context) {
724		dout("update_snap_trace %llx %p seq %lld new\n",
725		     realm->ino, realm, realm->seq);
726		invalidate = 1;
727	} else {
728		dout("update_snap_trace %llx %p seq %lld unchanged\n",
729		     realm->ino, realm, realm->seq);
730	}
731
732	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
733	     realm, invalidate, p, e);
734
735	/* invalidate when we reach the _end_ (root) of the trace */
736	if (invalidate && p >= e)
737		rebuild_snap_realms(realm, &dirty_realms);
738
739	if (!first_realm)
740		first_realm = realm;
741	else
742		ceph_put_snap_realm(mdsc, realm);
743
744	if (p < e)
745		goto more;
746
747	/*
748	 * queue cap snaps _after_ we've built the new snap contexts,
749	 * so that i_head_snapc can be set appropriately.
750	 */
751	while (!list_empty(&dirty_realms)) {
752		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
753					 dirty_item);
754		list_del_init(&realm->dirty_item);
755		queue_realm_cap_snaps(realm);
756	}
757
758	if (realm_ret)
759		*realm_ret = first_realm;
760	else
761		ceph_put_snap_realm(mdsc, first_realm);
762
763	__cleanup_empty_realms(mdsc);
764	return 0;
765
766bad:
767	err = -EINVAL;
768fail:
769	if (realm && !IS_ERR(realm))
770		ceph_put_snap_realm(mdsc, realm);
771	if (first_realm)
772		ceph_put_snap_realm(mdsc, first_realm);
773	pr_err("update_snap_trace error %d\n", err);
774	return err;
775}
776
777
778/*
779 * Send any cap_snaps that are queued for flush.  Try to carry
780 * s_mutex across multiple snap flushes to avoid locking overhead.
781 *
782 * Caller holds no locks.
783 */
784static void flush_snaps(struct ceph_mds_client *mdsc)
785{
786	struct ceph_inode_info *ci;
787	struct inode *inode;
788	struct ceph_mds_session *session = NULL;
789
790	dout("flush_snaps\n");
791	spin_lock(&mdsc->snap_flush_lock);
792	while (!list_empty(&mdsc->snap_flush_list)) {
793		ci = list_first_entry(&mdsc->snap_flush_list,
794				struct ceph_inode_info, i_snap_flush_item);
795		inode = &ci->vfs_inode;
796		ihold(inode);
797		spin_unlock(&mdsc->snap_flush_lock);
798		ceph_flush_snaps(ci, &session);
799		iput(inode);
800		spin_lock(&mdsc->snap_flush_lock);
801	}
802	spin_unlock(&mdsc->snap_flush_lock);
803
804	if (session) {
805		mutex_unlock(&session->s_mutex);
806		ceph_put_mds_session(session);
807	}
808	dout("flush_snaps done\n");
809}
810
811
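/*
 * A minimal sketch of the message ceph_handle_snap() below decodes.
 * The head is struct ceph_mds_snap_head (fields used here: op, split,
 * num_split_inos, num_split_realms, trace_len); for a SPLIT, two
 * __le64 vectors follow the head, and the snap trace comes last:
 *
 *	struct ceph_mds_snap_head h;
 *	__le64 split_inos[h.num_split_inos];      (SPLIT only)
 *	__le64 split_realms[h.num_split_realms];  (SPLIT only)
 *	<snap trace, handed to ceph_update_snap_trace()>
 */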
812/*
813 * Handle a snap notification from the MDS.
814 *
815 * This can take two basic forms: the simplest is just a snap creation
816 * or deletion notification on an existing realm.  This should update the
817 * realm and its children.
818 *
819 * The more difficult case is realm creation, due to snap creation at a
820 * new point in the file hierarchy, or due to a rename that moves a file or
821 * directory into another realm.
822 */
823void ceph_handle_snap(struct ceph_mds_client *mdsc,
824		      struct ceph_mds_session *session,
825		      struct ceph_msg *msg)
826{
827	struct super_block *sb = mdsc->fsc->sb;
828	int mds = session->s_mds;
829	u64 split;
830	int op;
831	int trace_len;
832	struct ceph_snap_realm *realm = NULL;
833	void *p = msg->front.iov_base;
834	void *e = p + msg->front.iov_len;
835	struct ceph_mds_snap_head *h;
836	int num_split_inos, num_split_realms;
837	__le64 *split_inos = NULL, *split_realms = NULL;
838	int i;
839	int locked_rwsem = 0;
840
841	/* decode */
842	if (msg->front.iov_len < sizeof(*h))
843		goto bad;
844	h = p;
845	op = le32_to_cpu(h->op);
846	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
847					  * existing realm */
848	num_split_inos = le32_to_cpu(h->num_split_inos);
849	num_split_realms = le32_to_cpu(h->num_split_realms);
850	trace_len = le32_to_cpu(h->trace_len);
851	p += sizeof(*h);
852
853	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
854	     ceph_snap_op_name(op), split, trace_len);
855
856	mutex_lock(&session->s_mutex);
857	session->s_seq++;
858	mutex_unlock(&session->s_mutex);
859
860	down_write(&mdsc->snap_rwsem);
861	locked_rwsem = 1;
862
863	if (op == CEPH_SNAP_OP_SPLIT) {
864		struct ceph_mds_snap_realm *ri;
865
866		/*
867		 * A "split" breaks part of an existing realm off into
868		 * a new realm.  The MDS provides a list of inodes
869		 * (with caps) and child realms that belong to the new
870		 * child.
871		 */
872		split_inos = p;
873		p += sizeof(u64) * num_split_inos;
874		split_realms = p;
875		p += sizeof(u64) * num_split_realms;
876		ceph_decode_need(&p, e, sizeof(*ri), bad);
877		/* we will peek at realm info here, but will _not_
878		 * advance p, as the realm update will occur below in
879		 * ceph_update_snap_trace. */
880		ri = p;
881
882		realm = ceph_lookup_snap_realm(mdsc, split);
883		if (!realm) {
884			realm = ceph_create_snap_realm(mdsc, split);
885			if (IS_ERR(realm))
886				goto out;
887		}
888
889		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
890		for (i = 0; i < num_split_inos; i++) {
891			struct ceph_vino vino = {
892				.ino = le64_to_cpu(split_inos[i]),
893				.snap = CEPH_NOSNAP,
894			};
895			struct inode *inode = ceph_find_inode(sb, vino);
896			struct ceph_inode_info *ci;
897			struct ceph_snap_realm *oldrealm;
898
899			if (!inode)
900				continue;
901			ci = ceph_inode(inode);
902
903			spin_lock(&ci->i_ceph_lock);
904			if (!ci->i_snap_realm)
905				goto skip_inode;
906			/*
907			 * If this inode belongs to a realm that was
908			 * created after our new realm, we experienced
909			 * a race (due to another split notification
910			 * arriving from a different MDS).  So skip
911			 * this inode.
912			 */
913			if (ci->i_snap_realm->created >
914			    le64_to_cpu(ri->created)) {
915				dout(" leaving %p in newer realm %llx %p\n",
916				     inode, ci->i_snap_realm->ino,
917				     ci->i_snap_realm);
918				goto skip_inode;
919			}
920			dout(" will move %p to split realm %llx %p\n",
921			     inode, realm->ino, realm);
922			/*
923			 * Move the inode to the new realm
924			 */
925			oldrealm = ci->i_snap_realm;
926			spin_lock(&oldrealm->inodes_with_caps_lock);
927			list_del_init(&ci->i_snap_realm_item);
928			spin_unlock(&oldrealm->inodes_with_caps_lock);
929
930			spin_lock(&realm->inodes_with_caps_lock);
931			list_add(&ci->i_snap_realm_item,
932				 &realm->inodes_with_caps);
933			ci->i_snap_realm = realm;
934			if (realm->ino == ci->i_vino.ino)
935				realm->inode = inode;
936			spin_unlock(&realm->inodes_with_caps_lock);
937
938			spin_unlock(&ci->i_ceph_lock);
939
940			ceph_get_snap_realm(mdsc, realm);
941			ceph_put_snap_realm(mdsc, oldrealm);
942
943			iput(inode);
944			continue;
945
946skip_inode:
947			spin_unlock(&ci->i_ceph_lock);
948			iput(inode);
949		}
950
951		/* we may have taken some of the old realm's children. */
952		for (i = 0; i < num_split_realms; i++) {
953			struct ceph_snap_realm *child =
954				__lookup_snap_realm(mdsc,
955					   le64_to_cpu(split_realms[i]));
956			if (!child)
957				continue;
958			adjust_snap_realm_parent(mdsc, child, realm->ino);
959		}
960	}
961
962	/*
963	 * update using the provided snap trace. if we are deleting a
964	 * snap, we can avoid queueing cap_snaps.
965	 */
966	ceph_update_snap_trace(mdsc, p, e,
967			       op == CEPH_SNAP_OP_DESTROY, NULL);
968
969	if (op == CEPH_SNAP_OP_SPLIT)
970		/* we took a reference when we created the realm, above */
971		ceph_put_snap_realm(mdsc, realm);
972
973	__cleanup_empty_realms(mdsc);
974
975	up_write(&mdsc->snap_rwsem);
976
977	flush_snaps(mdsc);
978	return;
979
980bad:
981	pr_err("corrupt snap message from mds%d\n", mds);
982	ceph_msg_dump(msg);
983out:
984	if (locked_rwsem)
985		up_write(&mdsc->snap_rwsem);
986	return;
987}
v3.5.6
 
  1#include <linux/ceph/ceph_debug.h>
  2
  3#include <linux/sort.h>
  4#include <linux/slab.h>
  5
  6#include "super.h"
  7#include "mds_client.h"
  8
  9#include <linux/ceph/decode.h>
 10
 11/*
 12 * Snapshots in ceph are driven in large part by cooperation from the
 13 * client.  In contrast to local file systems or file servers that
 14 * implement snapshots at a single point in the system, ceph's
 15 * distributed access to storage requires clients to help decide
 16 * whether a write logically occurs before or after a recently created
 17 * snapshot.
 18 *
 19 * This provides a perfect instantaneous client-wide snapshot.  Between
 20 * clients, however, snapshots may appear to be applied at slightly
 21 * different points in time, depending on delays in delivering the
 22 * snapshot notification.
 23 *
 24 * Snapshots are _not_ file system-wide.  Instead, each snapshot
 25 * applies to the subdirectory nested beneath some directory.  This
 26 * effectively divides the hierarchy into multiple "realms," where all
 27 * of the files contained by each realm share the same set of
 28 * snapshots.  An individual realm's snap set contains snapshots
 29 * explicitly created on that realm, as well as any snaps in its
 30 * parent's snap set _after_ the point at which the parent became its
 31 * parent (due to, say, a rename).  Similarly, snaps from prior parents
 32 * during the intervals in which they were the parent are included.
 33 *
 34 * The client is spared most of this detail, fortunately... it need only
 35 * maintain a hierarchy of realms reflecting the current parent/child
 36 * realm relationship, and, for each realm, an explicit list of snaps
 37 * inherited from prior parents.
 38 *
 39 * A snap_realm struct is maintained for realms containing every inode
 40 * with an open cap in the system.  (The needed snap realm information is
 41 * provided by the MDS whenever a cap is issued, i.e., on open.)  A 'seq'
 42 * version number is used to ensure that as realm parameters change (new
 43 * snapshot, new parent, etc.) the client's realm hierarchy is updated.
 44 *
 45 * The realm hierarchy drives the generation of a 'snap context' for each
 46 * realm, which simply lists the resulting set of snaps for the realm.  This
 47 * is attached to any writes sent to OSDs.
 48 */
 49/*
 50 * Unfortunately error handling is a bit mixed here.  If we get a snap
 51 * update, but don't have enough memory to update our realm hierarchy,
 52 * it's not clear what we can do about it (besides complaining to the
 53 * console).
 54 */
 55
 56
 57/*
 58 * increase ref count for the realm
 59 *
 60 * caller must hold snap_rwsem for write.
 61 */
 62void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 63			 struct ceph_snap_realm *realm)
 64{
 65	dout("get_realm %p %d -> %d\n", realm,
 66	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
 67	/*
 68	 * since we _only_ increment realm refs or empty the empty
 69	 * list with snap_rwsem held, adjusting the empty list here is
 70	 * safe.  we do need to protect against concurrent empty list
 71	 * additions, however.
 72	 */
 73	if (atomic_read(&realm->nref) == 0) {
 74		spin_lock(&mdsc->snap_empty_lock);
 75		list_del_init(&realm->empty_item);
 76		spin_unlock(&mdsc->snap_empty_lock);
 77	}
 78
 79	atomic_inc(&realm->nref);
 80}
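
/*
 * Note how this older version differs from the v4.17 listing above: a
 * newly created realm starts with nref == 0 and the realm tree itself
 * holds no reference, so nref == 0 here means "parked on snap_empty".
 * v4.17 instead creates realms with nref == 1 and detects the same
 * resurrection case with atomic_inc_return() == 1.
 */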
 81
 82static void __insert_snap_realm(struct rb_root *root,
 83				struct ceph_snap_realm *new)
 84{
 85	struct rb_node **p = &root->rb_node;
 86	struct rb_node *parent = NULL;
 87	struct ceph_snap_realm *r = NULL;
 88
 89	while (*p) {
 90		parent = *p;
 91		r = rb_entry(parent, struct ceph_snap_realm, node);
 92		if (new->ino < r->ino)
 93			p = &(*p)->rb_left;
 94		else if (new->ino > r->ino)
 95			p = &(*p)->rb_right;
 96		else
 97			BUG();
 98	}
 99
100	rb_link_node(&new->node, parent, p);
101	rb_insert_color(&new->node, root);
102}
103
104/*
105 * create and get the realm rooted at @ino and bump its ref count.
106 *
107 * caller must hold snap_rwsem for write.
108 */
109static struct ceph_snap_realm *ceph_create_snap_realm(
110	struct ceph_mds_client *mdsc,
111	u64 ino)
112{
113	struct ceph_snap_realm *realm;
114
115	realm = kzalloc(sizeof(*realm), GFP_NOFS);
116	if (!realm)
117		return ERR_PTR(-ENOMEM);
118
119	atomic_set(&realm->nref, 0);    /* tree does not take a ref */
120	realm->ino = ino;
121	INIT_LIST_HEAD(&realm->children);
122	INIT_LIST_HEAD(&realm->child_item);
123	INIT_LIST_HEAD(&realm->empty_item);
124	INIT_LIST_HEAD(&realm->dirty_item);
125	INIT_LIST_HEAD(&realm->inodes_with_caps);
126	spin_lock_init(&realm->inodes_with_caps_lock);
127	__insert_snap_realm(&mdsc->snap_realms, realm);
128	dout("create_snap_realm %llx %p\n", realm->ino, realm);
129	return realm;
130}
131
132/*
133 * lookup the realm rooted at @ino.
134 *
135 * caller must hold snap_rwsem for write.
136 */
137struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
138					       u64 ino)
139{
140	struct rb_node *n = mdsc->snap_realms.rb_node;
141	struct ceph_snap_realm *r;
142
143	while (n) {
144		r = rb_entry(n, struct ceph_snap_realm, node);
145		if (ino < r->ino)
146			n = n->rb_left;
147		else if (ino > r->ino)
148			n = n->rb_right;
149		else {
150			dout("lookup_snap_realm %llx %p\n", r->ino, r);
151			return r;
152		}
153	}
154	return NULL;
155}
156
157static void __put_snap_realm(struct ceph_mds_client *mdsc,
158			     struct ceph_snap_realm *realm);
159
160/*
161 * called with snap_rwsem (write)
162 */
163static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
164				 struct ceph_snap_realm *realm)
165{
166	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
167
168	rb_erase(&realm->node, &mdsc->snap_realms);
169
170	if (realm->parent) {
171		list_del_init(&realm->child_item);
172		__put_snap_realm(mdsc, realm->parent);
173	}
174
175	kfree(realm->prior_parent_snaps);
176	kfree(realm->snaps);
177	ceph_put_snap_context(realm->cached_context);
178	kfree(realm);
179}
180
181/*
182 * caller holds snap_rwsem (write)
183 */
184static void __put_snap_realm(struct ceph_mds_client *mdsc,
185			     struct ceph_snap_realm *realm)
186{
187	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
188	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
189	if (atomic_dec_and_test(&realm->nref))
190		__destroy_snap_realm(mdsc, realm);
191}
192
193/*
194 * caller needn't hold any locks
195 */
196void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
197			 struct ceph_snap_realm *realm)
198{
199	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
200	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
201	if (!atomic_dec_and_test(&realm->nref))
202		return;
203
204	if (down_write_trylock(&mdsc->snap_rwsem)) {
205		__destroy_snap_realm(mdsc, realm);
206		up_write(&mdsc->snap_rwsem);
207	} else {
208		spin_lock(&mdsc->snap_empty_lock);
209		list_add(&realm->empty_item, &mdsc->snap_empty);
210		spin_unlock(&mdsc->snap_empty_lock);
211	}
212}
213
214/*
215 * Clean up any realms whose ref counts have dropped to zero.  Note
 216 * that this does not include realms that were created but not yet
217 * used.
218 *
219 * Called under snap_rwsem (write)
220 */
221static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
222{
223	struct ceph_snap_realm *realm;
224
225	spin_lock(&mdsc->snap_empty_lock);
226	while (!list_empty(&mdsc->snap_empty)) {
227		realm = list_first_entry(&mdsc->snap_empty,
228				   struct ceph_snap_realm, empty_item);
229		list_del(&realm->empty_item);
230		spin_unlock(&mdsc->snap_empty_lock);
231		__destroy_snap_realm(mdsc, realm);
232		spin_lock(&mdsc->snap_empty_lock);
233	}
234	spin_unlock(&mdsc->snap_empty_lock);
235}
236
237void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
238{
239	down_write(&mdsc->snap_rwsem);
240	__cleanup_empty_realms(mdsc);
241	up_write(&mdsc->snap_rwsem);
242}
243
244/*
 245 * adjust the parent realm of a given @realm.  adjust the child list and
 246 * parent pointers, and ref counts appropriately.
 247 *
 248 * return 1 if the parent was changed, 0 if unchanged, <0 on error.
249 *
250 * caller must hold snap_rwsem for write.
251 */
252static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
253				    struct ceph_snap_realm *realm,
254				    u64 parentino)
255{
256	struct ceph_snap_realm *parent;
257
258	if (realm->parent_ino == parentino)
259		return 0;
260
261	parent = ceph_lookup_snap_realm(mdsc, parentino);
262	if (!parent) {
263		parent = ceph_create_snap_realm(mdsc, parentino);
264		if (IS_ERR(parent))
265			return PTR_ERR(parent);
266	}
267	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
268	     realm->ino, realm, realm->parent_ino, realm->parent,
269	     parentino, parent);
270	if (realm->parent) {
271		list_del_init(&realm->child_item);
272		ceph_put_snap_realm(mdsc, realm->parent);
273	}
274	realm->parent_ino = parentino;
275	realm->parent = parent;
276	ceph_get_snap_realm(mdsc, parent);
277	list_add(&realm->child_item, &parent->children);
278	return 1;
279}
280
281
282static int cmpu64_rev(const void *a, const void *b)
283{
284	if (*(u64 *)a < *(u64 *)b)
285		return 1;
286	if (*(u64 *)a > *(u64 *)b)
287		return -1;
288	return 0;
289}
290
291/*
292 * build the snap context for a given realm.
293 */
294static int build_snap_context(struct ceph_snap_realm *realm)
295{
296	struct ceph_snap_realm *parent = realm->parent;
297	struct ceph_snap_context *snapc;
298	int err = 0;
299	int i;
300	int num = realm->num_prior_parent_snaps + realm->num_snaps;
301
302	/*
303	 * build parent context, if it hasn't been built.
304	 * conservatively estimate that all parent snaps might be
305	 * included by us.
306	 */
307	if (parent) {
308		if (!parent->cached_context) {
309			err = build_snap_context(parent);
310			if (err)
311				goto fail;
312		}
313		num += parent->cached_context->num_snaps;
314	}
315
316	/* do i actually need to update?  not if my context seq
317	   matches realm seq, and my parent's does too.  (this works
318	   because rebuild_snap_realms() works _downward_ in the
319	   hierarchy after each update.) */
320	if (realm->cached_context &&
321	    realm->cached_context->seq == realm->seq &&
322	    (!parent ||
323	     realm->cached_context->seq >= parent->cached_context->seq)) {
324		dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
325		     " (unchanged)\n",
326		     realm->ino, realm, realm->cached_context,
327		     realm->cached_context->seq,
328		     realm->cached_context->num_snaps);
329		return 0;
330	}
331
332	/* alloc new snap context */
333	err = -ENOMEM;
334	if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
335		goto fail;
336	snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
337	if (!snapc)
338		goto fail;
339	atomic_set(&snapc->nref, 1);
340
341	/* build (reverse sorted) snap vector */
342	num = 0;
343	snapc->seq = realm->seq;
344	if (parent) {
345		/* include any of parent's snaps occurring _after_ my
346		   parent became my parent */
347		for (i = 0; i < parent->cached_context->num_snaps; i++)
348			if (parent->cached_context->snaps[i] >=
349			    realm->parent_since)
350				snapc->snaps[num++] =
351					parent->cached_context->snaps[i];
352		if (parent->cached_context->seq > snapc->seq)
353			snapc->seq = parent->cached_context->seq;
354	}
355	memcpy(snapc->snaps + num, realm->snaps,
356	       sizeof(u64)*realm->num_snaps);
357	num += realm->num_snaps;
358	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
359	       sizeof(u64)*realm->num_prior_parent_snaps);
360	num += realm->num_prior_parent_snaps;
361
362	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
363	snapc->num_snaps = num;
364	dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n",
365	     realm->ino, realm, snapc, snapc->seq, snapc->num_snaps);
366
367	if (realm->cached_context)
368		ceph_put_snap_context(realm->cached_context);
369	realm->cached_context = snapc;
370	return 0;
371
372fail:
373	/*
374	 * if we fail, clear old (incorrect) cached_context... hopefully
375	 * we'll have better luck building it later
376	 */
377	if (realm->cached_context) {
378		ceph_put_snap_context(realm->cached_context);
379		realm->cached_context = NULL;
380	}
381	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
382	       realm, err);
383	return err;
384}
385
386/*
387 * rebuild snap context for the given realm and all of its children.
388 */
389static void rebuild_snap_realms(struct ceph_snap_realm *realm)
390{
391	struct ceph_snap_realm *child;
392
393	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
394	build_snap_context(realm);
395
396	list_for_each_entry(child, &realm->children, child_item)
397		rebuild_snap_realms(child);
398}
399
400
401/*
402 * helper to allocate and decode an array of snapids.  free prior
403 * instance, if any.
404 */
405static int dup_array(u64 **dst, __le64 *src, int num)
406{
407	int i;
408
409	kfree(*dst);
410	if (num) {
411		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
412		if (!*dst)
413			return -ENOMEM;
414		for (i = 0; i < num; i++)
415			(*dst)[i] = get_unaligned_le64(src + i);
416	} else {
417		*dst = NULL;
418	}
419	return 0;
420}
421
422
423/*
424 * When a snapshot is applied, the size/mtime inode metadata is queued
425 * in a ceph_cap_snap (one for each snapshot) until writeback
426 * completes and the metadata can be flushed back to the MDS.
427 *
428 * However, if a (sync) write is currently in-progress when we apply
429 * the snapshot, we have to wait until the write succeeds or fails
430 * (and a final size/mtime is known).  In this case the
431 * cap_snap->writing = 1, and is said to be "pending."  When the write
432 * finishes, we __ceph_finish_cap_snap().
433 *
434 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
435 * change).
436 */
437void ceph_queue_cap_snap(struct ceph_inode_info *ci)
438{
439	struct inode *inode = &ci->vfs_inode;
440	struct ceph_cap_snap *capsnap;
441	int used, dirty;
442
443	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
444	if (!capsnap) {
445		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
446		return;
447	}
448
449	spin_lock(&ci->i_ceph_lock);
450	used = __ceph_caps_used(ci);
451	dirty = __ceph_caps_dirty(ci);
452
453	/*
454	 * If there is a write in progress, treat that as a dirty Fw,
455	 * even though it hasn't completed yet; by the time we finish
456	 * up this capsnap it will be.
457	 */
458	if (used & CEPH_CAP_FILE_WR)
459		dirty |= CEPH_CAP_FILE_WR;
460
461	if (__ceph_have_pending_cap_snap(ci)) {
462		/* there is no point in queuing multiple "pending" cap_snaps,
463		   as no new writes are allowed to start when pending, so any
464		   writes in progress now were started before the previous
465		   cap_snap.  lucky us. */
466		dout("queue_cap_snap %p already pending\n", inode);
467		kfree(capsnap);
468	} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
469			    CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
470		struct ceph_snap_context *snapc = ci->i_head_snapc;
471
472		/*
473		 * if we are a sync write, we may need to go to the snaprealm
474		 * to get the current snapc.
475		 */
476		if (!snapc)
477			snapc = ci->i_snap_realm->cached_context;
478
479		dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n",
480		     inode, capsnap, snapc, ceph_cap_string(dirty));
481		ihold(inode);
482
483		atomic_set(&capsnap->nref, 1);
484		capsnap->ci = ci;
485		INIT_LIST_HEAD(&capsnap->ci_item);
486		INIT_LIST_HEAD(&capsnap->flushing_item);
487
488		capsnap->follows = snapc->seq;
489		capsnap->issued = __ceph_caps_issued(ci, NULL);
490		capsnap->dirty = dirty;
491
492		capsnap->mode = inode->i_mode;
493		capsnap->uid = inode->i_uid;
494		capsnap->gid = inode->i_gid;
495
496		if (dirty & CEPH_CAP_XATTR_EXCL) {
497			__ceph_build_xattrs_blob(ci);
498			capsnap->xattr_blob =
499				ceph_buffer_get(ci->i_xattrs.blob);
500			capsnap->xattr_version = ci->i_xattrs.version;
501		} else {
502			capsnap->xattr_blob = NULL;
503			capsnap->xattr_version = 0;
504		}
505
506		/* dirty page count moved from _head to this cap_snap;
507		   all subsequent page dirties occur _after_ this
508		   snapshot. */
509		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
510		ci->i_wrbuffer_ref_head = 0;
511		capsnap->context = snapc;
512		ci->i_head_snapc =
513			ceph_get_snap_context(ci->i_snap_realm->cached_context);
514		dout(" new snapc is %p\n", ci->i_head_snapc);
515		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
516
517		if (used & CEPH_CAP_FILE_WR) {
518			dout("queue_cap_snap %p cap_snap %p snapc %p"
519			     " seq %llu used WR, now pending\n", inode,
520			     capsnap, snapc, snapc->seq);
521			capsnap->writing = 1;
522		} else {
523			/* note mtime, size NOW. */
524			__ceph_finish_cap_snap(ci, capsnap);
525		}
526	} else {
527		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
528		kfree(capsnap);
529	}
530
531	spin_unlock(&ci->i_ceph_lock);
532}
533
534/*
 535 * Finalize the size, mtime for a cap_snap... that is, settle on final values
536 * to be used for the snapshot, to be flushed back to the mds.
537 *
538 * If capsnap can now be flushed, add to snap_flush list, and return 1.
539 *
540 * Caller must hold i_ceph_lock.
541 */
542int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
543			    struct ceph_cap_snap *capsnap)
544{
545	struct inode *inode = &ci->vfs_inode;
546	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
547
548	BUG_ON(capsnap->writing);
549	capsnap->size = inode->i_size;
550	capsnap->mtime = inode->i_mtime;
551	capsnap->atime = inode->i_atime;
552	capsnap->ctime = inode->i_ctime;
553	capsnap->time_warp_seq = ci->i_time_warp_seq;
554	if (capsnap->dirty_pages) {
555		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
556		     "still has %d dirty pages\n", inode, capsnap,
557		     capsnap->context, capsnap->context->seq,
558		     ceph_cap_string(capsnap->dirty), capsnap->size,
559		     capsnap->dirty_pages);
560		return 0;
561	}
562	dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
563	     inode, capsnap, capsnap->context,
564	     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
565	     capsnap->size);
566
567	spin_lock(&mdsc->snap_flush_lock);
568	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
569	spin_unlock(&mdsc->snap_flush_lock);
570	return 1;  /* caller may want to ceph_flush_snaps */
571}
572
573/*
574 * Queue cap_snaps for snap writeback for this realm and its children.
575 * Called under snap_rwsem, so realm topology won't change.
576 */
577static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
578{
579	struct ceph_inode_info *ci;
580	struct inode *lastinode = NULL;
581	struct ceph_snap_realm *child;
582
583	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
584
585	spin_lock(&realm->inodes_with_caps_lock);
586	list_for_each_entry(ci, &realm->inodes_with_caps,
587			    i_snap_realm_item) {
588		struct inode *inode = igrab(&ci->vfs_inode);
589		if (!inode)
590			continue;
591		spin_unlock(&realm->inodes_with_caps_lock);
592		if (lastinode)
593			iput(lastinode);
594		lastinode = inode;
595		ceph_queue_cap_snap(ci);
596		spin_lock(&realm->inodes_with_caps_lock);
597	}
598	spin_unlock(&realm->inodes_with_caps_lock);
599	if (lastinode)
600		iput(lastinode);
601
602	list_for_each_entry(child, &realm->children, child_item) {
603		dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
604		     realm, realm->ino, child, child->ino);
605		list_del_init(&child->dirty_item);
606		list_add(&child->dirty_item, &realm->dirty_item);
607	}
608
609	list_del_init(&realm->dirty_item);
610	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
611}
612
613/*
614 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
615 * the snap realm parameters from a given realm and all of its ancestors,
616 * up to the root.
617 *
618 * Caller must hold snap_rwsem for write.
619 */
620int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
621			   void *p, void *e, bool deletion)
622{
623	struct ceph_mds_snap_realm *ri;    /* encoded */
624	__le64 *snaps;                     /* encoded */
625	__le64 *prior_parent_snaps;        /* encoded */
626	struct ceph_snap_realm *realm;
627	int invalidate = 0;
628	int err = -ENOMEM;
629	LIST_HEAD(dirty_realms);
630
631	dout("update_snap_trace deletion=%d\n", deletion);
632more:
633	ceph_decode_need(&p, e, sizeof(*ri), bad);
634	ri = p;
635	p += sizeof(*ri);
636	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
637			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
638	snaps = p;
639	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
640	prior_parent_snaps = p;
641	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);
642
643	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
644	if (!realm) {
645		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
646		if (IS_ERR(realm)) {
647			err = PTR_ERR(realm);
648			goto fail;
649		}
650	}
651
652	/* ensure the parent is correct */
653	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
654	if (err < 0)
655		goto fail;
656	invalidate += err;
657
658	if (le64_to_cpu(ri->seq) > realm->seq) {
659		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
660		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
661		/* update realm parameters, snap lists */
662		realm->seq = le64_to_cpu(ri->seq);
663		realm->created = le64_to_cpu(ri->created);
664		realm->parent_since = le64_to_cpu(ri->parent_since);
665
666		realm->num_snaps = le32_to_cpu(ri->num_snaps);
667		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
668		if (err < 0)
669			goto fail;
670
671		realm->num_prior_parent_snaps =
672			le32_to_cpu(ri->num_prior_parent_snaps);
673		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
674				realm->num_prior_parent_snaps);
675		if (err < 0)
676			goto fail;
677
678		/* queue realm for cap_snap creation */
679		list_add(&realm->dirty_item, &dirty_realms);
680
681		invalidate = 1;
682	} else if (!realm->cached_context) {
683		dout("update_snap_trace %llx %p seq %lld new\n",
684		     realm->ino, realm, realm->seq);
685		invalidate = 1;
686	} else {
687		dout("update_snap_trace %llx %p seq %lld unchanged\n",
688		     realm->ino, realm, realm->seq);
689	}
690
691	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
692	     realm, invalidate, p, e);
693
694	if (p < e)
695		goto more;
696
697	/* invalidate when we reach the _end_ (root) of the trace */
698	if (invalidate)
699		rebuild_snap_realms(realm);
700
701	/*
702	 * queue cap snaps _after_ we've built the new snap contexts,
703	 * so that i_head_snapc can be set appropriately.
704	 */
705	while (!list_empty(&dirty_realms)) {
706		realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
707					 dirty_item);
708		queue_realm_cap_snaps(realm);
709	}
710
711	__cleanup_empty_realms(mdsc);
712	return 0;
713
714bad:
715	err = -EINVAL;
716fail:
717	pr_err("update_snap_trace error %d\n", err);
718	return err;
719}
720
721
722/*
723 * Send any cap_snaps that are queued for flush.  Try to carry
724 * s_mutex across multiple snap flushes to avoid locking overhead.
725 *
726 * Caller holds no locks.
727 */
728static void flush_snaps(struct ceph_mds_client *mdsc)
729{
730	struct ceph_inode_info *ci;
731	struct inode *inode;
732	struct ceph_mds_session *session = NULL;
733
734	dout("flush_snaps\n");
735	spin_lock(&mdsc->snap_flush_lock);
736	while (!list_empty(&mdsc->snap_flush_list)) {
737		ci = list_first_entry(&mdsc->snap_flush_list,
738				struct ceph_inode_info, i_snap_flush_item);
739		inode = &ci->vfs_inode;
740		ihold(inode);
741		spin_unlock(&mdsc->snap_flush_lock);
742		spin_lock(&ci->i_ceph_lock);
743		__ceph_flush_snaps(ci, &session, 0);
744		spin_unlock(&ci->i_ceph_lock);
745		iput(inode);
746		spin_lock(&mdsc->snap_flush_lock);
747	}
748	spin_unlock(&mdsc->snap_flush_lock);
749
750	if (session) {
751		mutex_unlock(&session->s_mutex);
752		ceph_put_mds_session(session);
753	}
754	dout("flush_snaps done\n");
755}
756
757
758/*
759 * Handle a snap notification from the MDS.
760 *
761 * This can take two basic forms: the simplest is just a snap creation
762 * or deletion notification on an existing realm.  This should update the
763 * realm and its children.
764 *
765 * The more difficult case is realm creation, due to snap creation at a
766 * new point in the file hierarchy, or due to a rename that moves a file or
767 * directory into another realm.
768 */
769void ceph_handle_snap(struct ceph_mds_client *mdsc,
770		      struct ceph_mds_session *session,
771		      struct ceph_msg *msg)
772{
773	struct super_block *sb = mdsc->fsc->sb;
774	int mds = session->s_mds;
775	u64 split;
776	int op;
777	int trace_len;
778	struct ceph_snap_realm *realm = NULL;
779	void *p = msg->front.iov_base;
780	void *e = p + msg->front.iov_len;
781	struct ceph_mds_snap_head *h;
782	int num_split_inos, num_split_realms;
783	__le64 *split_inos = NULL, *split_realms = NULL;
784	int i;
785	int locked_rwsem = 0;
786
787	/* decode */
788	if (msg->front.iov_len < sizeof(*h))
789		goto bad;
790	h = p;
791	op = le32_to_cpu(h->op);
792	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
793					  * existing realm */
794	num_split_inos = le32_to_cpu(h->num_split_inos);
795	num_split_realms = le32_to_cpu(h->num_split_realms);
796	trace_len = le32_to_cpu(h->trace_len);
797	p += sizeof(*h);
798
799	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
800	     ceph_snap_op_name(op), split, trace_len);
801
802	mutex_lock(&session->s_mutex);
803	session->s_seq++;
804	mutex_unlock(&session->s_mutex);
805
806	down_write(&mdsc->snap_rwsem);
807	locked_rwsem = 1;
808
809	if (op == CEPH_SNAP_OP_SPLIT) {
810		struct ceph_mds_snap_realm *ri;
811
812		/*
813		 * A "split" breaks part of an existing realm off into
814		 * a new realm.  The MDS provides a list of inodes
815		 * (with caps) and child realms that belong to the new
816		 * child.
817		 */
818		split_inos = p;
819		p += sizeof(u64) * num_split_inos;
820		split_realms = p;
821		p += sizeof(u64) * num_split_realms;
822		ceph_decode_need(&p, e, sizeof(*ri), bad);
823		/* we will peek at realm info here, but will _not_
824		 * advance p, as the realm update will occur below in
825		 * ceph_update_snap_trace. */
826		ri = p;
827
828		realm = ceph_lookup_snap_realm(mdsc, split);
829		if (!realm) {
830			realm = ceph_create_snap_realm(mdsc, split);
831			if (IS_ERR(realm))
832				goto out;
833		}
834		ceph_get_snap_realm(mdsc, realm);
835
836		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
837		for (i = 0; i < num_split_inos; i++) {
838			struct ceph_vino vino = {
839				.ino = le64_to_cpu(split_inos[i]),
840				.snap = CEPH_NOSNAP,
841			};
842			struct inode *inode = ceph_find_inode(sb, vino);
843			struct ceph_inode_info *ci;
844			struct ceph_snap_realm *oldrealm;
845
846			if (!inode)
847				continue;
848			ci = ceph_inode(inode);
849
850			spin_lock(&ci->i_ceph_lock);
851			if (!ci->i_snap_realm)
852				goto skip_inode;
853			/*
854			 * If this inode belongs to a realm that was
855			 * created after our new realm, we experienced
856			 * a race (due to another split notification
857			 * arriving from a different MDS).  So skip
858			 * this inode.
859			 */
860			if (ci->i_snap_realm->created >
861			    le64_to_cpu(ri->created)) {
862				dout(" leaving %p in newer realm %llx %p\n",
863				     inode, ci->i_snap_realm->ino,
864				     ci->i_snap_realm);
865				goto skip_inode;
866			}
867			dout(" will move %p to split realm %llx %p\n",
868			     inode, realm->ino, realm);
869			/*
870			 * Move the inode to the new realm
871			 */
872			spin_lock(&realm->inodes_with_caps_lock);
873			list_del_init(&ci->i_snap_realm_item);
874			list_add(&ci->i_snap_realm_item,
875				 &realm->inodes_with_caps);
876			oldrealm = ci->i_snap_realm;
877			ci->i_snap_realm = realm;
878			spin_unlock(&realm->inodes_with_caps_lock);
879			spin_unlock(&ci->i_ceph_lock);
880
881			ceph_get_snap_realm(mdsc, realm);
882			ceph_put_snap_realm(mdsc, oldrealm);
883
884			iput(inode);
885			continue;
886
887skip_inode:
888			spin_unlock(&ci->i_ceph_lock);
889			iput(inode);
890		}
891
892		/* we may have taken some of the old realm's children. */
893		for (i = 0; i < num_split_realms; i++) {
894			struct ceph_snap_realm *child =
895				ceph_lookup_snap_realm(mdsc,
896					   le64_to_cpu(split_realms[i]));
897			if (!child)
898				continue;
899			adjust_snap_realm_parent(mdsc, child, realm->ino);
900		}
901	}
902
903	/*
904	 * update using the provided snap trace. if we are deleting a
905	 * snap, we can avoid queueing cap_snaps.
906	 */
907	ceph_update_snap_trace(mdsc, p, e,
908			       op == CEPH_SNAP_OP_DESTROY);
909
910	if (op == CEPH_SNAP_OP_SPLIT)
911		/* we took a reference when we created the realm, above */
912		ceph_put_snap_realm(mdsc, realm);
913
914	__cleanup_empty_realms(mdsc);
915
916	up_write(&mdsc->snap_rwsem);
917
918	flush_snaps(mdsc);
919	return;
920
921bad:
922	pr_err("corrupt snap message from mds%d\n", mds);
923	ceph_msg_dump(msg);
924out:
925	if (locked_rwsem)
926		up_write(&mdsc->snap_rwsem);
927	return;
928}
929
930
931