  1/* -*- mode: c; c-basic-offset: 8; -*-
  2 * vim: noexpandtab sw=8 ts=8 sts=0:
  3 *
  4 * dlmfs.c
  5 *
  6 * Code which implements the kernel side of a minimal userspace
  7 * interface to our DLM. This file handles the virtual file system
  8 * used for communication with userspace. Credit should go to ramfs,
  9 * which was a template for the fs side of this module.
 10 *
 11 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 12 *
 13 * This program is free software; you can redistribute it and/or
 14 * modify it under the terms of the GNU General Public
 15 * License as published by the Free Software Foundation; either
 16 * version 2 of the License, or (at your option) any later version.
 17 *
 18 * This program is distributed in the hope that it will be useful,
 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 21 * General Public License for more details.
 22 *
 23 * You should have received a copy of the GNU General Public
 24 * License along with this program; if not, write to the
 25 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 26 * Boston, MA 021110-1307, USA.
 27 */
 28
 29/* Simple VFS hooks based on: */
 30/*
 31 * Resizable simple ram filesystem for Linux.
 32 *
 33 * Copyright (C) 2000 Linus Torvalds.
 34 *               2000 Transmeta Corp.
 35 */
 36
 37#include <linux/module.h>
 38#include <linux/fs.h>
 39#include <linux/pagemap.h>
 40#include <linux/types.h>
 41#include <linux/slab.h>
 42#include <linux/highmem.h>
 43#include <linux/init.h>
 44#include <linux/string.h>
 45#include <linux/backing-dev.h>
 46#include <linux/poll.h>
 47
 48#include <linux/uaccess.h>
 49
 50#include "stackglue.h"
 51#include "userdlm.h"
 52
 53#define MLOG_MASK_PREFIX ML_DLMFS
 54#include "cluster/masklog.h"
 55
 56
 57static const struct super_operations dlmfs_ops;
 58static const struct file_operations dlmfs_file_operations;
 59static const struct inode_operations dlmfs_dir_inode_operations;
 60static const struct inode_operations dlmfs_root_inode_operations;
 61static const struct inode_operations dlmfs_file_inode_operations;
 62static struct kmem_cache *dlmfs_inode_cache;
 63
 64struct workqueue_struct *user_dlm_worker;
 65
 66
 67
 68/*
 69 * These are the ABI capabilities of dlmfs.
 70 *
 71 * Over time, dlmfs has added some features that were not part of the
 72 * initial ABI.  Unfortunately, some of these features are not detectable
 73 * via standard usage.  For example, Linux's default poll always returns
 74 * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
 75 * added poll support.  Instead, we provide this list of new capabilities.
 76 *
 77 * Capabilities is a read-only attribute.  We do it as a module parameter
 78 * so we can discover it whether dlmfs is built in, loaded, or even not
 79 * loaded.
 80 *
 81 * The ABI features are local to this machine's dlmfs mount.  This is
 82 * distinct from the locking protocol, which is concerned with inter-node
 83 * interaction.
 84 *
 85 * Capabilities:
 86 * - bast	: POLLIN against the file descriptor of a held lock
 87 *		  signifies a bast fired on the lock.
 88 */
 89#define DLMFS_CAPABILITIES "bast stackglue"
 90static int param_set_dlmfs_capabilities(const char *val,
 91					struct kernel_param *kp)
 92{
 93	printk(KERN_ERR "%s: readonly parameter\n", kp->name);
 94	return -EINVAL;
 95}
 96static int param_get_dlmfs_capabilities(char *buffer,
 97					struct kernel_param *kp)
 98{
 99	return strlcpy(buffer, DLMFS_CAPABILITIES,
100		       strlen(DLMFS_CAPABILITIES) + 1);
101}
102module_param_call(capabilities, param_set_dlmfs_capabilities,
103		  param_get_dlmfs_capabilities, NULL, 0444);
104MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
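/*
 * Usage sketch (editor's addition, not part of the original file):
 * because "capabilities" is a module parameter, userspace can query it
 * through sysfs without mounting dlmfs.  The path below assumes the
 * module is named ocfs2_dlmfs:
 *
 *	char caps[64];
 *	int fd = open("/sys/module/ocfs2_dlmfs/parameters/capabilities",
 *		      O_RDONLY);
 *	if (fd >= 0) {
 *		ssize_t n = read(fd, caps, sizeof(caps) - 1);
 *		if (n > 0)
 *			caps[n] = '\0';		// now e.g. "bast stackglue"
 *		close(fd);
 *	}
 */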
105
106
107/*
108 * decodes a set of open flags into a valid lock level and a set of flags.
109 * returns < 0 if we have invalid flags
110 * flags which mean something to us:
111 * O_RDONLY -> PRMODE level
112 * O_WRONLY -> EXMODE level
113 *
114 * O_NONBLOCK -> NOQUEUE
115 */
116static int dlmfs_decode_open_flags(int open_flags,
117				   int *level,
118				   int *flags)
119{
120	if (open_flags & (O_WRONLY|O_RDWR))
121		*level = DLM_LOCK_EX;
122	else
123		*level = DLM_LOCK_PR;
124
125	*flags = 0;
126	if (open_flags & O_NONBLOCK)
127		*flags |= DLM_LKF_NOQUEUE;
128
129	return 0;
130}
131
132static int dlmfs_file_open(struct inode *inode,
133			   struct file *file)
134{
135	int status, level, flags;
136	struct dlmfs_filp_private *fp = NULL;
137	struct dlmfs_inode_private *ip;
138
139	if (S_ISDIR(inode->i_mode))
140		BUG();
141
142	mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
143		file->f_flags);
144
145	status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
146	if (status < 0)
147		goto bail;
148
149	/* We don't want to honor O_APPEND at read/write time as it
150	 * doesn't make sense for LVB writes. */
151	file->f_flags &= ~O_APPEND;
152
153	fp = kmalloc(sizeof(*fp), GFP_NOFS);
154	if (!fp) {
155		status = -ENOMEM;
156		goto bail;
157	}
158	fp->fp_lock_level = level;
159
160	ip = DLMFS_I(inode);
161
162	status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
163	if (status < 0) {
164		/* this is a strange error to return here but I want
 165	 * to allow userspace to distinguish a
166		 * valid lock request from one that simply couldn't be
167		 * granted. */
168		if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
169			status = -ETXTBSY;
170		kfree(fp);
171		goto bail;
172	}
173
174	file->private_data = fp;
175bail:
176	return status;
177}
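/*
 * Usage sketch (editor's addition, not part of the original file):
 * the open-flag mapping above is the userspace locking API.  Assuming
 * dlmfs is mounted at /dlm and the domain "mydomain" already exists:
 *
 *	// shared (PR) lock; blocks until the lock is granted
 *	int pr = open("/dlm/mydomain/mylock", O_CREAT | O_RDONLY, 0644);
 *
 *	// exclusive (EX) trylock; O_NONBLOCK maps to DLM_LKF_NOQUEUE,
 *	// so open() fails with errno == ETXTBSY if the lock is busy
 *	int ex = open("/dlm/mydomain/mylock", O_WRONLY | O_NONBLOCK);
 *
 *	close(pr);	// closing the descriptor drops the lock
 */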
178
179static int dlmfs_file_release(struct inode *inode,
180			      struct file *file)
181{
182	int level, status;
183	struct dlmfs_inode_private *ip = DLMFS_I(inode);
184	struct dlmfs_filp_private *fp = file->private_data;
185
186	if (S_ISDIR(inode->i_mode))
187		BUG();
188
189	mlog(0, "close called on inode %lu\n", inode->i_ino);
190
191	status = 0;
192	if (fp) {
193		level = fp->fp_lock_level;
194		if (level != DLM_LOCK_IV)
195			user_dlm_cluster_unlock(&ip->ip_lockres, level);
196
197		kfree(fp);
198		file->private_data = NULL;
199	}
200
201	return 0;
202}
203
204/*
205 * We do ->setattr() just to override size changes.  Our size is the size
206 * of the LVB and nothing else.
207 */
208static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
209{
210	int error;
211	struct inode *inode = d_inode(dentry);
212
213	attr->ia_valid &= ~ATTR_SIZE;
214	error = setattr_prepare(dentry, attr);
215	if (error)
216		return error;
217
218	setattr_copy(inode, attr);
219	mark_inode_dirty(inode);
220	return 0;
221}
222
223static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
224{
225	int event = 0;
226	struct inode *inode = file_inode(file);
227	struct dlmfs_inode_private *ip = DLMFS_I(inode);
228
229	poll_wait(file, &ip->ip_lockres.l_event, wait);
230
231	spin_lock(&ip->ip_lockres.l_lock);
232	if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
233		event = POLLIN | POLLRDNORM;
234	spin_unlock(&ip->ip_lockres.l_lock);
235
236	return event;
237}
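/*
 * Usage sketch (editor's addition, not part of the original file):
 * this is the "bast" capability advertised above.  A process holding a
 * lock can poll(2) the lock file; POLLIN means another node is blocked
 * waiting on the lock, so the holder should release it (close the fd)
 * as soon as it can.  lockfd is the descriptor from an earlier open():
 *
 *	struct pollfd pfd = { .fd = lockfd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		close(lockfd);	// drop the contended lock
 */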
238
239static ssize_t dlmfs_file_read(struct file *filp,
240			       char __user *buf,
241			       size_t count,
242			       loff_t *ppos)
243{
244	int bytes_left;
245	ssize_t readlen, got;
246	char *lvb_buf;
247	struct inode *inode = file_inode(filp);
248
249	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
250		inode->i_ino, count, *ppos);
251
252	if (*ppos >= i_size_read(inode))
253		return 0;
254
255	if (!count)
256		return 0;
257
258	if (!access_ok(VERIFY_WRITE, buf, count))
259		return -EFAULT;
260
261	/* don't read past the lvb */
262	if ((count + *ppos) > i_size_read(inode))
263		readlen = i_size_read(inode) - *ppos;
264	else
265		readlen = count;
266
267	lvb_buf = kmalloc(readlen, GFP_NOFS);
268	if (!lvb_buf)
269		return -ENOMEM;
270
271	got = user_dlm_read_lvb(inode, lvb_buf, readlen);
272	if (got) {
273		BUG_ON(got != readlen);
274		bytes_left = __copy_to_user(buf, lvb_buf, readlen);
275		readlen -= bytes_left;
276	} else
277		readlen = 0;
278
279	kfree(lvb_buf);
280
281	*ppos = *ppos + readlen;
282
283	mlog(0, "read %zd bytes\n", readlen);
284	return readlen;
285}
286
287static ssize_t dlmfs_file_write(struct file *filp,
288				const char __user *buf,
289				size_t count,
290				loff_t *ppos)
291{
292	int bytes_left;
293	ssize_t writelen;
294	char *lvb_buf;
295	struct inode *inode = file_inode(filp);
296
297	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
298		inode->i_ino, count, *ppos);
299
300	if (*ppos >= i_size_read(inode))
301		return -ENOSPC;
302
303	if (!count)
304		return 0;
305
306	if (!access_ok(VERIFY_READ, buf, count))
307		return -EFAULT;
308
309	/* don't write past the lvb */
310	if ((count + *ppos) > i_size_read(inode))
311		writelen = i_size_read(inode) - *ppos;
312	else
313		writelen = count - *ppos;
314
315	lvb_buf = kmalloc(writelen, GFP_NOFS);
316	if (!lvb_buf)
317		return -ENOMEM;
318
319	bytes_left = copy_from_user(lvb_buf, buf, writelen);
320	writelen -= bytes_left;
321	if (writelen)
322		user_dlm_write_lvb(inode, lvb_buf, writelen);
323
324	kfree(lvb_buf);
325
326	*ppos = *ppos + writelen;
327	mlog(0, "wrote %zd bytes\n", writelen);
328	return writelen;
329}
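/*
 * Usage sketch (editor's addition, not part of the original file):
 * read(2) and write(2) on a lock file move the lock value block (LVB),
 * not regular file data; i_size is fixed at DLM_LVB_LEN bytes, so all
 * I/O is clamped to that.  ex and pr are the descriptors from the
 * open() sketch above (only an EX holder can meaningfully publish):
 *
 *	char lvb[64];	// assumes DLM_LVB_LEN == 64
 *
 *	// publish a value while holding the lock exclusively
 *	memset(lvb, 0, sizeof(lvb));
 *	snprintf(lvb, sizeof(lvb), "generation=%d", 42);
 *	pwrite(ex, lvb, sizeof(lvb), 0);
 *
 *	// other nodes read the last written value under a PR lock
 *	pread(pr, lvb, sizeof(lvb), 0);
 */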
330
331static void dlmfs_init_once(void *foo)
332{
333	struct dlmfs_inode_private *ip =
334		(struct dlmfs_inode_private *) foo;
335
336	ip->ip_conn = NULL;
337	ip->ip_parent = NULL;
338
339	inode_init_once(&ip->ip_vfs_inode);
340}
341
342static struct inode *dlmfs_alloc_inode(struct super_block *sb)
343{
344	struct dlmfs_inode_private *ip;
345
346	ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
347	if (!ip)
348		return NULL;
349
350	return &ip->ip_vfs_inode;
351}
352
353static void dlmfs_i_callback(struct rcu_head *head)
354{
355	struct inode *inode = container_of(head, struct inode, i_rcu);
356	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
357}
358
359static void dlmfs_destroy_inode(struct inode *inode)
360{
361	call_rcu(&inode->i_rcu, dlmfs_i_callback);
362}
363
364static void dlmfs_evict_inode(struct inode *inode)
365{
366	int status;
367	struct dlmfs_inode_private *ip;
368
369	clear_inode(inode);
370
371	mlog(0, "inode %lu\n", inode->i_ino);
372
373	ip = DLMFS_I(inode);
374
375	if (S_ISREG(inode->i_mode)) {
376		status = user_dlm_destroy_lock(&ip->ip_lockres);
377		if (status < 0)
378			mlog_errno(status);
379		iput(ip->ip_parent);
380		goto clear_fields;
381	}
382
383	mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
 384	/* we must be a directory. If required, let's unregister the
385	 * dlm context now. */
386	if (ip->ip_conn)
387		user_dlm_unregister(ip->ip_conn);
388clear_fields:
389	ip->ip_parent = NULL;
390	ip->ip_conn = NULL;
391}
392
393static struct inode *dlmfs_get_root_inode(struct super_block *sb)
394{
395	struct inode *inode = new_inode(sb);
396	umode_t mode = S_IFDIR | 0755;
397
398	if (inode) {
399		inode->i_ino = get_next_ino();
400		inode_init_owner(inode, NULL, mode);
401		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
402		inc_nlink(inode);
403
404		inode->i_fop = &simple_dir_operations;
405		inode->i_op = &dlmfs_root_inode_operations;
406	}
407
408	return inode;
409}
410
411static struct inode *dlmfs_get_inode(struct inode *parent,
412				     struct dentry *dentry,
413				     umode_t mode)
414{
415	struct super_block *sb = parent->i_sb;
416	struct inode * inode = new_inode(sb);
417	struct dlmfs_inode_private *ip;
418
419	if (!inode)
420		return NULL;
421
422	inode->i_ino = get_next_ino();
423	inode_init_owner(inode, parent, mode);
424	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
425
426	ip = DLMFS_I(inode);
427	ip->ip_conn = DLMFS_I(parent)->ip_conn;
428
429	switch (mode & S_IFMT) {
430	default:
431		/* for now we don't support anything other than
432		 * directories and regular files. */
433		BUG();
434		break;
435	case S_IFREG:
436		inode->i_op = &dlmfs_file_inode_operations;
437		inode->i_fop = &dlmfs_file_operations;
438
439		i_size_write(inode,  DLM_LVB_LEN);
440
441		user_dlm_lock_res_init(&ip->ip_lockres, dentry);
442
 443	/* released at clear_inode time, this ensures that we
444		 * get to drop the dlm reference on each lock *before*
445		 * we call the unregister code for releasing parent
446		 * directories. */
447		ip->ip_parent = igrab(parent);
448		BUG_ON(!ip->ip_parent);
449		break;
450	case S_IFDIR:
451		inode->i_op = &dlmfs_dir_inode_operations;
452		inode->i_fop = &simple_dir_operations;
453
454		/* directory inodes start off with i_nlink ==
455		 * 2 (for "." entry) */
456		inc_nlink(inode);
457		break;
458	}
459	return inode;
460}
461
462/*
463 * File creation. Allocate an inode, and we're done..
464 */
465/* SMP-safe */
466static int dlmfs_mkdir(struct inode * dir,
467		       struct dentry * dentry,
468		       umode_t mode)
469{
470	int status;
471	struct inode *inode = NULL;
472	const struct qstr *domain = &dentry->d_name;
473	struct dlmfs_inode_private *ip;
474	struct ocfs2_cluster_connection *conn;
475
476	mlog(0, "mkdir %.*s\n", domain->len, domain->name);
477
478	/* verify that we have a proper domain */
479	if (domain->len >= GROUP_NAME_MAX) {
480		status = -EINVAL;
481		mlog(ML_ERROR, "invalid domain name for directory.\n");
482		goto bail;
483	}
484
485	inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
486	if (!inode) {
487		status = -ENOMEM;
488		mlog_errno(status);
489		goto bail;
490	}
491
492	ip = DLMFS_I(inode);
493
494	conn = user_dlm_register(domain);
495	if (IS_ERR(conn)) {
496		status = PTR_ERR(conn);
497		mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
498		     status, domain->len, domain->name);
499		goto bail;
500	}
501	ip->ip_conn = conn;
502
503	inc_nlink(dir);
504	d_instantiate(dentry, inode);
505	dget(dentry);	/* Extra count - pin the dentry in core */
506
507	status = 0;
508bail:
509	if (status < 0)
510		iput(inode);
511	return status;
512}
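/*
 * Usage sketch (editor's addition, not part of the original file):
 * from userspace a lock domain is simply a top-level directory in the
 * dlmfs mount.  mkdir(2) registers the domain with the cluster stack;
 * rmdir(2) unregisters it once all of its lock files are gone:
 *
 *	mkdir("/dlm/mydomain", 0755);	// register domain "mydomain"
 *	// ... create and use lock files inside it ...
 *	rmdir("/dlm/mydomain");		// tear the domain down again
 */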
513
514static int dlmfs_create(struct inode *dir,
515			struct dentry *dentry,
516			umode_t mode,
517			bool excl)
518{
519	int status = 0;
520	struct inode *inode;
521	const struct qstr *name = &dentry->d_name;
522
523	mlog(0, "create %.*s\n", name->len, name->name);
524
525	/* verify name is valid and doesn't contain any dlm reserved
526	 * characters */
527	if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
528	    name->name[0] == '$') {
529		status = -EINVAL;
530		mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
531		     name->name);
532		goto bail;
533	}
534
535	inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
536	if (!inode) {
537		status = -ENOMEM;
538		mlog_errno(status);
539		goto bail;
540	}
541
542	d_instantiate(dentry, inode);
543	dget(dentry);	/* Extra count - pin the dentry in core */
544bail:
545	return status;
546}
547
548static int dlmfs_unlink(struct inode *dir,
549			struct dentry *dentry)
550{
551	int status;
552	struct inode *inode = d_inode(dentry);
553
554	mlog(0, "unlink inode %lu\n", inode->i_ino);
555
556	/* if there are no current holders, or none that are waiting
557	 * to acquire a lock, this basically destroys our lockres. */
558	status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
559	if (status < 0) {
560		mlog(ML_ERROR, "unlink %pd, error %d from destroy\n",
561		     dentry, status);
562		goto bail;
563	}
564	status = simple_unlink(dir, dentry);
565bail:
566	return status;
567}
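/*
 * Usage sketch (editor's addition, not part of the original file):
 * unlink(2) on a lock file destroys the underlying lock resource; it
 * fails if the lock still has holders or waiters:
 *
 *	unlink("/dlm/mydomain/mylock");
 */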
568
569static int dlmfs_fill_super(struct super_block * sb,
570			    void * data,
571			    int silent)
572{
573	sb->s_maxbytes = MAX_LFS_FILESIZE;
574	sb->s_blocksize = PAGE_SIZE;
575	sb->s_blocksize_bits = PAGE_SHIFT;
576	sb->s_magic = DLMFS_MAGIC;
577	sb->s_op = &dlmfs_ops;
578	sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
579	if (!sb->s_root)
580		return -ENOMEM;
581	return 0;
582}
583
584static const struct file_operations dlmfs_file_operations = {
585	.open		= dlmfs_file_open,
586	.release	= dlmfs_file_release,
587	.poll		= dlmfs_file_poll,
588	.read		= dlmfs_file_read,
589	.write		= dlmfs_file_write,
590	.llseek		= default_llseek,
591};
592
593static const struct inode_operations dlmfs_dir_inode_operations = {
594	.create		= dlmfs_create,
595	.lookup		= simple_lookup,
596	.unlink		= dlmfs_unlink,
597};
598
599/* this way we can restrict mkdir to only the toplevel of the fs. */
600static const struct inode_operations dlmfs_root_inode_operations = {
601	.lookup		= simple_lookup,
602	.mkdir		= dlmfs_mkdir,
603	.rmdir		= simple_rmdir,
604};
605
606static const struct super_operations dlmfs_ops = {
607	.statfs		= simple_statfs,
608	.alloc_inode	= dlmfs_alloc_inode,
609	.destroy_inode	= dlmfs_destroy_inode,
610	.evict_inode	= dlmfs_evict_inode,
611	.drop_inode	= generic_delete_inode,
612};
613
614static const struct inode_operations dlmfs_file_inode_operations = {
615	.getattr	= simple_getattr,
616	.setattr	= dlmfs_file_setattr,
617};
618
619static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
620	int flags, const char *dev_name, void *data)
621{
622	return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
623}
624
625static struct file_system_type dlmfs_fs_type = {
626	.owner		= THIS_MODULE,
627	.name		= "ocfs2_dlmfs",
628	.mount		= dlmfs_mount,
629	.kill_sb	= kill_litter_super,
630};
631MODULE_ALIAS_FS("ocfs2_dlmfs");
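/*
 * Usage sketch (editor's addition, not part of the original file):
 * dlmfs has no backing device, so it is mounted as a nodev filesystem;
 * the /dlm mount point is only a convention:
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("none", "/dlm", "ocfs2_dlmfs", 0, NULL) == 0) {
 *		// domains and lock files can now be created under /dlm
 *	}
 */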
632
633static int __init init_dlmfs_fs(void)
634{
635	int status;
636	int cleanup_inode = 0, cleanup_worker = 0;
637
638	dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
639				sizeof(struct dlmfs_inode_private),
640				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
641					SLAB_MEM_SPREAD|SLAB_ACCOUNT),
642				dlmfs_init_once);
643	if (!dlmfs_inode_cache) {
644		status = -ENOMEM;
645		goto bail;
646	}
647	cleanup_inode = 1;
648
649	user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM, 0);
650	if (!user_dlm_worker) {
651		status = -ENOMEM;
652		goto bail;
653	}
654	cleanup_worker = 1;
655
656	user_dlm_set_locking_protocol();
657	status = register_filesystem(&dlmfs_fs_type);
658bail:
659	if (status) {
660		if (cleanup_inode)
661			kmem_cache_destroy(dlmfs_inode_cache);
662		if (cleanup_worker)
663			destroy_workqueue(user_dlm_worker);
664	} else
665		printk("OCFS2 User DLM kernel interface loaded\n");
666	return status;
667}
668
669static void __exit exit_dlmfs_fs(void)
670{
671	unregister_filesystem(&dlmfs_fs_type);
672
673	flush_workqueue(user_dlm_worker);
674	destroy_workqueue(user_dlm_worker);
675
676	/*
677	 * Make sure all delayed rcu free inodes are flushed before we
678	 * destroy cache.
679	 */
680	rcu_barrier();
681	kmem_cache_destroy(dlmfs_inode_cache);
682
683}
684
685MODULE_AUTHOR("Oracle");
686MODULE_LICENSE("GPL");
687MODULE_DESCRIPTION("OCFS2 DLM-Filesystem");
688
689module_init(init_dlmfs_fs)
690module_exit(exit_dlmfs_fs)