Linux Audio

Check our new training course

Loading...
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * dlmfs.c
  4 *
  5 * Code which implements the kernel side of a minimal userspace
  6 * interface to our DLM. This file handles the virtual file system
  7 * used for communication with userspace. Credit should go to ramfs,
  8 * which was a template for the fs side of this module.
  9 *
 10 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 11 */
 12
 13/* Simple VFS hooks based on: */
 14/*
 15 * Resizable simple ram filesystem for Linux.
 16 *
 17 * Copyright (C) 2000 Linus Torvalds.
 18 *               2000 Transmeta Corp.
 19 */
 20
 21#include <linux/module.h>
 22#include <linux/fs.h>
 23#include <linux/pagemap.h>
 24#include <linux/types.h>
 25#include <linux/slab.h>
 26#include <linux/highmem.h>
 27#include <linux/init.h>
 28#include <linux/string.h>
 29#include <linux/backing-dev.h>
 30#include <linux/poll.h>
 31
 32#include <linux/uaccess.h>
 33
 34#include "../stackglue.h"
 35#include "userdlm.h"
 36
 37#define MLOG_MASK_PREFIX ML_DLMFS
 38#include "../cluster/masklog.h"
 39
 40
 41static const struct super_operations dlmfs_ops;
 42static const struct file_operations dlmfs_file_operations;
 43static const struct inode_operations dlmfs_dir_inode_operations;
 44static const struct inode_operations dlmfs_root_inode_operations;
 45static const struct inode_operations dlmfs_file_inode_operations;
 46static struct kmem_cache *dlmfs_inode_cache;
 47
 48struct workqueue_struct *user_dlm_worker;
 49
 50
 51
 52/*
 53 * These are the ABI capabilities of dlmfs.
 54 *
 55 * Over time, dlmfs has added some features that were not part of the
 56 * initial ABI.  Unfortunately, some of these features are not detectable
 57 * via standard usage.  For example, Linux's default poll always returns
 58 * EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs
 59 * added poll support.  Instead, we provide this list of new capabilities.
 60 *
 61 * Capabilities is a read-only attribute.  We do it as a module parameter
 62 * so we can discover it whether dlmfs is built in, loaded, or even not
 63 * loaded.
 64 *
 65 * The ABI features are local to this machine's dlmfs mount.  This is
 66 * distinct from the locking protocol, which is concerned with inter-node
 67 * interaction.
 68 *
 69 * Capabilities:
 70 * - bast	: EPOLLIN against the file descriptor of a held lock
 71 *		  signifies a bast fired on the lock.
 72 */
#define DLMFS_CAPABILITIES "bast stackglue"
/*
 * Setter for the read-only "capabilities" module parameter: always
 * rejects the write so the advertised ABI list cannot be changed.
 */
static int param_set_dlmfs_capabilities(const char *val,
					const struct kernel_param *kp)
{
	printk(KERN_ERR "%s: readonly parameter\n", kp->name);
	return -EINVAL;
}
/* Getter: emit the static capability list into the sysfs buffer. */
static int param_get_dlmfs_capabilities(char *buffer,
					const struct kernel_param *kp)
{
	return sysfs_emit(buffer, DLMFS_CAPABILITIES);
}
/* 0444: world-readable, never writable (setter rejects anyway). */
module_param_call(capabilities, param_set_dlmfs_capabilities,
		  param_get_dlmfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
 88
 89
 90/*
 91 * decodes a set of open flags into a valid lock level and a set of flags.
 92 * returns < 0 if we have invalid flags
 93 * flags which mean something to us:
 94 * O_RDONLY -> PRMODE level
 95 * O_WRONLY -> EXMODE level
 96 *
 97 * O_NONBLOCK -> NOQUEUE
 98 */
 99static int dlmfs_decode_open_flags(int open_flags,
100				   int *level,
101				   int *flags)
102{
103	if (open_flags & (O_WRONLY|O_RDWR))
104		*level = DLM_LOCK_EX;
105	else
106		*level = DLM_LOCK_PR;
107
108	*flags = 0;
109	if (open_flags & O_NONBLOCK)
110		*flags |= DLM_LKF_NOQUEUE;
111
112	return 0;
113}
114
/*
 * open() on a dlmfs file acquires the cluster lock named by the inode.
 * The lock level and flags come from the open flags (see
 * dlmfs_decode_open_flags).  On success the chosen level is stashed in
 * file->private_data so release() can drop the same lock.  A failed
 * non-blocking (trylock) request is reported as -ETXTBSY.
 */
static int dlmfs_file_open(struct inode *inode,
			   struct file *file)
{
	int status, level, flags;
	struct dlmfs_filp_private *fp = NULL;
	struct dlmfs_inode_private *ip;

	/* Locks live only on regular files; a directory here is a bug. */
	if (S_ISDIR(inode->i_mode))
		BUG();

	mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
		file->f_flags);

	status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
	if (status < 0)
		goto bail;

	/* We don't want to honor O_APPEND at read/write time as it
	 * doesn't make sense for LVB writes. */
	file->f_flags &= ~O_APPEND;

	fp = kmalloc(sizeof(*fp), GFP_NOFS);
	if (!fp) {
		status = -ENOMEM;
		goto bail;
	}
	/* Remembered so release() unlocks at the matching level. */
	fp->fp_lock_level = level;

	ip = DLMFS_I(inode);

	status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
	if (status < 0) {
		/* this is a strange error to return here but I want
		 * to be able userspace to be able to distinguish a
		 * valid lock request from one that simply couldn't be
		 * granted. */
		if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
			status = -ETXTBSY;
		kfree(fp);
		goto bail;
	}

	file->private_data = fp;
bail:
	return status;
}
161
162static int dlmfs_file_release(struct inode *inode,
163			      struct file *file)
164{
165	int level;
166	struct dlmfs_inode_private *ip = DLMFS_I(inode);
167	struct dlmfs_filp_private *fp = file->private_data;
168
169	if (S_ISDIR(inode->i_mode))
170		BUG();
171
172	mlog(0, "close called on inode %lu\n", inode->i_ino);
173
174	if (fp) {
175		level = fp->fp_lock_level;
176		if (level != DLM_LOCK_IV)
177			user_dlm_cluster_unlock(&ip->ip_lockres, level);
178
179		kfree(fp);
180		file->private_data = NULL;
181	}
182
183	return 0;
184}
185
186/*
187 * We do ->setattr() just to override size changes.  Our size is the size
188 * of the LVB and nothing else.
189 */
/*
 * We do ->setattr() just to override size changes.  Our size is the size
 * of the LVB and nothing else.
 */
static int dlmfs_file_setattr(struct mnt_idmap *idmap,
			      struct dentry *dentry, struct iattr *attr)
{
	int error;
	struct inode *inode = d_inode(dentry);

	/* Silently drop any size change; LVB size is fixed. */
	attr->ia_valid &= ~ATTR_SIZE;
	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
205
/*
 * poll() reports EPOLLIN when a blocking AST (bast) has fired on the
 * held lock, i.e. another node wants a conflicting level.  This is the
 * "bast" ABI capability advertised via the module parameter above.
 */
static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait)
{
	__poll_t event = 0;
	struct inode *inode = file_inode(file);
	struct dlmfs_inode_private *ip = DLMFS_I(inode);

	poll_wait(file, &ip->ip_lockres.l_event, wait);

	/* l_flags is protected by the lockres spinlock. */
	spin_lock(&ip->ip_lockres.l_lock);
	if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
		event = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&ip->ip_lockres.l_lock);

	return event;
}
221
/*
 * read() returns the lock value block (LVB) contents.  The LVB is
 * snapshotted to a stack buffer; user_dlm_read_lvb() returning 0 means
 * no valid LVB is available and we report EOF.
 */
static ssize_t dlmfs_file_read(struct file *file,
			       char __user *buf,
			       size_t count,
			       loff_t *ppos)
{
	char lvb[DLM_LVB_LEN];

	if (!user_dlm_read_lvb(file_inode(file), lvb))
		return 0;

	/* Bounds, copy-out and *ppos update handled by the helper. */
	return simple_read_from_buffer(buf, count, ppos, lvb, sizeof(lvb));
}
234
235static ssize_t dlmfs_file_write(struct file *filp,
236				const char __user *buf,
237				size_t count,
238				loff_t *ppos)
239{
240	char lvb_buf[DLM_LVB_LEN];
241	int bytes_left;
242	struct inode *inode = file_inode(filp);
243
244	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
245		inode->i_ino, count, *ppos);
246
247	if (*ppos >= DLM_LVB_LEN)
248		return -ENOSPC;
249
250	/* don't write past the lvb */
251	if (count > DLM_LVB_LEN - *ppos)
252		count = DLM_LVB_LEN - *ppos;
253
254	if (!count)
255		return 0;
256
257	bytes_left = copy_from_user(lvb_buf, buf, count);
258	count -= bytes_left;
259	if (count)
260		user_dlm_write_lvb(inode, lvb_buf, count);
261
262	*ppos = *ppos + count;
263	mlog(0, "wrote %zu bytes\n", count);
264	return count;
265}
266
/*
 * Slab constructor for dlmfs inodes: runs once per slab object, so only
 * fields that are safe to carry between alloc/free cycles go here.
 */
static void dlmfs_init_once(void *foo)
{
	struct dlmfs_inode_private *ip =
		(struct dlmfs_inode_private *) foo;

	ip->ip_conn = NULL;
	ip->ip_parent = NULL;

	inode_init_once(&ip->ip_vfs_inode);
}
277
/* ->alloc_inode: carve a dlmfs inode from our slab cache. */
static struct inode *dlmfs_alloc_inode(struct super_block *sb)
{
	struct dlmfs_inode_private *ip;

	ip = alloc_inode_sb(sb, dlmfs_inode_cache, GFP_NOFS);
	if (!ip)
		return NULL;

	/* VFS works with the embedded struct inode. */
	return &ip->ip_vfs_inode;
}
288
/* ->free_inode: RCU-delayed counterpart of dlmfs_alloc_inode(). */
static void dlmfs_free_inode(struct inode *inode)
{
	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}
293
/*
 * ->evict_inode: final teardown when an inode is dropped.  Regular
 * files destroy their lockres (unless unlink already started teardown)
 * and release the reference pinning the parent directory; directories
 * unregister their cluster connection.
 */
static void dlmfs_evict_inode(struct inode *inode)
{
	int status;
	struct dlmfs_inode_private *ip;
	struct user_lock_res *lockres;
	int teardown;

	clear_inode(inode);

	mlog(0, "inode %lu\n", inode->i_ino);

	ip = DLMFS_I(inode);
	lockres = &ip->ip_lockres;

	if (S_ISREG(inode->i_mode)) {
		/* Skip destroy if unlink already put the lockres into
		 * teardown; doing it twice would be an error. */
		spin_lock(&lockres->l_lock);
		teardown = !!(lockres->l_flags & USER_LOCK_IN_TEARDOWN);
		spin_unlock(&lockres->l_lock);
		if (!teardown) {
			status = user_dlm_destroy_lock(lockres);
			if (status < 0)
				mlog_errno(status);
		}
		/* Drop the parent pin taken in dlmfs_get_inode(). */
		iput(ip->ip_parent);
		goto clear_fields;
	}

	mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
	/* we must be a directory. If required, lets unregister the
	 * dlm context now. */
	if (ip->ip_conn)
		user_dlm_unregister(ip->ip_conn);
clear_fields:
	ip->ip_parent = NULL;
	ip->ip_conn = NULL;
}
330
/*
 * Build the root directory inode for a new dlmfs mount.  mkdir of a
 * domain directory is only allowed here (see root_inode_operations).
 */
static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);
	umode_t mode = S_IFDIR | 0755;

	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
		simple_inode_init_ts(inode);
		/* Directories start at nlink 2 ("." counts). */
		inc_nlink(inode);

		inode->i_fop = &simple_dir_operations;
		inode->i_op = &dlmfs_root_inode_operations;
	}

	return inode;
}
348
/*
 * Allocate and initialize a non-root inode.  Regular files represent
 * cluster locks: they get a lockres named after the dentry, a fixed
 * size of DLM_LVB_LEN, and pin their parent directory so the domain
 * stays registered until every lock inode is gone.  Directories
 * represent lock domains.  Returns NULL on allocation failure.
 */
static struct inode *dlmfs_get_inode(struct inode *parent,
				     struct dentry *dentry,
				     umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode * inode = new_inode(sb);
	struct dlmfs_inode_private *ip;

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
	simple_inode_init_ts(inode);

	/* Inherit the parent's cluster connection (the domain). */
	ip = DLMFS_I(inode);
	ip->ip_conn = DLMFS_I(parent)->ip_conn;

	switch (mode & S_IFMT) {
	default:
		/* for now we don't support anything other than
		 * directories and regular files. */
		BUG();
		break;
	case S_IFREG:
		inode->i_op = &dlmfs_file_inode_operations;
		inode->i_fop = &dlmfs_file_operations;

		i_size_write(inode,  DLM_LVB_LEN);

		user_dlm_lock_res_init(&ip->ip_lockres, dentry);

		/* released at clear_inode time, this insures that we
		 * get to drop the dlm reference on each lock *before*
		 * we call the unregister code for releasing parent
		 * directories. */
		ip->ip_parent = igrab(parent);
		BUG_ON(!ip->ip_parent);
		break;
	case S_IFDIR:
		inode->i_op = &dlmfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* directory inodes start off with i_nlink ==
		 * 2 (for "." entry) */
		inc_nlink(inode);
		break;
	}
	return inode;
}
399
400/*
401 * File creation. Allocate an inode, and we're done..
402 */
403/* SMP-safe */
/*
 * File creation. Allocate an inode, and we're done..
 */
/* SMP-safe */
/*
 * mkdir in the fs root registers a new lock domain with the cluster
 * stack; the directory name is the domain name (must fit in
 * GROUP_NAME_MAX).  On any failure the partially built inode is
 * released via iput().
 */
static int dlmfs_mkdir(struct mnt_idmap * idmap,
		       struct inode * dir,
		       struct dentry * dentry,
		       umode_t mode)
{
	int status;
	struct inode *inode = NULL;
	const struct qstr *domain = &dentry->d_name;
	struct dlmfs_inode_private *ip;
	struct ocfs2_cluster_connection *conn;

	mlog(0, "mkdir %.*s\n", domain->len, domain->name);

	/* verify that we have a proper domain */
	if (domain->len >= GROUP_NAME_MAX) {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid domain name for directory.\n");
		goto bail;
	}

	inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	ip = DLMFS_I(inode);

	conn = user_dlm_register(domain);
	if (IS_ERR(conn)) {
		status = PTR_ERR(conn);
		mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
		     status, domain->len, domain->name);
		goto bail;
	}
	ip->ip_conn = conn;

	inc_nlink(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */

	status = 0;
bail:
	/* iput(NULL) is a no-op, so this covers the early-error path. */
	if (status < 0)
		iput(inode);
	return status;
}
452
/*
 * create a regular file inside a domain directory: the file name is the
 * lock name.  Names must fit in USER_DLM_LOCK_ID_MAX_LEN and may not
 * begin with '$' ('$' prefixes are reserved by the DLM).
 */
static int dlmfs_create(struct mnt_idmap *idmap,
			struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			bool excl)
{
	int status = 0;
	struct inode *inode;
	const struct qstr *name = &dentry->d_name;

	mlog(0, "create %.*s\n", name->len, name->name);

	/* verify name is valid and doesn't contain any dlm reserved
	 * characters */
	if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
	    name->name[0] == '$') {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
		     name->name);
		goto bail;
	}

	inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
bail:
	return status;
}
487
/*
 * unlink tears down the lockres first; if that fails (e.g. the lock is
 * still busy cluster-wide) the file is left in place and the error is
 * returned to userspace.
 */
static int dlmfs_unlink(struct inode *dir,
			struct dentry *dentry)
{
	int status;
	struct inode *inode = d_inode(dentry);

	mlog(0, "unlink inode %lu\n", inode->i_ino);

	/* if there are no current holders, or none that are waiting
	 * to acquire a lock, this basically destroys our lockres. */
	status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
	if (status < 0) {
		mlog(ML_ERROR, "unlink %pd, error %d from destroy\n",
		     dentry, status);
		goto bail;
	}
	status = simple_unlink(dir, dentry);
bail:
	return status;
}
508
/* Superblock setup: constants, operations table and the root dentry. */
static int dlmfs_fill_super(struct super_block * sb,
			    void * data,
			    int silent)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = DLMFS_MAGIC;
	sb->s_op = &dlmfs_ops;
	/* d_make_root() consumes the inode even on failure. */
	sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
523
/* File ops for lock files: open/release acquire and drop the lock,
 * read/write access the LVB, poll reports basts. */
static const struct file_operations dlmfs_file_operations = {
	.open		= dlmfs_file_open,
	.release	= dlmfs_file_release,
	.poll		= dlmfs_file_poll,
	.read		= dlmfs_file_read,
	.write		= dlmfs_file_write,
	.llseek		= default_llseek,
};

/* Domain directories: create/unlink lock files only (no mkdir here). */
static const struct inode_operations dlmfs_dir_inode_operations = {
	.create		= dlmfs_create,
	.lookup		= simple_lookup,
	.unlink		= dlmfs_unlink,
};

/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= dlmfs_mkdir,
	.rmdir		= simple_rmdir,
};

static const struct super_operations dlmfs_ops = {
	.statfs		= simple_statfs,
	.alloc_inode	= dlmfs_alloc_inode,
	.free_inode	= dlmfs_free_inode,
	.evict_inode	= dlmfs_evict_inode,
	/* Evict inodes as soon as the last reference drops. */
	.drop_inode	= generic_delete_inode,
};

static const struct inode_operations dlmfs_file_inode_operations = {
	.getattr	= simple_getattr,
	.setattr	= dlmfs_file_setattr,
};
558
/* Mount entry point: dlmfs is a nodev (in-memory) filesystem. */
static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
}

static struct file_system_type dlmfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ocfs2_dlmfs",
	.mount		= dlmfs_mount,
	/* kill_litter_super drops the pinned dentries from create/mkdir. */
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("ocfs2_dlmfs");
572
/*
 * Module init: create the inode slab cache, spin up the DLM worker
 * workqueue, publish our locking protocol version, then register the
 * filesystem.  Any failure unwinds whatever was already set up.
 */
static int __init init_dlmfs_fs(void)
{
	int status;
	int cleanup_inode = 0, cleanup_worker = 0;

	dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
				sizeof(struct dlmfs_inode_private),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
					SLAB_ACCOUNT),
				dlmfs_init_once);
	if (!dlmfs_inode_cache) {
		status = -ENOMEM;
		goto bail;
	}
	cleanup_inode = 1;

	/* WQ_MEM_RECLAIM: lock teardown may run under memory pressure. */
	user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM, 0);
	if (!user_dlm_worker) {
		status = -ENOMEM;
		goto bail;
	}
	cleanup_worker = 1;

	user_dlm_set_locking_protocol();
	status = register_filesystem(&dlmfs_fs_type);
bail:
	if (status) {
		if (cleanup_inode)
			kmem_cache_destroy(dlmfs_inode_cache);
		if (cleanup_worker)
			destroy_workqueue(user_dlm_worker);
	} else
		printk("OCFS2 User DLM kernel interface loaded\n");
	return status;
}
608
/* Module exit: unregister, drain the worker, then free the cache. */
static void __exit exit_dlmfs_fs(void)
{
	unregister_filesystem(&dlmfs_fs_type);

	destroy_workqueue(user_dlm_worker);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(dlmfs_inode_cache);

}
623
/* Module metadata and entry points. */
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OCFS2 DLM-Filesystem");

module_init(init_dlmfs_fs)
module_exit(exit_dlmfs_fs)
  1/* -*- mode: c; c-basic-offset: 8; -*-
  2 * vim: noexpandtab sw=8 ts=8 sts=0:
  3 *
  4 * dlmfs.c
  5 *
  6 * Code which implements the kernel side of a minimal userspace
  7 * interface to our DLM. This file handles the virtual file system
  8 * used for communication with userspace. Credit should go to ramfs,
  9 * which was a template for the fs side of this module.
 10 *
 11 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 12 *
 13 * This program is free software; you can redistribute it and/or
 14 * modify it under the terms of the GNU General Public
 15 * License as published by the Free Software Foundation; either
 16 * version 2 of the License, or (at your option) any later version.
 17 *
 18 * This program is distributed in the hope that it will be useful,
 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 21 * General Public License for more details.
 22 *
 23 * You should have received a copy of the GNU General Public
 24 * License along with this program; if not, write to the
 25 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 26 * Boston, MA 021110-1307, USA.
 27 */
 28
 29/* Simple VFS hooks based on: */
 30/*
 31 * Resizable simple ram filesystem for Linux.
 32 *
 33 * Copyright (C) 2000 Linus Torvalds.
 34 *               2000 Transmeta Corp.
 35 */
 36
 37#include <linux/module.h>
 38#include <linux/fs.h>
 39#include <linux/pagemap.h>
 40#include <linux/types.h>
 41#include <linux/slab.h>
 42#include <linux/highmem.h>
 43#include <linux/init.h>
 44#include <linux/string.h>
 45#include <linux/backing-dev.h>
 46#include <linux/poll.h>
 47
 48#include <asm/uaccess.h>
 49
 50#include "stackglue.h"
 51#include "userdlm.h"
 52#include "dlmfsver.h"
 53
 54#define MLOG_MASK_PREFIX ML_DLMFS
 55#include "cluster/masklog.h"
 56
 57
 58static const struct super_operations dlmfs_ops;
 59static const struct file_operations dlmfs_file_operations;
 60static const struct inode_operations dlmfs_dir_inode_operations;
 61static const struct inode_operations dlmfs_root_inode_operations;
 62static const struct inode_operations dlmfs_file_inode_operations;
 63static struct kmem_cache *dlmfs_inode_cache;
 64
 65struct workqueue_struct *user_dlm_worker;
 66
 67
 68
 69/*
 70 * These are the ABI capabilities of dlmfs.
 71 *
 72 * Over time, dlmfs has added some features that were not part of the
 73 * initial ABI.  Unfortunately, some of these features are not detectable
 74 * via standard usage.  For example, Linux's default poll always returns
 75 * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
 76 * added poll support.  Instead, we provide this list of new capabilities.
 77 *
 78 * Capabilities is a read-only attribute.  We do it as a module parameter
 79 * so we can discover it whether dlmfs is built in, loaded, or even not
 80 * loaded.
 81 *
 82 * The ABI features are local to this machine's dlmfs mount.  This is
 83 * distinct from the locking protocol, which is concerned with inter-node
 84 * interaction.
 85 *
 86 * Capabilities:
 87 * - bast	: POLLIN against the file descriptor of a held lock
 88 *		  signifies a bast fired on the lock.
 89 */
/* NOTE(review): everything from here down appears to be an OLDER
 * revision of this same file concatenated by the scrape — it duplicates
 * the definitions above and would not compile alongside them. */
#define DLMFS_CAPABILITIES "bast stackglue"
/* Legacy setter: parameter is read-only, writes always fail. */
static int param_set_dlmfs_capabilities(const char *val,
					struct kernel_param *kp)
{
	printk(KERN_ERR "%s: readonly parameter\n", kp->name);
	return -EINVAL;
}
/* Legacy getter: copy the static capability string (incl. NUL). */
static int param_get_dlmfs_capabilities(char *buffer,
					struct kernel_param *kp)
{
	return strlcpy(buffer, DLMFS_CAPABILITIES,
		       strlen(DLMFS_CAPABILITIES) + 1);
}
module_param_call(capabilities, param_set_dlmfs_capabilities,
		  param_get_dlmfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
106
107
108/*
109 * decodes a set of open flags into a valid lock level and a set of flags.
110 * returns < 0 if we have invalid flags
111 * flags which mean something to us:
112 * O_RDONLY -> PRMODE level
113 * O_WRONLY -> EXMODE level
114 *
115 * O_NONBLOCK -> NOQUEUE
116 */
/*
 * Map open(2) flags to a DLM lock level (O_WRONLY/O_RDWR -> EX,
 * otherwise PR) and request flags (O_NONBLOCK -> NOQUEUE).
 */
static int dlmfs_decode_open_flags(int open_flags,
				   int *level,
				   int *flags)
{
	if (open_flags & (O_WRONLY|O_RDWR))
		*level = DLM_LOCK_EX;
	else
		*level = DLM_LOCK_PR;

	*flags = 0;
	if (open_flags & O_NONBLOCK)
		*flags |= DLM_LKF_NOQUEUE;

	return 0;
}
132
/*
 * Legacy open(): acquire the cluster lock for this inode at the level
 * implied by the open flags; failed trylocks return -ETXTBSY.
 */
static int dlmfs_file_open(struct inode *inode,
			   struct file *file)
{
	int status, level, flags;
	struct dlmfs_filp_private *fp = NULL;
	struct dlmfs_inode_private *ip;

	/* Only regular files carry locks. */
	if (S_ISDIR(inode->i_mode))
		BUG();

	mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
		file->f_flags);

	status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
	if (status < 0)
		goto bail;

	/* We don't want to honor O_APPEND at read/write time as it
	 * doesn't make sense for LVB writes. */
	file->f_flags &= ~O_APPEND;

	fp = kmalloc(sizeof(*fp), GFP_NOFS);
	if (!fp) {
		status = -ENOMEM;
		goto bail;
	}
	/* Saved so release() can unlock at the same level. */
	fp->fp_lock_level = level;

	ip = DLMFS_I(inode);

	status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
	if (status < 0) {
		/* this is a strange error to return here but I want
		 * to be able userspace to be able to distinguish a
		 * valid lock request from one that simply couldn't be
		 * granted. */
		if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
			status = -ETXTBSY;
		kfree(fp);
		goto bail;
	}

	file->private_data = fp;
bail:
	return status;
}
179
180static int dlmfs_file_release(struct inode *inode,
181			      struct file *file)
182{
183	int level, status;
184	struct dlmfs_inode_private *ip = DLMFS_I(inode);
185	struct dlmfs_filp_private *fp = file->private_data;
186
187	if (S_ISDIR(inode->i_mode))
188		BUG();
189
190	mlog(0, "close called on inode %lu\n", inode->i_ino);
191
192	status = 0;
193	if (fp) {
194		level = fp->fp_lock_level;
195		if (level != DLM_LOCK_IV)
196			user_dlm_cluster_unlock(&ip->ip_lockres, level);
197
198		kfree(fp);
199		file->private_data = NULL;
200	}
201
202	return 0;
203}
204
/*
 * We do ->setattr() just to override size changes.  Our size is the size
 * of the LVB and nothing else.
 */
static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
{
	int error;
	struct inode *inode = dentry->d_inode;

	/* Drop size changes silently; the LVB size is fixed. */
	attr->ia_valid &= ~ATTR_SIZE;
	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
223
/*
 * Legacy poll(): POLLIN means a blocking AST fired on the held lock
 * (another node wants a conflicting level) — the "bast" capability.
 */
static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
{
	int event = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct dlmfs_inode_private *ip = DLMFS_I(inode);

	poll_wait(file, &ip->ip_lockres.l_event, wait);

	/* l_flags is protected by the lockres spinlock. */
	spin_lock(&ip->ip_lockres.l_lock);
	if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
		event = POLLIN | POLLRDNORM;
	spin_unlock(&ip->ip_lockres.l_lock);

	return event;
}
239
/*
 * Legacy read(): return LVB bytes starting at *ppos, bounded by i_size
 * (== DLM_LVB_LEN for lock files), copied through a kmalloc'd bounce
 * buffer.  A zero return from user_dlm_read_lvb() means no valid LVB
 * and reads as EOF.
 */
static ssize_t dlmfs_file_read(struct file *filp,
			       char __user *buf,
			       size_t count,
			       loff_t *ppos)
{
	int bytes_left;
	ssize_t readlen, got;
	char *lvb_buf;
	struct inode *inode = filp->f_path.dentry->d_inode;

	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
		inode->i_ino, count, *ppos);

	if (*ppos >= i_size_read(inode))
		return 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	/* don't read past the lvb */
	if ((count + *ppos) > i_size_read(inode))
		readlen = i_size_read(inode) - *ppos;
	else
		readlen = count;

	lvb_buf = kmalloc(readlen, GFP_NOFS);
	if (!lvb_buf)
		return -ENOMEM;

	got = user_dlm_read_lvb(inode, lvb_buf, readlen);
	if (got) {
		/* The helper reads all-or-nothing. */
		BUG_ON(got != readlen);
		bytes_left = __copy_to_user(buf, lvb_buf, readlen);
		readlen -= bytes_left;
	} else
		readlen = 0;

	kfree(lvb_buf);

	*ppos = *ppos + readlen;

	mlog(0, "read %zd bytes\n", readlen);
	return readlen;
}
287
288static ssize_t dlmfs_file_write(struct file *filp,
289				const char __user *buf,
290				size_t count,
291				loff_t *ppos)
292{
293	int bytes_left;
294	ssize_t writelen;
295	char *lvb_buf;
296	struct inode *inode = filp->f_path.dentry->d_inode;
297
298	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
299		inode->i_ino, count, *ppos);
300
301	if (*ppos >= i_size_read(inode))
302		return -ENOSPC;
303
304	if (!count)
305		return 0;
306
307	if (!access_ok(VERIFY_READ, buf, count))
308		return -EFAULT;
309
310	/* don't write past the lvb */
311	if ((count + *ppos) > i_size_read(inode))
312		writelen = i_size_read(inode) - *ppos;
313	else
314		writelen = count - *ppos;
315
316	lvb_buf = kmalloc(writelen, GFP_NOFS);
317	if (!lvb_buf)
318		return -ENOMEM;
319
320	bytes_left = copy_from_user(lvb_buf, buf, writelen);
321	writelen -= bytes_left;
322	if (writelen)
323		user_dlm_write_lvb(inode, lvb_buf, writelen);
324
325	kfree(lvb_buf);
326
327	*ppos = *ppos + writelen;
328	mlog(0, "wrote %zd bytes\n", writelen);
329	return writelen;
330}
331
/* Slab constructor: runs once per slab object, not per allocation. */
static void dlmfs_init_once(void *foo)
{
	struct dlmfs_inode_private *ip =
		(struct dlmfs_inode_private *) foo;

	ip->ip_conn = NULL;
	ip->ip_parent = NULL;

	inode_init_once(&ip->ip_vfs_inode);
}
342
/* ->alloc_inode: carve a dlmfs inode out of the slab cache. */
static struct inode *dlmfs_alloc_inode(struct super_block *sb)
{
	struct dlmfs_inode_private *ip;

	ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
	if (!ip)
		return NULL;

	return &ip->ip_vfs_inode;
}
353
/* RCU callback: actually free the inode after the grace period. */
static void dlmfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}

/* ->destroy_inode: defer the free past an RCU grace period. */
static void dlmfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dlmfs_i_callback);
}
365
/*
 * Legacy ->evict_inode: regular files destroy their lockres and drop
 * the parent-directory pin; directories unregister their cluster
 * connection.  (No teardown-flag check in this revision.)
 */
static void dlmfs_evict_inode(struct inode *inode)
{
	int status;
	struct dlmfs_inode_private *ip;

	end_writeback(inode);

	mlog(0, "inode %lu\n", inode->i_ino);

	ip = DLMFS_I(inode);

	if (S_ISREG(inode->i_mode)) {
		status = user_dlm_destroy_lock(&ip->ip_lockres);
		if (status < 0)
			mlog_errno(status);
		/* Drop the pin taken via igrab() in dlmfs_get_inode(). */
		iput(ip->ip_parent);
		goto clear_fields;
	}

	mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
	/* we must be a directory. If required, lets unregister the
	 * dlm context now. */
	if (ip->ip_conn)
		user_dlm_unregister(ip->ip_conn);
clear_fields:
	ip->ip_parent = NULL;
	ip->ip_conn = NULL;
}
394
/* In-memory fs: no readahead, no dirty accounting or writeback. */
static struct backing_dev_info dlmfs_backing_dev_info = {
	.name		= "ocfs2-dlmfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
400
401static struct inode *dlmfs_get_root_inode(struct super_block *sb)
402{
403	struct inode *inode = new_inode(sb);
404	int mode = S_IFDIR | 0755;
405	struct dlmfs_inode_private *ip;
406
407	if (inode) {
408		ip = DLMFS_I(inode);
409
410		inode->i_ino = get_next_ino();
411		inode->i_mode = mode;
412		inode->i_uid = current_fsuid();
413		inode->i_gid = current_fsgid();
414		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
415		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
416		inc_nlink(inode);
417
418		inode->i_fop = &simple_dir_operations;
419		inode->i_op = &dlmfs_root_inode_operations;
420	}
421
422	return inode;
423}
424
/*
 * Legacy non-root inode constructor.  Regular files represent locks
 * (lockres named after the dentry, fixed DLM_LVB_LEN size, parent
 * directory pinned); directories represent domains.  Also applies
 * setgid-directory inheritance by hand.
 */
static struct inode *dlmfs_get_inode(struct inode *parent,
				     struct dentry *dentry,
				     int mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode * inode = new_inode(sb);
	struct dlmfs_inode_private *ip;

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	/* Inherit the parent's cluster connection (the domain). */
	ip = DLMFS_I(inode);
	ip->ip_conn = DLMFS_I(parent)->ip_conn;

	switch (mode & S_IFMT) {
	default:
		/* for now we don't support anything other than
		 * directories and regular files. */
		BUG();
		break;
	case S_IFREG:
		inode->i_op = &dlmfs_file_inode_operations;
		inode->i_fop = &dlmfs_file_operations;

		i_size_write(inode,  DLM_LVB_LEN);

		user_dlm_lock_res_init(&ip->ip_lockres, dentry);

		/* released at clear_inode time, this insures that we
		 * get to drop the dlm reference on each lock *before*
		 * we call the unregister code for releasing parent
		 * directories. */
		ip->ip_parent = igrab(parent);
		BUG_ON(!ip->ip_parent);
		break;
	case S_IFDIR:
		inode->i_op = &dlmfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* directory inodes start off with i_nlink ==
		 * 2 (for "." entry) */
		inc_nlink(inode);
		break;
	}

	/* Propagate setgid semantics from a setgid parent directory. */
	if (parent->i_mode & S_ISGID) {
		inode->i_gid = parent->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	}

	return inode;
}
485
486/*
487 * File creation. Allocate an inode, and we're done..
488 */
489/* SMP-safe */
/*
 * Legacy mkdir: a directory in the fs root registers a new lock domain
 * (name bounded by GROUP_NAME_MAX).  On failure the partially built
 * inode is released via iput() (a no-op when inode is still NULL).
 */
static int dlmfs_mkdir(struct inode * dir,
		       struct dentry * dentry,
		       int mode)
{
	int status;
	struct inode *inode = NULL;
	struct qstr *domain = &dentry->d_name;
	struct dlmfs_inode_private *ip;
	struct ocfs2_cluster_connection *conn;

	mlog(0, "mkdir %.*s\n", domain->len, domain->name);

	/* verify that we have a proper domain */
	if (domain->len >= GROUP_NAME_MAX) {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid domain name for directory.\n");
		goto bail;
	}

	inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	ip = DLMFS_I(inode);

	conn = user_dlm_register(domain);
	if (IS_ERR(conn)) {
		status = PTR_ERR(conn);
		mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
		     status, domain->len, domain->name);
		goto bail;
	}
	ip->ip_conn = conn;

	inc_nlink(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */

	status = 0;
bail:
	if (status < 0)
		iput(inode);
	return status;
}
537
/*
 * Legacy create: a regular file in a domain directory names a lock.
 * Names are bounded by USER_DLM_LOCK_ID_MAX_LEN and may not start with
 * '$' (reserved by the DLM).
 */
static int dlmfs_create(struct inode *dir,
			struct dentry *dentry,
			int mode,
			struct nameidata *nd)
{
	int status = 0;
	struct inode *inode;
	struct qstr *name = &dentry->d_name;

	mlog(0, "create %.*s\n", name->len, name->name);

	/* verify name is valid and doesn't contain any dlm reserved
	 * characters */
	if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
	    name->name[0] == '$') {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
		     name->name);
		goto bail;
	}

	inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
bail:
	return status;
}
571
572static int dlmfs_unlink(struct inode *dir,
573			struct dentry *dentry)
574{
575	int status;
576	struct inode *inode = dentry->d_inode;
577
578	mlog(0, "unlink inode %lu\n", inode->i_ino);
579
580	/* if there are no current holders, or none that are waiting
581	 * to acquire a lock, this basically destroys our lockres. */
582	status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
583	if (status < 0) {
584		mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n",
585		     dentry->d_name.len, dentry->d_name.name, status);
586		goto bail;
587	}
588	status = simple_unlink(dir, dentry);
589bail:
590	return status;
591}
592
593static int dlmfs_fill_super(struct super_block * sb,
594			    void * data,
595			    int silent)
596{
597	struct inode * inode;
598	struct dentry * root;
599
600	sb->s_maxbytes = MAX_LFS_FILESIZE;
601	sb->s_blocksize = PAGE_CACHE_SIZE;
602	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
603	sb->s_magic = DLMFS_MAGIC;
604	sb->s_op = &dlmfs_ops;
605	inode = dlmfs_get_root_inode(sb);
606	if (!inode)
607		return -ENOMEM;
608
609	root = d_alloc_root(inode);
610	if (!root) {
611		iput(inode);
612		return -ENOMEM;
613	}
614	sb->s_root = root;
615	return 0;
616}
617
/* VFS file operations for regular dlmfs files. */
static const struct file_operations dlmfs_file_operations = {
	.open		= dlmfs_file_open,
	.release	= dlmfs_file_release,
	.poll		= dlmfs_file_poll,
	.read		= dlmfs_file_read,
	.write		= dlmfs_file_write,
	.llseek		= default_llseek,
};
626
/*
 * Inode operations for domain directories: regular files may be
 * created and unlinked, but no .mkdir is provided here — see the
 * root inode operations below, which restrict mkdir to the top level.
 */
static const struct inode_operations dlmfs_dir_inode_operations = {
	.create		= dlmfs_create,
	.lookup		= simple_lookup,
	.unlink		= dlmfs_unlink,
};
632
/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= dlmfs_mkdir,
	.rmdir		= simple_rmdir,
};
639
/*
 * Superblock operations.  generic_delete_inode means inodes are never
 * cached past their last reference: eviction happens immediately at
 * final iput().
 */
static const struct super_operations dlmfs_ops = {
	.statfs		= simple_statfs,
	.alloc_inode	= dlmfs_alloc_inode,
	.destroy_inode	= dlmfs_destroy_inode,
	.evict_inode	= dlmfs_evict_inode,
	.drop_inode	= generic_delete_inode,
};
647
/* Inode operations for regular dlmfs files. */
static const struct inode_operations dlmfs_file_inode_operations = {
	.getattr	= simple_getattr,
	.setattr	= dlmfs_file_setattr,
};
652
/* Mount entry point: dlmfs has no backing device, so use mount_nodev. */
static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
}
658
static struct file_system_type dlmfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ocfs2_dlmfs",	/* mount -t ocfs2_dlmfs */
	.mount		= dlmfs_mount,
	.kill_sb	= kill_litter_super,
};
665
666static int __init init_dlmfs_fs(void)
667{
668	int status;
669	int cleanup_inode = 0, cleanup_worker = 0;
670
671	dlmfs_print_version();
672
673	status = bdi_init(&dlmfs_backing_dev_info);
674	if (status)
675		return status;
676
677	dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
678				sizeof(struct dlmfs_inode_private),
679				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
680					SLAB_MEM_SPREAD),
681				dlmfs_init_once);
682	if (!dlmfs_inode_cache) {
683		status = -ENOMEM;
684		goto bail;
685	}
686	cleanup_inode = 1;
687
688	user_dlm_worker = create_singlethread_workqueue("user_dlm");
689	if (!user_dlm_worker) {
690		status = -ENOMEM;
691		goto bail;
692	}
693	cleanup_worker = 1;
694
695	user_dlm_set_locking_protocol();
696	status = register_filesystem(&dlmfs_fs_type);
697bail:
698	if (status) {
699		if (cleanup_inode)
700			kmem_cache_destroy(dlmfs_inode_cache);
701		if (cleanup_worker)
702			destroy_workqueue(user_dlm_worker);
703		bdi_destroy(&dlmfs_backing_dev_info);
704	} else
705		printk("OCFS2 User DLM kernel interface loaded\n");
706	return status;
707}
708
/*
 * Module exit: tear down in reverse order of init.  Unregistering the
 * filesystem first guarantees no new users can arrive while the
 * workqueue and inode cache are being destroyed.
 */
static void __exit exit_dlmfs_fs(void)
{
	unregister_filesystem(&dlmfs_fs_type);

	/* Drain any queued user_dlm work before destroying the queue. */
	flush_workqueue(user_dlm_worker);
	destroy_workqueue(user_dlm_worker);

	kmem_cache_destroy(dlmfs_inode_cache);

	bdi_destroy(&dlmfs_backing_dev_info);
}
720
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

/* Module entry/exit hooks. */
module_init(init_dlmfs_fs)
module_exit(exit_dlmfs_fs)