/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FS_IO_H
#define _BCACHEFS_FS_IO_H

#ifndef NO_BCACHEFS_FS

#include "buckets.h"
#include "fs.h"
#include "io_write_types.h"
#include "quota.h"

#include <linux/uio.h>

struct folio_vec {
	struct folio	*fv_folio;
	size_t		fv_offset;
	size_t		fv_len;
};

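/*
 * Translate a page-based bio_vec into a folio_vec: look up the folio backing
 * bv_page, turn the page offset into an offset within that folio, and clamp
 * the length so the segment never extends past the end of the folio.
 */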
static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
{
	struct folio *folio	= page_folio(bv.bv_page);
	size_t offset		= (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
		bv.bv_offset;
	size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);

	return (struct folio_vec) {
		.fv_folio	= folio,
		.fv_offset	= offset,
		.fv_len		= len,
	};
}

static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
						    struct bvec_iter iter)
{
	return biovec_to_foliovec(bio_iter_iovec(bio, iter));
}

#define __bio_for_each_folio(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec_folio((bio), (iter))), 1);	\
	     bio_advance_iter_single((bio), &(iter), (bvl).fv_len))

/**
 * bio_for_each_folio - iterate over folios within a bio
 * @bvl:	struct folio_vec holding the current folio, offset and length
 * @bio:	bio to iterate over
 * @iter:	struct bvec_iter to use as the iteration cursor
 *
 * Like other non-_all versions, this iterates over what bio->bi_iter currently
 * points to. This version is for drivers, where the bio may have previously
 * been split or cloned.
 */
#define bio_for_each_folio(bvl, bio, iter)				\
	__bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
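/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * using bio_for_each_folio() to walk a bio folio by folio, here just to sum
 * the number of bytes covered by the bio's current iterator.
 */
static inline size_t example_bio_folio_bytes(struct bio *bio)
{
	struct folio_vec fv;
	struct bvec_iter iter;
	size_t bytes = 0;

	bio_for_each_folio(fv, bio, iter)
		bytes += fv.fv_len;

	return bytes;
}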

/*
 * Sectors reserved against the inode's quota (tracked in
 * inode->ei_quota_reserved) but not yet consumed; released with
 * bch2_quota_reservation_put().
 */
struct quota_res {
	u64				sectors;
};

#ifdef CONFIG_BCACHEFS_QUOTA

static inline void __bch2_quota_reservation_put(struct bch_fs *c,
					 struct bch_inode_info *inode,
					 struct quota_res *res)
{
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	res->sectors = 0;
}

static inline void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (res->sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__bch2_quota_reservation_put(c, inode, res);
		mutex_unlock(&inode->ei_quota_lock);
	}
}

static inline int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      u64 sectors,
				      bool check_enospc)
{
	int ret;

	if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
		return 0;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static inline void __bch2_quota_reservation_put(struct bch_fs *c,
					 struct bch_inode_info *inode,
					 struct quota_res *res) {}

static inline void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res) {}

static inline int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      u64 sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
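
/*
 * Illustrative sketch (hypothetical helper, not part of bcachefs): the
 * reservation helpers above are used in pairs; reserve space against the
 * inode's quota up front, then drop whatever is left of the reservation once
 * the operation has been accounted or has failed.
 */
static inline int example_quota_reserved_op(struct bch_fs *c,
					    struct bch_inode_info *inode,
					    u64 sectors)
{
	struct quota_res quota_res = { 0 };
	int ret;

	/* Fails if the quota check does, when check_enospc is true */
	ret = bch2_quota_reservation_add(c, inode, &quota_res, sectors, true);
	if (ret)
		return ret;

	/* ... do the work covered by the reservation ... */

	/* Release anything still reserved */
	bch2_quota_reservation_put(c, inode, &quota_res);
	return 0;
}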

void __bch2_i_sectors_acct(struct bch_fs *, struct bch_inode_info *,
			   struct quota_res *, s64);

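/*
 * Adjust the inode's sector accounting by a signed delta, optionally against
 * a prior quota reservation. The wrapper below takes ei_quota_lock around
 * __bch2_i_sectors_acct() and skips the call entirely for a zero delta.
 */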
static inline void bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
				       struct quota_res *quota_res, s64 sectors)
{
	if (sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__bch2_i_sectors_acct(c, inode, quota_res, sectors);
		mutex_unlock(&inode->ei_quota_lock);
	}
}

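/*
 * current->faults_disabled_mapping does double duty: the pointer identifies
 * an address_space, and its low bit records whether locks were dropped while
 * faulting on it. faults_disabled_mapping() masks the bit off,
 * set_fdm_dropped_locks() sets it, and fdm_dropped_locks() tests it.
 */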
static inline struct address_space *faults_disabled_mapping(void)
{
	return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
}

static inline void set_fdm_dropped_locks(void)
{
	current->faults_disabled_mapping =
		(void *) (((unsigned long) current->faults_disabled_mapping)|1);
}

static inline bool fdm_dropped_locks(void)
{
	return ((unsigned long) current->faults_disabled_mapping) & 1;
}

void bch2_inode_flush_nocow_writes_async(struct bch_fs *,
			struct bch_inode_info *, struct closure *);

int __must_check bch2_write_inode_size(struct bch_fs *,
				       struct bch_inode_info *,
				       loff_t, unsigned);

int bch2_fsync(struct file *, loff_t, loff_t, int);

int bchfs_truncate(struct mnt_idmap *,
		  struct bch_inode_info *, struct iattr *);
long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);

loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
			     loff_t, loff_t, unsigned);

loff_t bch2_llseek(struct file *, loff_t, int);
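
/*
 * bch2_llseek(), bch2_fsync(), bch2_fallocate_dispatch() and
 * bch2_remap_file_range() have the signatures of the corresponding
 * struct file_operations hooks; a caller would wire them up roughly like
 * this (illustrative sketch only, not the actual bcachefs operations table):
 *
 *	static const struct file_operations example_file_ops = {
 *		.llseek			= bch2_llseek,
 *		.fsync			= bch2_fsync,
 *		.fallocate		= bch2_fallocate_dispatch,
 *		.remap_file_range	= bch2_remap_file_range,
 *	};
 */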

void bch2_fs_fsio_exit(struct bch_fs *);
int bch2_fs_fsio_init(struct bch_fs *);
#else
static inline void bch2_fs_fsio_exit(struct bch_fs *c) {}
static inline int bch2_fs_fsio_init(struct bch_fs *c) { return 0; }
#endif

#endif /* _BCACHEFS_FS_IO_H */