// SPDX-License-Identifier: GPL-2.0
/*
 * I/O and data path helper functionality.
 *
 * Borrowed from NFS Copyright (c) 2016 Trond Myklebust
 */

#include <linux/kernel.h>
#include <linux/netfs.h>
#include "internal.h"

/*
 * inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by holding inode->i_rwsem.
 */
static int inode_dio_wait_interruptible(struct inode *inode)
{
	if (!atomic_read(&inode->i_dio_count))
		return 0;

	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	for (;;) {
		prepare_to_wait(wq, &q.wq_entry, TASK_INTERRUPTIBLE);
		if (!atomic_read(&inode->i_dio_count))
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(wq, &q.wq_entry);

	return atomic_read(&inode->i_dio_count) ? -ERESTARTSYS : 0;
}

/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_o_direct(struct netfs_inode *ictx)
{
	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
		return 0;
	clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
	return inode_dio_wait_interruptible(&ictx->inode);
}

/**
 * netfs_start_io_read - declare the file is being used for buffered reads
 * @inode: file inode
 *
 * Declare that a buffered read operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that buffered read operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas direct I/O
 * operations need to wait to grab an exclusive lock in order to set
 * NETFS_ICTX_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
 */
int netfs_start_io_read(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_read);

/**
 * netfs_end_io_read - declare that the buffered read operation is done
 * @inode: file inode
 *
 * Declare that a buffered read operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_read(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_read);
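/*
 * Example (illustrative sketch only, not part of this file): a network
 * filesystem's ->read_iter() handler would typically bracket its buffered
 * path with the helpers above, so that reads run in parallel under the
 * shared lock while O_DIRECT is excluded.  "myfs" is a hypothetical caller:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb,
 *					   struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_read(inode);	// shared i_rwsem
 *		if (ret < 0)
 *			return ret;
 *		ret = generic_file_read_iter(iocb, to);
 *		netfs_end_io_read(inode);		// drop shared lock
 *		return ret;
 *	}
 */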
/**
 * netfs_start_io_write - declare the file is being used for buffered writes
 * @inode: file inode
 *
 * Declare that a buffered write operation is about to start, and ensure
 * that we block all direct I/O.
 */
int netfs_start_io_write(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_write);

/**
 * netfs_end_io_write - declare that the buffered write operation is done
 * @inode: file inode
 *
 * Declare that a buffered write operation is done, and release the
 * exclusive lock on inode->i_rwsem.
 */
void netfs_end_io_write(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_write(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_write);
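/*
 * Example (illustrative sketch only, not part of this file): a buffered
 * ->write_iter() handler would hold the exclusive lock for the duration of
 * the write, serialising it against both buffered reads and O_DIRECT.
 * Note __generic_file_write_iter() is used because it expects the caller
 * to already hold inode->i_rwsem; generic_write_checks() and other error
 * handling are elided for brevity.  "myfs" is a hypothetical caller:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *					    struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_write(inode);	// exclusive i_rwsem
 *		if (ret < 0)
 *			return ret;
 *		ret = __generic_file_write_iter(iocb, from);
 *		netfs_end_io_write(inode);		// drop exclusive lock
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */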
/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_buffered(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
		set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
		if (inode->i_mapping->nrpages != 0) {
			unmap_mapping_range(inode->i_mapping, 0, 0, 0);
			ret = filemap_fdatawait(inode->i_mapping);
			if (ret < 0) {
				clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * netfs_start_io_direct - declare the file is being used for direct i/o
 * @inode: file inode
 *
 * Declare that a direct I/O operation is about to start, and ensure
 * that we block all buffered I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that direct I/O operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas buffered I/O
 * operations need to wait to grab an exclusive lock in order to clear
 * NETFS_ICTX_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
 */
int netfs_start_io_direct(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	ret = netfs_block_buffered(inode);
	if (ret < 0) {
		up_write(&inode->i_rwsem);
		return ret;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_direct);

/**
 * netfs_end_io_direct - declare that the direct i/o operation is done
 * @inode: file inode
 *
 * Declare that a direct I/O operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_direct(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_direct);
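/*
 * Example (illustrative sketch only, not part of this file): a direct I/O
 * path would branch on IOCB_DIRECT and take the shared lock, letting
 * concurrent O_DIRECT requests run in parallel while buffered I/O waits
 * for the exclusive lock.  "myfs" and myfs_do_dio() are hypothetical:
 *
 *	static ssize_t myfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_direct(inode);	// shared i_rwsem, ODIRECT set
 *		if (ret < 0)
 *			return ret;
 *		ret = myfs_do_dio(iocb, to);		// hypothetical DIO engine
 *		netfs_end_io_direct(inode);		// drop shared lock
 *		return ret;
 *	}
 */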