1// SPDX-License-Identifier: GPL-2.0-or-later
2/* -*- mode: c; c-basic-offset: 8; -*-
3 * vim: noexpandtab sw=8 ts=8 sts=0:
4 *
5 * mmap.c
6 *
7 * Code to deal with the mess that is clustered mmap.
8 *
9 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 */
11
12#include <linux/fs.h>
13#include <linux/types.h>
14#include <linux/highmem.h>
15#include <linux/pagemap.h>
16#include <linux/uio.h>
17#include <linux/signal.h>
18#include <linux/rbtree.h>
19
20#include <cluster/masklog.h>
21
22#include "ocfs2.h"
23
24#include "aops.h"
25#include "dlmglue.h"
26#include "file.h"
27#include "inode.h"
28#include "mmap.h"
29#include "super.h"
30#include "ocfs2_trace.h"
31
32
33static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
34{
35 struct vm_area_struct *vma = vmf->vma;
36 sigset_t oldset;
37 vm_fault_t ret;
38
39 ocfs2_block_signals(&oldset);
40 ret = filemap_fault(vmf);
41 ocfs2_unblock_signals(&oldset);
42
43 trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
44 vma, vmf->page, vmf->pgoff);
45 return ret;
46}
47
/*
 * Do the real work of making @page writable.  The caller holds the
 * cluster inode lock (EX) and ip_alloc_sem (see ocfs2_page_mkwrite()),
 * which keep remote truncates and local i_size/btree changes away.
 *
 * Returns VM_FAULT_LOCKED with @page still locked on success,
 * VM_FAULT_NOPAGE to make the VM retry the fault, or vmf_error() on
 * hard failure.
 */
static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
				       struct buffer_head *di_bh, struct page *page)
{
	int err;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	loff_t pos = page_offset(page);
	unsigned int len = PAGE_SIZE;
	pgoff_t last_index;
	struct page *locked_page = NULL;
	void *fsdata;
	loff_t size = i_size_read(inode);

	/* Index of the last page covered by i_size. */
	last_index = (size - 1) >> PAGE_SHIFT;

	/*
	 * There are cases that lead to the page no longer belonging to the
	 * mapping.
	 * 1) pagecache truncates locally due to memory pressure.
	 * 2) pagecache truncates when another is taking EX lock against
	 * inode lock. see ocfs2_data_convert_worker.
	 *
	 * The i_size check doesn't catch the case where nodes truncated and
	 * then re-extended the file. We'll re-check the page mapping after
	 * taking the page lock inside of ocfs2_write_begin_nolock().
	 *
	 * Let VM retry with these cases.
	 */
	if ((page->mapping != inode->i_mapping) ||
	    (!PageUptodate(page)) ||
	    (page_offset(page) >= size))
		goto out;

	/*
	 * Call ocfs2_write_begin() and ocfs2_write_end() to take
	 * advantage of the allocation code there. We pass a write
	 * length of the whole page (chopped to i_size) to make sure
	 * the whole thing is allocated.
	 *
	 * Since we know the page is up to date, we don't have to
	 * worry about ocfs2_write_begin() skipping some buffer reads
	 * because the "write" would invalidate their data.
	 */
	if (page->index == last_index)
		len = ((size - 1) & ~PAGE_MASK) + 1;

	err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
				       &locked_page, &fsdata, di_bh, page);
	if (err) {
		if (err != -ENOSPC)
			mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	if (!locked_page) {
		/*
		 * NOTE(review): no page came back from write_begin --
		 * ask the VM to retry the fault.  Presumably write_begin
		 * dropped the page itself; confirm against
		 * ocfs2_write_begin_nolock().
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
	/* A short "write" here would leak allocation state; must be all. */
	BUG_ON(err != len);
	ret = VM_FAULT_LOCKED;
out:
	return ret;
}
114
/*
 * ->page_mkwrite handler: take the cluster inode lock (EX) and
 * ip_alloc_sem, then let __ocfs2_page_mkwrite() do the allocation work.
 * Signals are blocked for the duration, matching ocfs2_fault().
 */
static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct buffer_head *di_bh = NULL;
	sigset_t oldset;
	int err;
	vm_fault_t ret;

	/* Pairs with sb_end_pagefault() below (freeze protection). */
	sb_start_pagefault(inode->i_sb);
	ocfs2_block_signals(&oldset);

	/*
	 * The cluster locks taken will block a truncate from another
	 * node. Taking the data lock will also ensure that we don't
	 * attempt page truncation as part of a downconvert.
	 */
	err = ocfs2_inode_lock(inode, &di_bh, 1);
	if (err < 0) {
		mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	/*
	 * The alloc sem should be enough to serialize with
	 * ocfs2_truncate_file() changing i_size as well as any thread
	 * modifying the inode btree.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	/* di_bh was acquired by ocfs2_inode_lock(); drop it before unlock. */
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

out:
	ocfs2_unblock_signals(&oldset);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
158
/* mmap ops: generic read fault plus ocfs2-aware write-fault handling. */
static const struct vm_operations_struct ocfs2_file_vm_ops = {
	.fault		= ocfs2_fault,
	.page_mkwrite	= ocfs2_page_mkwrite,
};
163
/*
 * ocfs2's mmap entry point.  Take and drop the inode lock once so the
 * atime update happens under cluster locking, then install our vm_ops.
 *
 * Note: returns 0 even when the atime lock attempt fails -- the failure
 * is only logged, and the vm_ops are installed regardless.
 */
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = 0, lock_level = 0;

	ret = ocfs2_inode_lock_atime(file_inode(file),
				     file->f_path.mnt, &lock_level, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}
	ocfs2_inode_unlock(file_inode(file), lock_level);
out:
	vma->vm_ops = &ocfs2_file_vm_ops;
	return 0;
}
179
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * mmap.c
4 *
5 * Code to deal with the mess that is clustered mmap.
6 *
7 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
8 */
9
10#include <linux/fs.h>
11#include <linux/types.h>
12#include <linux/highmem.h>
13#include <linux/pagemap.h>
14#include <linux/uio.h>
15#include <linux/signal.h>
16#include <linux/rbtree.h>
17
18#include <cluster/masklog.h>
19
20#include "ocfs2.h"
21
22#include "aops.h"
23#include "dlmglue.h"
24#include "file.h"
25#include "inode.h"
26#include "mmap.h"
27#include "super.h"
28#include "ocfs2_trace.h"
29
30
31static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
32{
33 struct vm_area_struct *vma = vmf->vma;
34 sigset_t oldset;
35 vm_fault_t ret;
36
37 ocfs2_block_signals(&oldset);
38 ret = filemap_fault(vmf);
39 ocfs2_unblock_signals(&oldset);
40
41 trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
42 vma, vmf->page, vmf->pgoff);
43 return ret;
44}
45
/*
 * Do the real work of making @page writable.  The caller holds the
 * cluster inode lock (EX) and ip_alloc_sem (see ocfs2_page_mkwrite()),
 * which keep remote truncates and local i_size/btree changes away.
 *
 * Returns VM_FAULT_LOCKED with @page still locked on success,
 * VM_FAULT_NOPAGE to make the VM retry the fault, or vmf_error() on
 * hard failure.
 */
static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
				       struct buffer_head *di_bh, struct page *page)
{
	int err;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	loff_t pos = page_offset(page);
	unsigned int len = PAGE_SIZE;
	pgoff_t last_index;
	struct folio *locked_folio = NULL;
	void *fsdata;
	loff_t size = i_size_read(inode);

	/* Index of the last page covered by i_size. */
	last_index = (size - 1) >> PAGE_SHIFT;

	/*
	 * There are cases that lead to the page no longer belonging to the
	 * mapping.
	 * 1) pagecache truncates locally due to memory pressure.
	 * 2) pagecache truncates when another is taking EX lock against
	 * inode lock. see ocfs2_data_convert_worker.
	 *
	 * The i_size check doesn't catch the case where nodes truncated and
	 * then re-extended the file. We'll re-check the page mapping after
	 * taking the page lock inside of ocfs2_write_begin_nolock().
	 *
	 * Let VM retry with these cases.
	 */
	if ((page->mapping != inode->i_mapping) ||
	    (!PageUptodate(page)) ||
	    (page_offset(page) >= size))
		goto out;

	/*
	 * Call ocfs2_write_begin() and ocfs2_write_end() to take
	 * advantage of the allocation code there. We pass a write
	 * length of the whole page (chopped to i_size) to make sure
	 * the whole thing is allocated.
	 *
	 * Since we know the page is up to date, we don't have to
	 * worry about ocfs2_write_begin() skipping some buffer reads
	 * because the "write" would invalidate their data.
	 */
	if (page->index == last_index)
		len = ((size - 1) & ~PAGE_MASK) + 1;

	err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
				       &locked_folio, &fsdata, di_bh, page);
	if (err) {
		if (err != -ENOSPC)
			mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	if (!locked_folio) {
		/*
		 * NOTE(review): no folio came back from write_begin --
		 * ask the VM to retry the fault.  Presumably write_begin
		 * dropped it itself; confirm against
		 * ocfs2_write_begin_nolock().
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
	/* A short "write" here would leak allocation state; must be all. */
	BUG_ON(err != len);
	ret = VM_FAULT_LOCKED;
out:
	return ret;
}
112
/*
 * ->page_mkwrite handler: take the cluster inode lock (EX) and
 * ip_alloc_sem, then let __ocfs2_page_mkwrite() do the allocation work.
 * Signals are blocked for the duration, matching ocfs2_fault().
 */
static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct buffer_head *di_bh = NULL;
	sigset_t oldset;
	int err;
	vm_fault_t ret;

	/* Pairs with sb_end_pagefault() below (freeze protection). */
	sb_start_pagefault(inode->i_sb);
	ocfs2_block_signals(&oldset);

	/*
	 * The cluster locks taken will block a truncate from another
	 * node. Taking the data lock will also ensure that we don't
	 * attempt page truncation as part of a downconvert.
	 */
	err = ocfs2_inode_lock(inode, &di_bh, 1);
	if (err < 0) {
		mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	/*
	 * The alloc sem should be enough to serialize with
	 * ocfs2_truncate_file() changing i_size as well as any thread
	 * modifying the inode btree.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	/* di_bh was acquired by ocfs2_inode_lock(); drop it before unlock. */
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

out:
	ocfs2_unblock_signals(&oldset);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
156
/* mmap ops: generic read fault plus ocfs2-aware write-fault handling. */
static const struct vm_operations_struct ocfs2_file_vm_ops = {
	.fault		= ocfs2_fault,
	.page_mkwrite	= ocfs2_page_mkwrite,
};
161
162int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
163{
164 int ret = 0, lock_level = 0;
165
166 ret = ocfs2_inode_lock_atime(file_inode(file),
167 file->f_path.mnt, &lock_level, 1);
168 if (ret < 0) {
169 mlog_errno(ret);
170 goto out;
171 }
172 ocfs2_inode_unlock(file_inode(file), lock_level);
173out:
174 vma->vm_ops = &ocfs2_file_vm_ops;
175 return 0;
176}
177