1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/minix/dir.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * minix directory handling functions
8 *
9 * Updated to filesystem version 3 by Daniel Aragones
10 */
11
12#include "minix.h"
13#include <linux/buffer_head.h>
14#include <linux/highmem.h>
15#include <linux/swap.h>
16
/* Short aliases for the V1/V2 and V3 on-disk directory entry layouts. */
typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

/* Forward declaration: referenced by minix_dir_operations, defined below. */
static int minix_readdir(struct file *, struct dir_context *);
21
/*
 * Directory file operations: listing goes through minix_readdir
 * (iterate_shared); reads and seeks use the generic directory helpers.
 */
const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= minix_readdir,
	.fsync		= generic_file_fsync,
};
28
/* Undo dir_get_page(): drop the kmap and release the page reference. */
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
34
35/*
36 * Return the offset into page `page_nr' of the last valid
37 * byte in that page, plus one.
38 */
39static unsigned
40minix_last_byte(struct inode *inode, unsigned long page_nr)
41{
42 unsigned last_byte = PAGE_SIZE;
43
44 if (page_nr == (inode->i_size >> PAGE_SHIFT))
45 last_byte = inode->i_size & (PAGE_SIZE - 1);
46 return last_byte;
47}
48
/*
 * Write a just-modified chunk of a locked directory page back through the
 * page cache. Grows i_size if the chunk ran past it. For DIRSYNC
 * directories the page is written out synchronously (write_one_page also
 * unlocks it); otherwise the page is simply unlocked and left dirty.
 * Returns 0 or a writeback error.
 */
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page);
	else
		unlock_page(page);
	return err;
}
66
67static struct page * dir_get_page(struct inode *dir, unsigned long n)
68{
69 struct address_space *mapping = dir->i_mapping;
70 struct page *page = read_mapping_page(mapping, n, NULL);
71 if (!IS_ERR(page))
72 kmap(page);
73 return page;
74}
75
76static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
77{
78 return (void*)((char*)de + sbi->s_dirsize);
79}
80
/*
 * readdir: walk the directory one page at a time, emitting every live
 * (inode != 0) fixed-size entry via dir_emit(). ctx->pos counts bytes
 * into the directory and stays chunk-aligned.
 */
static int minix_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	unsigned long npages = dir_pages(inode);
	unsigned long pos = ctx->pos;
	unsigned offset;
	unsigned long n;

	/* re-align pos in case userspace lseek()ed mid-entry */
	ctx->pos = pos = ALIGN(pos, chunk_size);
	if (pos >= inode->i_size)
		return 0;

	offset = pos & ~PAGE_MASK;
	n = pos >> PAGE_SHIFT;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		/*
		 * NOTE(review): an unreadable page is skipped without
		 * advancing ctx->pos past it — confirm getdents() cannot
		 * spin on a bad block.
		 */
		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		/* last offset at which a whole entry can still start */
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			const char *name;
			__u32 inumber;
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				/* on-disk names are NUL-padded, not NUL-terminated */
				unsigned l = strnlen(name, sbi->s_namelen);
				if (!dir_emit(ctx, name, l,
						inumber, DT_UNKNOWN)) {
					dir_put_page(page);
					return 0;
				}
			}
			ctx->pos += chunk_size;
		}
		dir_put_page(page);
	}
	return 0;
}
134
/*
 * Compare a lookup name of length `len' against the fixed-width on-disk
 * name `buffer' (width `maxlen', NUL-padded when shorter). Returns
 * non-zero on an exact match.
 */
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;	/* on-disk name is longer than the lookup name */
	return memcmp(name, buffer, len) == 0;
}
142
/*
 * minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the cache buffer in which the entry was found, and the entry
 * itself (as a parameter - res_dir). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * On success the matched page is returned kmap()ed in *res_page and the
 * caller must dir_put_page() it; on failure NULL is returned and
 * *res_page is NULL.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = d_inode(dentry->d_parent);
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;	/* unreadable page: keep searching the rest */

		kaddr = (char*)page_address(page);
		/* last offset at which a whole entry can still start */
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;	/* deleted slot */
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}
199
/*
 * Add @dentry's name, pointing at @inode, to its parent directory.
 * Reuses the first free slot (inode == 0) or, failing that, appends a
 * new chunk just past i_size. Returns 0, -EEXIST if the name already
 * exists, or a page-cache/commit error.
 */
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {	/* <= : one extra page for expansion */
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);	/* current EOF within this page */
		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;	/* reuse a deleted slot */
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	/* the expansion pass above always finds a slot, so this is unreachable */
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + p - (char *)page_address(page);
	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err)
		goto out_unlock;
	/* copy the name, zero-pad the rest of the slot minus the inode field */
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
288
/*
 * Remove directory entry @de (inside mapped @page, as returned by
 * minix_find_entry) by zeroing its inode number. Commits the chunk,
 * bumps the directory's times, and drops the page reference. Returns 0
 * or a prepare/commit error.
 */
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct inode *inode = page->mapping->host;
	char *kaddr = page_address(page);
	loff_t pos = page_offset(page) + (char*)de - kaddr;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	unsigned len = sbi->s_dirsize;
	int err;

	lock_page(page);
	err = minix_prepare_chunk(page, pos, len);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = 0;
		else
			de->inode = 0;
		err = dir_commit_chunk(page, pos, len);	/* also unlocks the page */
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);
	return err;
}
314
/*
 * Write the "." and ".." entries into freshly-created directory @inode
 * ("." -> the new directory itself, ".." -> parent @dir). Returns 0 or
 * an error from chunk preparation/commit.
 */
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page);
	memset(kaddr, 0, PAGE_SIZE);	/* zero the whole page, not just two slots */

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	put_page(page);
	return err;
}
357
/*
 * routine to check that the specified directory is empty (for rmdir)
 *
 * Returns 1 when every live entry is "." or ".." (and "." points back
 * at this directory), 0 otherwise. Unreadable pages are skipped.
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					/* "." must reference this directory */
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}
410
/*
 * Releases the page.
 *
 * Point existing directory entry @de (inside mapped @page) at @inode —
 * used by rename to overwrite the target entry. Commits the chunk and
 * updates the directory's timestamps; prepare/commit errors are
 * swallowed (the function returns void).
 */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = page->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) +
			(char *)de-(char*)page_address(page);
	int err;

	lock_page(page);

	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = inode->i_ino;
		else
			de->inode = inode->i_ino;
		err = dir_commit_chunk(page, pos, sbi->s_dirsize);	/* also unlocks */
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	mark_inode_dirty(dir);
}
437
438struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
439{
440 struct page *page = dir_get_page(dir, 0);
441 struct minix_sb_info *sbi = minix_sb(dir->i_sb);
442 struct minix_dir_entry *de = NULL;
443
444 if (!IS_ERR(page)) {
445 de = minix_next_entry(page_address(page), sbi);
446 *p = page;
447 }
448 return de;
449}
450
451ino_t minix_inode_by_name(struct dentry *dentry)
452{
453 struct page *page;
454 struct minix_dir_entry *de = minix_find_entry(dentry, &page);
455 ino_t res = 0;
456
457 if (de) {
458 struct address_space *mapping = page->mapping;
459 struct inode *inode = mapping->host;
460 struct minix_sb_info *sbi = minix_sb(inode->i_sb);
461
462 if (sbi->s_version == MINIX_V3)
463 res = ((minix3_dirent *) de)->inode;
464 else
465 res = de->inode;
466 dir_put_page(page);
467 }
468 return res;
469}
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * minix directory handling functions
 *
 * Updated to filesystem version 3 by Daniel Aragones
 *
 * NOTE(review): everything from this header down is a second, newer
 * (folio-based) copy of dir.c concatenated onto the page-based copy
 * above. The two halves redefine the same symbols and cannot be
 * compiled together — keep exactly one copy.
 */
11
12#include "minix.h"
13#include <linux/buffer_head.h>
14#include <linux/highmem.h>
15#include <linux/swap.h>
16
/* Short aliases for the V1/V2 and V3 on-disk directory entry layouts. */
typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

/* Forward declaration: referenced by minix_dir_operations, defined below. */
static int minix_readdir(struct file *, struct dir_context *);
21
/*
 * Directory file operations: listing goes through minix_readdir
 * (iterate_shared); reads and seeks use the generic directory helpers.
 */
const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= minix_readdir,
	.fsync		= generic_file_fsync,
};
28
29/*
30 * Return the offset into page `page_nr' of the last valid
31 * byte in that page, plus one.
32 */
33static unsigned
34minix_last_byte(struct inode *inode, unsigned long page_nr)
35{
36 unsigned last_byte = PAGE_SIZE;
37
38 if (page_nr == (inode->i_size >> PAGE_SHIFT))
39 last_byte = inode->i_size & (PAGE_SIZE - 1);
40 return last_byte;
41}
42
/*
 * Publish a just-modified chunk of a locked directory folio: mark it
 * dirty/uptodate via block_write_end(), grow i_size if the chunk ran
 * past it, and unlock the folio. Durability for DIRSYNC directories is
 * handled separately by minix_handle_dirsync().
 */
static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	struct address_space *mapping = folio->mapping;
	struct inode *dir = mapping->host;

	block_write_end(NULL, mapping, pos, len, len, folio, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	folio_unlock(folio);
}
56
57static int minix_handle_dirsync(struct inode *dir)
58{
59 int err;
60
61 err = filemap_write_and_wait(dir->i_mapping);
62 if (!err)
63 err = sync_inode_metadata(dir, 1);
64 return err;
65}
66
67static void *dir_get_folio(struct inode *dir, unsigned long n,
68 struct folio **foliop)
69{
70 struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
71
72 if (IS_ERR(folio))
73 return ERR_CAST(folio);
74 *foliop = folio;
75 return kmap_local_folio(folio, 0);
76}
77
78static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
79{
80 return (void*)((char*)de + sbi->s_dirsize);
81}
82
/*
 * readdir: walk the directory one page-cache folio at a time, emitting
 * every live (inode != 0) fixed-size entry via dir_emit(). ctx->pos
 * counts bytes into the directory and stays chunk-aligned.
 */
static int minix_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	unsigned long npages = dir_pages(inode);
	unsigned long pos = ctx->pos;
	unsigned offset;
	unsigned long n;

	/* re-align pos in case userspace lseek()ed mid-entry */
	ctx->pos = pos = ALIGN(pos, chunk_size);
	if (pos >= inode->i_size)
		return 0;

	offset = pos & ~PAGE_MASK;
	n = pos >> PAGE_SHIFT;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct folio *folio;

		kaddr = dir_get_folio(inode, n, &folio);
		/*
		 * NOTE(review): an unreadable folio is skipped without
		 * advancing ctx->pos past it — confirm getdents() cannot
		 * spin on a bad block.
		 */
		if (IS_ERR(kaddr))
			continue;
		p = kaddr+offset;
		/* last offset at which a whole entry can still start */
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			const char *name;
			__u32 inumber;
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				/* on-disk names are NUL-padded, not NUL-terminated */
				unsigned l = strnlen(name, sbi->s_namelen);
				if (!dir_emit(ctx, name, l,
						inumber, DT_UNKNOWN)) {
					folio_release_kmap(folio, p);
					return 0;
				}
			}
			ctx->pos += chunk_size;
		}
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}
136
/*
 * Match a lookup name of length `len' against the fixed-width on-disk
 * name `buffer' (width `maxlen', NUL-padded when shorter). Returns
 * non-zero on an exact match.
 */
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	int disk_name_is_longer = (len < maxlen) && buffer[len] != '\0';

	if (disk_name_is_longer)
		return 0;
	return memcmp(name, buffer, len) == 0;
}
144
/*
 * minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name.
 * It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * On Success folio_release_kmap() should be called on *foliop.
 * On failure NULL is returned; *foliop may then hold the last folio
 * tried, so callers must not use it without a non-NULL return.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = d_inode(dentry->d_parent);
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	char *p;

	char *namx;
	__u32 inumber;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		kaddr = dir_get_folio(dir, n, foliop);
		if (IS_ERR(kaddr))
			continue;	/* unreadable folio: keep searching the rest */

		/* last offset at which a whole entry can still start */
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;	/* deleted slot */
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		folio_release_kmap(*foliop, kaddr);
	}
	return NULL;

found:
	return (minix_dirent *)p;
}
198
/*
 * Add @dentry's name, pointing at @inode, to its parent directory.
 * Reuses the first free slot (inode == 0) or, failing that, appends a
 * new chunk just past i_size. Returns 0, -EEXIST if the name already
 * exists, or a page-cache/commit/sync error.
 */
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct folio *folio = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {	/* <= : one extra page for expansion */
		char *limit, *dir_end;

		kaddr = dir_get_folio(dir, n, &folio);
		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + minix_last_byte(dir, n);	/* current EOF within this folio */
		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;	/* reuse a deleted slot */
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	/* the expansion pass above always finds a slot, so this is unreachable */
	BUG();
	return -EINVAL;

got_it:
	pos = folio_pos(folio) + offset_in_folio(folio, p);
	err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
	if (err)
		goto out_unlock;
	/* copy the name, zero-pad the rest of the slot minus the inode field */
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	dir_commit_chunk(folio, pos, sbi->s_dirsize);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	err = minix_handle_dirsync(dir);
out_put:
	folio_release_kmap(folio, kaddr);
	return err;
out_unlock:
	folio_unlock(folio);
	goto out_put;
}
285
/*
 * Remove directory entry @de (inside mapped @folio, as returned by
 * minix_find_entry) by zeroing its inode number, then commit the chunk,
 * bump the directory's times and sync. The folio is NOT released here —
 * the caller still owns the kmap and reference. Returns 0 or an error.
 */
int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	unsigned len = sbi->s_dirsize;
	int err;

	folio_lock(folio);
	err = minix_prepare_chunk(folio, pos, len);
	if (err) {
		folio_unlock(folio);
		return err;
	}
	if (sbi->s_version == MINIX_V3)
		((minix3_dirent *)de)->inode = 0;
	else
		de->inode = 0;
	dir_commit_chunk(folio, pos, len);	/* also unlocks the folio */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
	return minix_handle_dirsync(inode);
}
309
/*
 * Write the "." and ".." entries into freshly-created directory @inode
 * ("." -> the new directory itself, ".." -> parent @dir). Returns 0 or
 * an error from chunk preparation/commit/sync.
 */
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
	if (err) {
		folio_unlock(folio);
		goto fail;
	}

	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, folio_size(folio));	/* zero the whole folio, not just two slots */

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_local(kaddr);

	dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
	err = minix_handle_dirsync(inode);
fail:
	folio_put(folio);
	return err;
}
353
/*
 * routine to check that the specified directory is empty (for rmdir)
 *
 * Returns 1 when every live entry is "." or ".." (and "." points back
 * at this directory), 0 otherwise. Unreadable folios are skipped.
 */
int minix_empty_dir(struct inode * inode)
{
	struct folio *folio = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name, *kaddr;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *limit;

		kaddr = dir_get_folio(inode, i, &folio);
		if (IS_ERR(kaddr))
			continue;

		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					/* "." must reference this directory */
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	folio_release_kmap(folio, kaddr);
	return 0;
}
405
/*
 * Point existing directory entry @de (inside mapped @folio) at @inode —
 * used by rename to overwrite the target entry. Commits the chunk,
 * updates timestamps and syncs. The folio is NOT released here; the
 * caller retains the kmap and reference (the old "Releases the page"
 * comment predates the folio conversion and no longer matches the code).
 */
int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
		struct inode *inode)
{
	struct inode *dir = folio->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	int err;

	folio_lock(folio);
	err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
	if (err) {
		folio_unlock(folio);
		return err;
	}
	if (sbi->s_version == MINIX_V3)
		((minix3_dirent *)de)->inode = inode->i_ino;
	else
		de->inode = inode->i_ino;
	dir_commit_chunk(folio, pos, sbi->s_dirsize);	/* also unlocks the folio */
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	return minix_handle_dirsync(dir);
}
430
431struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
432{
433 struct minix_sb_info *sbi = minix_sb(dir->i_sb);
434 struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);
435
436 if (!IS_ERR(de))
437 return minix_next_entry(de, sbi);
438 return NULL;
439}
440
441ino_t minix_inode_by_name(struct dentry *dentry)
442{
443 struct folio *folio;
444 struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
445 ino_t res = 0;
446
447 if (de) {
448 struct inode *inode = folio->mapping->host;
449 struct minix_sb_info *sbi = minix_sb(inode->i_sb);
450
451 if (sbi->s_version == MINIX_V3)
452 res = ((minix3_dirent *) de)->inode;
453 else
454 res = de->inode;
455 folio_release_kmap(folio, de);
456 }
457 return res;
458}