fs/minix/dir.c in v3.5.6
 
  1/*
  2 *  linux/fs/minix/dir.c
  3 *
  4 *  Copyright (C) 1991, 1992 Linus Torvalds
  5 *
  6 *  minix directory handling functions
  7 *
  8 *  Updated to filesystem version 3 by Daniel Aragones
  9 */
 10
 11#include "minix.h"
 12#include <linux/buffer_head.h>
 13#include <linux/highmem.h>
 14#include <linux/swap.h>
 15
 16typedef struct minix_dir_entry minix_dirent;
 17typedef struct minix3_dir_entry minix3_dirent;
 18
 19static int minix_readdir(struct file *, void *, filldir_t);
 20
 21const struct file_operations minix_dir_operations = {
 22	.llseek		= generic_file_llseek,
 23	.read		= generic_read_dir,
 24	.readdir	= minix_readdir,
 25	.fsync		= generic_file_fsync,
 26};
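/*
 * In this version directory iteration goes through ->readdir(): the VFS
 * passes an opaque buffer plus a filldir callback, and a non-zero return
 * from filldir tells minix_readdir() below to stop emitting entries.
 */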
 27
 28static inline void dir_put_page(struct page *page)
 29{
 30	kunmap(page);
 31	page_cache_release(page);
 32}
 33
 34/*
 35 * Return the offset into page `page_nr' of the last valid
 36 * byte in that page, plus one.
 37 */
 38static unsigned
 39minix_last_byte(struct inode *inode, unsigned long page_nr)
 40{
 41	unsigned last_byte = PAGE_CACHE_SIZE;
 42
 43	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
 44		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
 45	return last_byte;
 46}
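/*
 * Example: with 4096-byte pages and i_size == 5000, page 0 is fully
 * valid (minix_last_byte() == 4096), while page 1 holds only the final
 * 5000 & 4095 == 904 bytes of directory data.
 */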
 47
 48static inline unsigned long dir_pages(struct inode *inode)
 49{
 50	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
 51}
 52
 53static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
 54{
 55	struct address_space *mapping = page->mapping;
 56	struct inode *dir = mapping->host;
 57	int err = 0;
 58	block_write_end(NULL, mapping, pos, len, len, page, NULL);
 59
 60	if (pos+len > dir->i_size) {
 61		i_size_write(dir, pos+len);
 62		mark_inode_dirty(dir);
 63	}
 64	if (IS_DIRSYNC(dir))
 65		err = write_one_page(page, 1);
 66	else
 67		unlock_page(page);
 68	return err;
 69}
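/*
 * When IS_DIRSYNC(dir) is true the chunk is written out synchronously
 * via write_one_page(page, 1) (the second argument asks it to wait);
 * otherwise the page is just unlocked and left for normal writeback.
 */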
 70
 71static struct page * dir_get_page(struct inode *dir, unsigned long n)
 72{
 73	struct address_space *mapping = dir->i_mapping;
 74	struct page *page = read_mapping_page(mapping, n, NULL);
 75	if (!IS_ERR(page))
 76		kmap(page);
 77	return page;
 78}
 79
 80static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
 81{
 82	return (void*)((char*)de + sbi->s_dirsize);
 83}
 84
 85static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
 86{
 87	unsigned long pos = filp->f_pos;
 88	struct inode *inode = filp->f_path.dentry->d_inode;
 89	struct super_block *sb = inode->i_sb;
 90	unsigned offset = pos & ~PAGE_CACHE_MASK;
 91	unsigned long n = pos >> PAGE_CACHE_SHIFT;
 92	unsigned long npages = dir_pages(inode);
 93	struct minix_sb_info *sbi = minix_sb(sb);
 94	unsigned chunk_size = sbi->s_dirsize;
 95	char *name;
 96	__u32 inumber;
 97
 98	pos = (pos + chunk_size-1) & ~(chunk_size-1);
 99	if (pos >= inode->i_size)
100		goto done;
101
102	for ( ; n < npages; n++, offset = 0) {
103		char *p, *kaddr, *limit;
104		struct page *page = dir_get_page(inode, n);
105
106		if (IS_ERR(page))
107			continue;
108		kaddr = (char *)page_address(page);
109		p = kaddr+offset;
110		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
111		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
112			if (sbi->s_version == MINIX_V3) {
113				minix3_dirent *de3 = (minix3_dirent *)p;
114				name = de3->name;
115				inumber = de3->inode;
116	 		} else {
117				minix_dirent *de = (minix_dirent *)p;
118				name = de->name;
119				inumber = de->inode;
120			}
121			if (inumber) {
122				int over;
123
124				unsigned l = strnlen(name, sbi->s_namelen);
125				offset = p - kaddr;
126				over = filldir(dirent, name, l,
127					(n << PAGE_CACHE_SHIFT) | offset,
128					inumber, DT_UNKNOWN);
129				if (over) {
130					dir_put_page(page);
131					goto done;
132				}
133			}
134		}
135		dir_put_page(page);
136	}
137
138done:
139	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
140	return 0;
141}
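/*
 * f_pos is encoded as (page_index << PAGE_CACHE_SHIFT) | byte_offset and
 * is rounded up to a chunk boundary on entry, so a readdir that stopped
 * partway through a page resumes at the next directory entry.
 */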
142
143static inline int namecompare(int len, int maxlen,
144	const char * name, const char * buffer)
145{
146	if (len < maxlen && buffer[len])
147		return 0;
148	return !memcmp(name, buffer, len);
149}
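/*
 * On-disk names are fixed-width and NUL-padded: namecompare() reports a
 * match only when the stored name is not longer than the one being looked
 * up (buffer[len] == 0, or len == maxlen) and the first len bytes agree.
 */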
150
151/*
152 *	minix_find_entry()
153 *
154 * finds an entry in the specified directory with the wanted name. It
155 * returns the cache buffer in which the entry was found, and the entry
156 * itself (as a parameter - res_dir). It does NOT read the inode of the
157 * entry - you'll have to do that yourself if you want to.
158 */
159minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
160{
161	const char * name = dentry->d_name.name;
162	int namelen = dentry->d_name.len;
163	struct inode * dir = dentry->d_parent->d_inode;
164	struct super_block * sb = dir->i_sb;
165	struct minix_sb_info * sbi = minix_sb(sb);
166	unsigned long n;
167	unsigned long npages = dir_pages(dir);
168	struct page *page = NULL;
169	char *p;
170
171	char *namx;
172	__u32 inumber;
173	*res_page = NULL;
174
175	for (n = 0; n < npages; n++) {
176		char *kaddr, *limit;
177
178		page = dir_get_page(dir, n);
179		if (IS_ERR(page))
180			continue;
181
182		kaddr = (char*)page_address(page);
183		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
184		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
185			if (sbi->s_version == MINIX_V3) {
186				minix3_dirent *de3 = (minix3_dirent *)p;
187				namx = de3->name;
188				inumber = de3->inode;
189 			} else {
190				minix_dirent *de = (minix_dirent *)p;
191				namx = de->name;
192				inumber = de->inode;
193			}
194			if (!inumber)
195				continue;
196			if (namecompare(namelen, sbi->s_namelen, name, namx))
197				goto found;
198		}
199		dir_put_page(page);
200	}
201	return NULL;
202
203found:
204	*res_page = page;
205	return (minix_dirent *)p;
206}
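/*
 * On success the entry's page is still kmapped and referenced; it is
 * handed back through *res_page and the caller drops both with
 * dir_put_page(), as minix_inode_by_name() does below.
 */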
207
208int minix_add_link(struct dentry *dentry, struct inode *inode)
209{
210	struct inode *dir = dentry->d_parent->d_inode;
211	const char * name = dentry->d_name.name;
212	int namelen = dentry->d_name.len;
213	struct super_block * sb = dir->i_sb;
214	struct minix_sb_info * sbi = minix_sb(sb);
215	struct page *page = NULL;
216	unsigned long npages = dir_pages(dir);
217	unsigned long n;
218	char *kaddr, *p;
219	minix_dirent *de;
220	minix3_dirent *de3;
221	loff_t pos;
222	int err;
223	char *namx = NULL;
224	__u32 inumber;
225
226	/*
227	 * We take care of directory expansion in the same loop
228	 * This code plays outside i_size, so it locks the page
229	 * to protect that region.
230	 */
231	for (n = 0; n <= npages; n++) {
232		char *limit, *dir_end;
233
234		page = dir_get_page(dir, n);
235		err = PTR_ERR(page);
236		if (IS_ERR(page))
237			goto out;
238		lock_page(page);
239		kaddr = (char*)page_address(page);
240		dir_end = kaddr + minix_last_byte(dir, n);
241		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
242		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
243			de = (minix_dirent *)p;
244			de3 = (minix3_dirent *)p;
245			if (sbi->s_version == MINIX_V3) {
246				namx = de3->name;
247				inumber = de3->inode;
248		 	} else {
249  				namx = de->name;
250				inumber = de->inode;
251			}
252			if (p == dir_end) {
253				/* We hit i_size */
254				if (sbi->s_version == MINIX_V3)
255					de3->inode = 0;
256		 		else
257					de->inode = 0;
258				goto got_it;
259			}
260			if (!inumber)
261				goto got_it;
262			err = -EEXIST;
263			if (namecompare(namelen, sbi->s_namelen, name, namx))
264				goto out_unlock;
265		}
266		unlock_page(page);
267		dir_put_page(page);
268	}
269	BUG();
270	return -EINVAL;
271
272got_it:
273	pos = page_offset(page) + p - (char *)page_address(page);
274	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
275	if (err)
276		goto out_unlock;
277	memcpy (namx, name, namelen);
278	if (sbi->s_version == MINIX_V3) {
279		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
280		de3->inode = inode->i_ino;
281	} else {
282		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
283		de->inode = inode->i_ino;
284	}
285	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
286	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
287	mark_inode_dirty(dir);
288out_put:
289	dir_put_page(page);
290out:
291	return err;
292out_unlock:
293	unlock_page(page);
294	goto out_put;
295}
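/*
 * Note the expansion trick above: the scan runs one page past the current
 * i_size (n <= npages), and hitting p == dir_end means the slot just past
 * the end of the directory is free -- its inode field is zeroed and the
 * slot reused, which grows the directory by one chunk once committed.
 */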
296
297int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
298{
299	struct inode *inode = page->mapping->host;
300	char *kaddr = page_address(page);
301	loff_t pos = page_offset(page) + (char*)de - kaddr;
302	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
303	unsigned len = sbi->s_dirsize;
304	int err;
305
306	lock_page(page);
307	err = minix_prepare_chunk(page, pos, len);
308	if (err == 0) {
309		if (sbi->s_version == MINIX_V3)
310			((minix3_dirent *) de)->inode = 0;
311		else
312			de->inode = 0;
313		err = dir_commit_chunk(page, pos, len);
314	} else {
315		unlock_page(page);
316	}
317	dir_put_page(page);
318	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
319	mark_inode_dirty(inode);
320	return err;
321}
322
323int minix_make_empty(struct inode *inode, struct inode *dir)
324{
325	struct page *page = grab_cache_page(inode->i_mapping, 0);
326	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
327	char *kaddr;
328	int err;
329
330	if (!page)
331		return -ENOMEM;
332	err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
333	if (err) {
334		unlock_page(page);
335		goto fail;
336	}
337
338	kaddr = kmap_atomic(page);
339	memset(kaddr, 0, PAGE_CACHE_SIZE);
340
341	if (sbi->s_version == MINIX_V3) {
342		minix3_dirent *de3 = (minix3_dirent *)kaddr;
343
344		de3->inode = inode->i_ino;
345		strcpy(de3->name, ".");
346		de3 = minix_next_entry(de3, sbi);
347		de3->inode = dir->i_ino;
348		strcpy(de3->name, "..");
349	} else {
350		minix_dirent *de = (minix_dirent *)kaddr;
351
352		de->inode = inode->i_ino;
353		strcpy(de->name, ".");
354		de = minix_next_entry(de, sbi);
355		de->inode = dir->i_ino;
356		strcpy(de->name, "..");
357	}
358	kunmap_atomic(kaddr);
359
360	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
361fail:
362	page_cache_release(page);
363	return err;
364}
365
366/*
367 * routine to check that the specified directory is empty (for rmdir)
368 */
369int minix_empty_dir(struct inode * inode)
370{
371	struct page *page = NULL;
372	unsigned long i, npages = dir_pages(inode);
373	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
374	char *name;
375	__u32 inumber;
376
377	for (i = 0; i < npages; i++) {
378		char *p, *kaddr, *limit;
379
380		page = dir_get_page(inode, i);
381		if (IS_ERR(page))
382			continue;
383
384		kaddr = (char *)page_address(page);
385		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
386		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
387			if (sbi->s_version == MINIX_V3) {
388				minix3_dirent *de3 = (minix3_dirent *)p;
389				name = de3->name;
390				inumber = de3->inode;
391			} else {
392				minix_dirent *de = (minix_dirent *)p;
393				name = de->name;
394				inumber = de->inode;
395			}
396
397			if (inumber != 0) {
398				/* check for . and .. */
399				if (name[0] != '.')
400					goto not_empty;
401				if (!name[1]) {
402					if (inumber != inode->i_ino)
403						goto not_empty;
404				} else if (name[1] != '.')
405					goto not_empty;
406				else if (name[2])
407					goto not_empty;
408			}
409		}
410		dir_put_page(page);
411	}
412	return 1;
413
414not_empty:
415	dir_put_page(page);
416	return 0;
417}
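/*
 * A directory counts as empty when every in-use entry is either "."
 * (and refers to the directory itself) or ".."; any other name, or a
 * "." entry pointing elsewhere, makes the directory non-empty.
 */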
418
419/* Releases the page */
420void minix_set_link(struct minix_dir_entry *de, struct page *page,
421	struct inode *inode)
422{
423	struct inode *dir = page->mapping->host;
424	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
425	loff_t pos = page_offset(page) +
426			(char *)de-(char*)page_address(page);
427	int err;
428
429	lock_page(page);
430
431	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
432	if (err == 0) {
433		if (sbi->s_version == MINIX_V3)
434			((minix3_dirent *) de)->inode = inode->i_ino;
435		else
436			de->inode = inode->i_ino;
437		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
438	} else {
439		unlock_page(page);
440	}
441	dir_put_page(page);
442	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
443	mark_inode_dirty(dir);
444}
445
446struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
447{
448	struct page *page = dir_get_page(dir, 0);
449	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
450	struct minix_dir_entry *de = NULL;
451
452	if (!IS_ERR(page)) {
453		de = minix_next_entry(page_address(page), sbi);
454		*p = page;
455	}
456	return de;
457}
458
459ino_t minix_inode_by_name(struct dentry *dentry)
460{
461	struct page *page;
462	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
463	ino_t res = 0;
464
465	if (de) {
466		struct address_space *mapping = page->mapping;
467		struct inode *inode = mapping->host;
468		struct minix_sb_info *sbi = minix_sb(inode->i_sb);
469
470		if (sbi->s_version == MINIX_V3)
471			res = ((minix3_dirent *) de)->inode;
472		else
473			res = de->inode;
474		dir_put_page(page);
475	}
476	return res;
477}
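The fixed-width entry handling above translates directly to userspace. The following is a minimal sketch, not kernel code, assuming a 32-byte entry with a 30-byte name field (the real values come from the superblock via sbi->s_dirsize and sbi->s_namelen); it reproduces the namecompare() rule and the chunk rounding that minix_readdir() applies to f_pos.

/* Userspace sketch: assumed 32/30 geometry, illustration only. */
#include <stdio.h>
#include <string.h>

#define DIRSIZE 32	/* stands in for sbi->s_dirsize */
#define NAMELEN 30	/* stands in for sbi->s_namelen */

/* Same rule as namecompare() above: a shorter name matches only if the
 * fixed-width on-disk field is NUL-padded right after it. */
static int namecompare(int len, int maxlen, const char *name, const char *buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

int main(void)
{
	char slot_foo[NAMELEN]    = "foo";	/* remainder is zero padding */
	char slot_foobar[NAMELEN] = "foobar";
	unsigned long pos = 70;

	printf("%d\n", namecompare(3, NAMELEN, "foo", slot_foo));	/* 1 */
	printf("%d\n", namecompare(3, NAMELEN, "foo", slot_foobar));	/* 0 */

	/* minix_readdir() rounds f_pos up to the next chunk boundary: */
	pos = (pos + DIRSIZE - 1) & ~(unsigned long)(DIRSIZE - 1);
	printf("%lu\n", pos);	/* 96 */
	return 0;
}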
fs/minix/dir.c in v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/minix/dir.c
  4 *
  5 *  Copyright (C) 1991, 1992 Linus Torvalds
  6 *
  7 *  minix directory handling functions
  8 *
  9 *  Updated to filesystem version 3 by Daniel Aragones
 10 */
 11
 12#include "minix.h"
 13#include <linux/buffer_head.h>
 14#include <linux/highmem.h>
 15#include <linux/swap.h>
 16
 17typedef struct minix_dir_entry minix_dirent;
 18typedef struct minix3_dir_entry minix3_dirent;
 19
 20static int minix_readdir(struct file *, struct dir_context *);
 21
 22const struct file_operations minix_dir_operations = {
 23	.llseek		= generic_file_llseek,
 24	.read		= generic_read_dir,
 25	.iterate_shared	= minix_readdir,
 26	.fsync		= generic_file_fsync,
 27};
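/*
 * Directory iteration now goes through ->iterate_shared() with a
 * struct dir_context: entries are handed to the VFS with dir_emit(),
 * which returns false once the caller can take no more entries.
 */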
 28
 29/*
 30 * Return the offset into page `page_nr' of the last valid
 31 * byte in that page, plus one.
 32 */
 33static unsigned
 34minix_last_byte(struct inode *inode, unsigned long page_nr)
 35{
 36	unsigned last_byte = PAGE_SIZE;
 37
 38	if (page_nr == (inode->i_size >> PAGE_SHIFT))
 39		last_byte = inode->i_size & (PAGE_SIZE - 1);
 40	return last_byte;
 41}
 42
 43static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 44{
 45	struct address_space *mapping = folio->mapping;
 46	struct inode *dir = mapping->host;
 47
 48	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
 49
 50	if (pos+len > dir->i_size) {
 51		i_size_write(dir, pos+len);
 52		mark_inode_dirty(dir);
 53	}
 54	folio_unlock(folio);
 55}
 56
 57static int minix_handle_dirsync(struct inode *dir)
 58{
 59	int err;
 60
 61	err = filemap_write_and_wait(dir->i_mapping);
 62	if (!err)
 63		err = sync_inode_metadata(dir, 1);
 64	return err;
 65}
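/*
 * Used after dir_commit_chunk() in place of the old synchronous
 * write_one_page() call: the whole directory mapping is flushed, then
 * the inode metadata, and any error is handed back to the caller.
 */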
 66
 67static void *dir_get_folio(struct inode *dir, unsigned long n,
 68		struct folio **foliop)
 69{
 70	struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
 71
 72	if (IS_ERR(folio))
 73		return ERR_CAST(folio);
 74	*foliop = folio;
 75	return kmap_local_folio(folio, 0);
 76}
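/*
 * Returns a kmap_local address for the start of the folio and hands the
 * folio itself back through *foliop; every successful caller is expected
 * to undo both with folio_release_kmap().
 */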
 77
 78static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
 79{
 80	return (void*)((char*)de + sbi->s_dirsize);
 81}
 82
 83static int minix_readdir(struct file *file, struct dir_context *ctx)
 84{
 85	struct inode *inode = file_inode(file);
 86	struct super_block *sb = inode->i_sb;
 87	struct minix_sb_info *sbi = minix_sb(sb);
 88	unsigned chunk_size = sbi->s_dirsize;
 89	unsigned long npages = dir_pages(inode);
 90	unsigned long pos = ctx->pos;
 91	unsigned offset;
 92	unsigned long n;
 93
 94	ctx->pos = pos = ALIGN(pos, chunk_size);
 95	if (pos >= inode->i_size)
 96		return 0;
 97
 98	offset = pos & ~PAGE_MASK;
 99	n = pos >> PAGE_SHIFT;
100
101	for ( ; n < npages; n++, offset = 0) {
102		char *p, *kaddr, *limit;
103		struct folio *folio;
104
105		kaddr = dir_get_folio(inode, n, &folio);
106		if (IS_ERR(kaddr))
107			continue;
108		p = kaddr+offset;
109		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
110		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
111			const char *name;
112			__u32 inumber;
113			if (sbi->s_version == MINIX_V3) {
114				minix3_dirent *de3 = (minix3_dirent *)p;
115				name = de3->name;
116				inumber = de3->inode;
117	 		} else {
118				minix_dirent *de = (minix_dirent *)p;
119				name = de->name;
120				inumber = de->inode;
121			}
122			if (inumber) {
123				unsigned l = strnlen(name, sbi->s_namelen);
124				if (!dir_emit(ctx, name, l,
125					      inumber, DT_UNKNOWN)) {
126					folio_release_kmap(folio, p);
127					return 0;
128				}
129			}
130			ctx->pos += chunk_size;
131		}
132		folio_release_kmap(folio, kaddr);
133	}
134	return 0;
135}
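/*
 * ctx->pos advances by one chunk for every slot scanned and is left
 * pointing at the first unemitted entry when dir_emit() says the
 * caller's buffer is full, so the next call resumes exactly there.
 */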
136
137static inline int namecompare(int len, int maxlen,
138	const char * name, const char * buffer)
139{
140	if (len < maxlen && buffer[len])
141		return 0;
142	return !memcmp(name, buffer, len);
143}
144
145/*
146 *	minix_find_entry()
147 *
148 * finds an entry in the specified directory with the wanted name.
149 * It does NOT read the inode of the
150 * entry - you'll have to do that yourself if you want to.
151 * 
152 * On Success folio_release_kmap() should be called on *foliop.
153 */
154minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
155{
156	const char * name = dentry->d_name.name;
157	int namelen = dentry->d_name.len;
158	struct inode * dir = d_inode(dentry->d_parent);
159	struct super_block * sb = dir->i_sb;
160	struct minix_sb_info * sbi = minix_sb(sb);
161	unsigned long n;
162	unsigned long npages = dir_pages(dir);
163	char *p;
164
165	char *namx;
166	__u32 inumber;
167
168	for (n = 0; n < npages; n++) {
169		char *kaddr, *limit;
170
171		kaddr = dir_get_folio(dir, n, foliop);
172		if (IS_ERR(kaddr))
173			continue;
174
175		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
176		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
177			if (sbi->s_version == MINIX_V3) {
178				minix3_dirent *de3 = (minix3_dirent *)p;
179				namx = de3->name;
180				inumber = de3->inode;
181 			} else {
182				minix_dirent *de = (minix_dirent *)p;
183				namx = de->name;
184				inumber = de->inode;
185			}
186			if (!inumber)
187				continue;
188			if (namecompare(namelen, sbi->s_namelen, name, namx))
189				goto found;
190		}
191		folio_release_kmap(*foliop, kaddr);
192	}
193	return NULL;
194
195found:
196	return (minix_dirent *)p;
197}
198
199int minix_add_link(struct dentry *dentry, struct inode *inode)
200{
201	struct inode *dir = d_inode(dentry->d_parent);
202	const char * name = dentry->d_name.name;
203	int namelen = dentry->d_name.len;
204	struct super_block * sb = dir->i_sb;
205	struct minix_sb_info * sbi = minix_sb(sb);
206	struct folio *folio = NULL;
207	unsigned long npages = dir_pages(dir);
208	unsigned long n;
209	char *kaddr, *p;
210	minix_dirent *de;
211	minix3_dirent *de3;
212	loff_t pos;
213	int err;
214	char *namx = NULL;
215	__u32 inumber;
216
217	/*
218	 * We take care of directory expansion in the same loop
219	 * This code plays outside i_size, so it locks the page
220	 * to protect that region.
221	 */
222	for (n = 0; n <= npages; n++) {
223		char *limit, *dir_end;
224
225		kaddr = dir_get_folio(dir, n, &folio);
226		if (IS_ERR(kaddr))
227			return PTR_ERR(kaddr);
228		folio_lock(folio);
229		dir_end = kaddr + minix_last_byte(dir, n);
230		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
231		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
232			de = (minix_dirent *)p;
233			de3 = (minix3_dirent *)p;
234			if (sbi->s_version == MINIX_V3) {
235				namx = de3->name;
236				inumber = de3->inode;
237		 	} else {
238  				namx = de->name;
239				inumber = de->inode;
240			}
241			if (p == dir_end) {
242				/* We hit i_size */
243				if (sbi->s_version == MINIX_V3)
244					de3->inode = 0;
245		 		else
246					de->inode = 0;
247				goto got_it;
248			}
249			if (!inumber)
250				goto got_it;
251			err = -EEXIST;
252			if (namecompare(namelen, sbi->s_namelen, name, namx))
253				goto out_unlock;
254		}
255		folio_unlock(folio);
256		folio_release_kmap(folio, kaddr);
257	}
258	BUG();
259	return -EINVAL;
260
261got_it:
262	pos = folio_pos(folio) + offset_in_folio(folio, p);
263	err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
264	if (err)
265		goto out_unlock;
266	memcpy (namx, name, namelen);
267	if (sbi->s_version == MINIX_V3) {
268		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
269		de3->inode = inode->i_ino;
270	} else {
271		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
272		de->inode = inode->i_ino;
273	}
274	dir_commit_chunk(folio, pos, sbi->s_dirsize);
275	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
276	mark_inode_dirty(dir);
277	err = minix_handle_dirsync(dir);
278out_put:
279	folio_release_kmap(folio, kaddr);
280	return err;
281out_unlock:
282	folio_unlock(folio);
283	goto out_put;
284}
285
286int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
287{
288	struct inode *inode = folio->mapping->host;
289	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
290	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
291	unsigned len = sbi->s_dirsize;
292	int err;
293
294	folio_lock(folio);
295	err = minix_prepare_chunk(folio, pos, len);
296	if (err) {
297		folio_unlock(folio);
298		return err;
299	}
300	if (sbi->s_version == MINIX_V3)
301		((minix3_dirent *)de)->inode = 0;
302	else
303		de->inode = 0;
304	dir_commit_chunk(folio, pos, len);
305	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
306	mark_inode_dirty(inode);
307	return minix_handle_dirsync(inode);
308}
309
310int minix_make_empty(struct inode *inode, struct inode *dir)
311{
312	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
313	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
314	char *kaddr;
315	int err;
316
317	if (IS_ERR(folio))
318		return PTR_ERR(folio);
319	err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
320	if (err) {
321		folio_unlock(folio);
322		goto fail;
323	}
324
325	kaddr = kmap_local_folio(folio, 0);
326	memset(kaddr, 0, folio_size(folio));
327
328	if (sbi->s_version == MINIX_V3) {
329		minix3_dirent *de3 = (minix3_dirent *)kaddr;
330
331		de3->inode = inode->i_ino;
332		strcpy(de3->name, ".");
333		de3 = minix_next_entry(de3, sbi);
334		de3->inode = dir->i_ino;
335		strcpy(de3->name, "..");
336	} else {
337		minix_dirent *de = (minix_dirent *)kaddr;
338
339		de->inode = inode->i_ino;
340		strcpy(de->name, ".");
341		de = minix_next_entry(de, sbi);
342		de->inode = dir->i_ino;
343		strcpy(de->name, "..");
344	}
345	kunmap_local(kaddr);
346
347	dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
348	err = minix_handle_dirsync(inode);
349fail:
350	folio_put(folio);
351	return err;
352}
353
354/*
355 * routine to check that the specified directory is empty (for rmdir)
356 */
357int minix_empty_dir(struct inode * inode)
358{
359	struct folio *folio = NULL;
360	unsigned long i, npages = dir_pages(inode);
361	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
362	char *name, *kaddr;
363	__u32 inumber;
364
365	for (i = 0; i < npages; i++) {
366		char *p, *limit;
367
368		kaddr = dir_get_folio(inode, i, &folio);
369		if (IS_ERR(kaddr))
370			continue;
371
372		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
373		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
374			if (sbi->s_version == MINIX_V3) {
375				minix3_dirent *de3 = (minix3_dirent *)p;
376				name = de3->name;
377				inumber = de3->inode;
378			} else {
379				minix_dirent *de = (minix_dirent *)p;
380				name = de->name;
381				inumber = de->inode;
382			}
383
384			if (inumber != 0) {
385				/* check for . and .. */
386				if (name[0] != '.')
387					goto not_empty;
388				if (!name[1]) {
389					if (inumber != inode->i_ino)
390						goto not_empty;
391				} else if (name[1] != '.')
392					goto not_empty;
393				else if (name[2])
394					goto not_empty;
395			}
396		}
397		folio_release_kmap(folio, kaddr);
398	}
399	return 1;
400
401not_empty:
402	folio_release_kmap(folio, kaddr);
403	return 0;
404}
405
406/* Releases the page */
407int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
408		struct inode *inode)
409{
410	struct inode *dir = folio->mapping->host;
411	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
412	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
413	int err;
414
415	folio_lock(folio);
416	err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
417	if (err) {
418		folio_unlock(folio);
419		return err;
420	}
421	if (sbi->s_version == MINIX_V3)
422		((minix3_dirent *)de)->inode = inode->i_ino;
423	else
424		de->inode = inode->i_ino;
425	dir_commit_chunk(folio, pos, sbi->s_dirsize);
426	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
427	mark_inode_dirty(dir);
428	return minix_handle_dirsync(dir);
429}
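/*
 * Unlike the older void version, minix_set_link() now returns the result
 * of minix_handle_dirsync() so a failed directory write can be propagated
 * by the caller instead of being silently ignored.
 */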
430
431struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
432{
433	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
434	struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);
435
436	if (!IS_ERR(de))
437		return minix_next_entry(de, sbi);
438	return NULL;
439}
440
441ino_t minix_inode_by_name(struct dentry *dentry)
442{
443	struct folio *folio;
444	struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
445	ino_t res = 0;
446
447	if (de) {
448		struct inode *inode = folio->mapping->host;
449		struct minix_sb_info *sbi = minix_sb(inode->i_sb);
450
451		if (sbi->s_version == MINIX_V3)
452			res = ((minix3_dirent *) de)->inode;
453		else
454			res = de->inode;
455		folio_release_kmap(folio, de);
456	}
457	return res;
458}
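For reference, the on-disk format both versions above parse can be modelled in userspace as fixed-size slots whose first field is the inode number (16-bit before MINIX_V3, 32-bit for MINIX_V3, as the memset arithmetic in minix_add_link() shows) followed by a NUL-padded name. The sketch below walks one such block the way minix_readdir() does, skipping slots with inode 0; the 2-byte-inode plus 30-byte-name geometry is an assumed example, not read from a superblock.

/* Userspace sketch of one block of V1/V2-style entries (assumed 2-byte
 * inode number + 30-byte NUL-padded name, i.e. a 32-byte slot). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_dirent {
	uint16_t inode;		/* 0 means the slot is unused */
	char name[30];		/* NUL-padded, not necessarily NUL-terminated */
};

static void list_entries(const unsigned char *block, size_t size)
{
	size_t off;

	for (off = 0; off + sizeof(struct sketch_dirent) <= size;
	     off += sizeof(struct sketch_dirent)) {
		struct sketch_dirent de;

		memcpy(&de, block + off, sizeof(de));
		if (!de.inode)		/* empty slot, as in minix_readdir() */
			continue;
		printf("%5u %.*s\n", (unsigned)de.inode,
		       (int)sizeof(de.name), de.name);
	}
}

int main(void)
{
	unsigned char block[3 * sizeof(struct sketch_dirent)] = { 0 };
	struct sketch_dirent de = { .inode = 2 };

	strcpy(de.name, ".");
	memcpy(block, &de, sizeof(de));
	de.inode = 1;
	strcpy(de.name, "..");
	memcpy(block + sizeof(de), &de, sizeof(de));
	/* third slot stays all-zero and is skipped */

	list_entries(block, sizeof(block));
	return 0;
}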