1// SPDX-License-Identifier: GPL-2.0
2/*
3 * (C) 2001 Clemson University and The University of Chicago
4 * Copyright 2018 Omnibond Systems, L.L.C.
5 *
6 * See COPYING in top-level directory.
7 */
8
9/*
10 * Linux VFS inode operations.
11 */
12
13#include <linux/blkdev.h>
14#include <linux/fileattr.h>
15#include "protocol.h"
16#include "orangefs-kernel.h"
17#include "orangefs-bufmap.h"
18
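/*
 * Write a single locked page to the server, using the write range
 * (position, length and credentials) stashed in its private data and
 * clamped to i_size.  Writeback is started here and completed by the
 * caller once this returns.
 */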
19static int orangefs_writepage_locked(struct page *page,
20 struct writeback_control *wbc)
21{
22 struct inode *inode = page->mapping->host;
23 struct orangefs_write_range *wr = NULL;
24 struct iov_iter iter;
25 struct bio_vec bv;
26 size_t len, wlen;
27 ssize_t ret;
28 loff_t off;
29
30 set_page_writeback(page);
31
32 len = i_size_read(inode);
33 if (PagePrivate(page)) {
34 wr = (struct orangefs_write_range *)page_private(page);
35 WARN_ON(wr->pos >= len);
36 off = wr->pos;
37 if (off + wr->len > len)
38 wlen = len - off;
39 else
40 wlen = wr->len;
41 } else {
42 WARN_ON(1);
43 off = page_offset(page);
44 if (off + PAGE_SIZE > len)
45 wlen = len - off;
46 else
47 wlen = PAGE_SIZE;
48 }
49 /* Should've been handled in orangefs_invalidate_folio. */
50 WARN_ON(off == len || off + wlen > len);
51
52 WARN_ON(wlen == 0);
53 bvec_set_page(&bv, page, wlen, off % PAGE_SIZE);
54 iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen);
55
56 ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
57 len, wr, NULL, NULL);
58 if (ret < 0) {
59 mapping_set_error(page->mapping, ret);
60 } else {
61 ret = 0;
62 }
63 kfree(detach_page_private(page));
64 return ret;
65}
66
67static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
68{
69 int ret;
70 ret = orangefs_writepage_locked(page, wbc);
71 unlock_page(page);
72 end_page_writeback(page);
73 return ret;
74}
75
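/*
 * Context for orangefs_writepages: dirty pages whose write ranges are
 * contiguous and carry the same credentials are batched here (up to one
 * bufmap-sized I/O) and flushed with a single server write.
 */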
76struct orangefs_writepages {
77 loff_t off;
78 size_t len;
79 kuid_t uid;
80 kgid_t gid;
81 int maxpages;
82 int npages;
83 struct page **pages;
84 struct bio_vec *bv;
85};
86
87static int orangefs_writepages_work(struct orangefs_writepages *ow,
88 struct writeback_control *wbc)
89{
90 struct inode *inode = ow->pages[0]->mapping->host;
91 struct orangefs_write_range *wrp, wr;
92 struct iov_iter iter;
93 ssize_t ret;
94 size_t len;
95 loff_t off;
96 int i;
97
98 len = i_size_read(inode);
99
100 for (i = 0; i < ow->npages; i++) {
101 set_page_writeback(ow->pages[i]);
102 bvec_set_page(&ow->bv[i], ow->pages[i],
103 min(page_offset(ow->pages[i]) + PAGE_SIZE,
104 ow->off + ow->len) -
105 max(ow->off, page_offset(ow->pages[i])),
106 i == 0 ? ow->off - page_offset(ow->pages[i]) : 0);
107 }
108 iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->npages, ow->len);
109
110 WARN_ON(ow->off >= len);
111 if (ow->off + ow->len > len)
112 ow->len = len - ow->off;
113
114 off = ow->off;
115 wr.uid = ow->uid;
116 wr.gid = ow->gid;
117 ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
118 0, &wr, NULL, NULL);
119 if (ret < 0) {
120 for (i = 0; i < ow->npages; i++) {
121 mapping_set_error(ow->pages[i]->mapping, ret);
122 if (PagePrivate(ow->pages[i])) {
123 wrp = (struct orangefs_write_range *)
124 page_private(ow->pages[i]);
125 ClearPagePrivate(ow->pages[i]);
126 put_page(ow->pages[i]);
127 kfree(wrp);
128 }
129 end_page_writeback(ow->pages[i]);
130 unlock_page(ow->pages[i]);
131 }
132 } else {
133 ret = 0;
134 for (i = 0; i < ow->npages; i++) {
135 if (PagePrivate(ow->pages[i])) {
136 wrp = (struct orangefs_write_range *)
137 page_private(ow->pages[i]);
138 ClearPagePrivate(ow->pages[i]);
139 put_page(ow->pages[i]);
140 kfree(wrp);
141 }
142 end_page_writeback(ow->pages[i]);
143 unlock_page(ow->pages[i]);
144 }
145 }
146 return ret;
147}
148
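/*
 * Called by write_cache_pages() for each dirty folio.  Folios whose
 * recorded write ranges extend the current batch and share its uid/gid
 * are accumulated; anything else flushes the batch and, if the folio
 * cannot start a new one, is written out on its own.
 */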
149static int orangefs_writepages_callback(struct folio *folio,
150 struct writeback_control *wbc, void *data)
151{
152 struct orangefs_writepages *ow = data;
153 struct orangefs_write_range *wr = folio->private;
154 int ret;
155
156 if (!wr) {
157 folio_unlock(folio);
158 /* It's not private so there's nothing to write, right? */
159 printk("writepages_callback not private!\n");
160 BUG();
161 return 0;
162 }
163
164 ret = -1;
165 if (ow->npages == 0) {
166 ow->off = wr->pos;
167 ow->len = wr->len;
168 ow->uid = wr->uid;
169 ow->gid = wr->gid;
170 ow->pages[ow->npages++] = &folio->page;
171 ret = 0;
172 goto done;
173 }
174 if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
175 orangefs_writepages_work(ow, wbc);
176 ow->npages = 0;
177 ret = -1;
178 goto done;
179 }
180 if (ow->off + ow->len == wr->pos) {
181 ow->len += wr->len;
182 ow->pages[ow->npages++] = &folio->page;
183 ret = 0;
184 goto done;
185 }
186done:
187 if (ret == -1) {
188 if (ow->npages) {
189 orangefs_writepages_work(ow, wbc);
190 ow->npages = 0;
191 }
192 ret = orangefs_writepage_locked(&folio->page, wbc);
193 mapping_set_error(folio->mapping, ret);
194 folio_unlock(folio);
195 folio_end_writeback(folio);
196 } else {
197 if (ow->npages == ow->maxpages) {
198 orangefs_writepages_work(ow, wbc);
199 ow->npages = 0;
200 }
201 }
202 return ret;
203}
204
205static int orangefs_writepages(struct address_space *mapping,
206 struct writeback_control *wbc)
207{
208 struct orangefs_writepages *ow;
209 struct blk_plug plug;
210 int ret;
211 ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
212 if (!ow)
213 return -ENOMEM;
214 ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE;
215 ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
216 if (!ow->pages) {
217 kfree(ow);
218 return -ENOMEM;
219 }
220 ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
221 if (!ow->bv) {
222 kfree(ow->pages);
223 kfree(ow);
224 return -ENOMEM;
225 }
226 blk_start_plug(&plug);
227 ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
228 if (ow->npages)
229 ret = orangefs_writepages_work(ow, wbc);
230 blk_finish_plug(&plug);
231 kfree(ow->pages);
232 kfree(ow->bv);
233 kfree(ow);
234 return ret;
235}
236
237static int orangefs_launder_folio(struct folio *);
238
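/*
 * Fill the readahead window with a single server read.  The window is
 * expanded first (see the 4 MB heuristic below) and the data is read
 * straight into the page cache via an ITER_DEST xarray iterator.
 */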
239static void orangefs_readahead(struct readahead_control *rac)
240{
241 loff_t offset;
242 struct iov_iter iter;
243 struct inode *inode = rac->mapping->host;
244 struct xarray *i_pages;
245 struct folio *folio;
246 loff_t new_start = readahead_pos(rac);
247 int ret;
248 size_t new_len = 0;
249
250 loff_t bytes_remaining = inode->i_size - readahead_pos(rac);
251 loff_t pages_remaining = bytes_remaining / PAGE_SIZE;
252
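	/*
	 * Heuristic window expansion: with at least 1024 pages (4 MB on
	 * 4 KB pages) left in the file, grow the window to a fixed 4 MB
	 * (4194304 bytes); otherwise, if more remains than was requested,
	 * just read to the end of the file.
	 */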
253 if (pages_remaining >= 1024)
254 new_len = 4194304;
255 else if (pages_remaining > readahead_count(rac))
256 new_len = bytes_remaining;
257
258 if (new_len)
259 readahead_expand(rac, new_start, new_len);
260
261 offset = readahead_pos(rac);
262 i_pages = &rac->mapping->i_pages;
263
264 iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac));
265
266 /* read in the pages. */
267 if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode,
268 &offset, &iter, readahead_length(rac),
269 inode->i_size, NULL, NULL, rac->file)) < 0)
270 gossip_debug(GOSSIP_FILE_DEBUG,
271 "%s: wait_for_direct_io failed. \n", __func__);
272 else
273 ret = 0;
274
275 /* clean up. */
276 while ((folio = readahead_folio(rac))) {
277 if (!ret)
278 folio_mark_uptodate(folio);
279 folio_unlock(folio);
280 }
281}
282
283static int orangefs_read_folio(struct file *file, struct folio *folio)
284{
285 struct inode *inode = folio->mapping->host;
286 struct iov_iter iter;
287 struct bio_vec bv;
288 ssize_t ret;
289 loff_t off; /* offset of this folio in the file */
290
291 if (folio_test_dirty(folio))
292 orangefs_launder_folio(folio);
293
294 off = folio_pos(folio);
295 bvec_set_folio(&bv, folio, folio_size(folio), 0);
296 iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));
297
298 ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
299 folio_size(folio), inode->i_size, NULL, NULL, file);
300 /* this will only zero remaining unread portions of the folio data */
301 iov_iter_zero(~0U, &iter);
302 /* takes care of potential aliasing */
303 flush_dcache_folio(folio);
304 if (ret > 0)
305 ret = 0;
306 folio_end_read(folio, ret == 0);
307 return ret;
308}
309
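/*
 * Prepare a folio for a buffered write.  The byte range and the writer's
 * credentials are remembered in folio private data so that writeback can
 * send exactly what was written, as the right user.  A contiguous write
 * by the same user extends the existing range; anything else forces the
 * folio to be laundered first.
 */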
310static int orangefs_write_begin(struct file *file,
311 struct address_space *mapping, loff_t pos, unsigned len,
312 struct folio **foliop, void **fsdata)
313{
314 struct orangefs_write_range *wr;
315 struct folio *folio;
316 int ret;
317
318 folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
319 mapping_gfp_mask(mapping));
320 if (IS_ERR(folio))
321 return PTR_ERR(folio);
322
323 *foliop = folio;
324
325 if (folio_test_dirty(folio) && !folio_test_private(folio)) {
326 /*
 327	 * Should be impossible. If it happens, launder the folio
328 * since we don't know what's dirty. This will WARN in
329 * orangefs_writepage_locked.
330 */
331 ret = orangefs_launder_folio(folio);
332 if (ret)
333 return ret;
334 }
335 if (folio_test_private(folio)) {
336 struct orangefs_write_range *wr;
337 wr = folio_get_private(folio);
338 if (wr->pos + wr->len == pos &&
339 uid_eq(wr->uid, current_fsuid()) &&
340 gid_eq(wr->gid, current_fsgid())) {
341 wr->len += len;
342 goto okay;
343 } else {
344 ret = orangefs_launder_folio(folio);
345 if (ret)
346 return ret;
347 }
348 }
349
350 wr = kmalloc(sizeof *wr, GFP_KERNEL);
351 if (!wr)
352 return -ENOMEM;
353
354 wr->pos = pos;
355 wr->len = len;
356 wr->uid = current_fsuid();
357 wr->gid = current_fsgid();
358 folio_attach_private(folio, wr);
359okay:
360 return 0;
361}
362
363static int orangefs_write_end(struct file *file, struct address_space *mapping,
364 loff_t pos, unsigned len, unsigned copied, struct folio *folio,
365 void *fsdata)
366{
367 struct inode *inode = folio->mapping->host;
368 loff_t last_pos = pos + copied;
369
370 /*
371 * No need to use i_size_read() here, the i_size
372 * cannot change under us because we hold the i_mutex.
373 */
374 if (last_pos > inode->i_size)
375 i_size_write(inode, last_pos);
376
377 /* zero the stale part of the folio if we did a short copy */
378 if (!folio_test_uptodate(folio)) {
379 unsigned from = pos & (PAGE_SIZE - 1);
380 if (copied < len) {
381 folio_zero_range(folio, from + copied, len - copied);
382 }
383 /* Set fully written pages uptodate. */
384 if (pos == folio_pos(folio) &&
385 (len == PAGE_SIZE || pos + len == inode->i_size)) {
386 folio_zero_segment(folio, from + copied, PAGE_SIZE);
387 folio_mark_uptodate(folio);
388 }
389 }
390
391 folio_mark_dirty(folio);
392 folio_unlock(folio);
393 folio_put(folio);
394
395 mark_inode_dirty_sync(file_inode(file));
396 return copied;
397}
398
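/*
 * Reconcile the folio's recorded write range with the invalidated region:
 * a range completely covered is freed, a range partially covered is
 * trimmed (and what is left is written out via launder below), and a hole
 * punched inside the range is unsupported and only warns.
 */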
399static void orangefs_invalidate_folio(struct folio *folio,
400 size_t offset, size_t length)
401{
402 struct orangefs_write_range *wr = folio_get_private(folio);
403
404 if (offset == 0 && length == PAGE_SIZE) {
405 kfree(folio_detach_private(folio));
406 return;
407 /* write range entirely within invalidate range (or equal) */
408 } else if (folio_pos(folio) + offset <= wr->pos &&
409 wr->pos + wr->len <= folio_pos(folio) + offset + length) {
410 kfree(folio_detach_private(folio));
411 /* XXX is this right? only caller in fs */
412 folio_cancel_dirty(folio);
413 return;
414 /* invalidate range chops off end of write range */
415 } else if (wr->pos < folio_pos(folio) + offset &&
416 wr->pos + wr->len <= folio_pos(folio) + offset + length &&
417 folio_pos(folio) + offset < wr->pos + wr->len) {
418 size_t x;
419 x = wr->pos + wr->len - (folio_pos(folio) + offset);
420 WARN_ON(x > wr->len);
421 wr->len -= x;
422 wr->uid = current_fsuid();
423 wr->gid = current_fsgid();
424 /* invalidate range chops off beginning of write range */
425 } else if (folio_pos(folio) + offset <= wr->pos &&
426 folio_pos(folio) + offset + length < wr->pos + wr->len &&
427 wr->pos < folio_pos(folio) + offset + length) {
428 size_t x;
429 x = folio_pos(folio) + offset + length - wr->pos;
430 WARN_ON(x > wr->len);
431 wr->pos += x;
432 wr->len -= x;
433 wr->uid = current_fsuid();
434 wr->gid = current_fsgid();
435 /* invalidate range entirely within write range (punch hole) */
436 } else if (wr->pos < folio_pos(folio) + offset &&
437 folio_pos(folio) + offset + length < wr->pos + wr->len) {
438 /* XXX what do we do here... should not WARN_ON */
439 WARN_ON(1);
440 /* punch hole */
441 /*
442 * should we just ignore this and write it out anyway?
443 * it hardly makes sense
444 */
445 return;
446 /* non-overlapping ranges */
447 } else {
448 /* WARN if they do overlap */
449 if (!((folio_pos(folio) + offset + length <= wr->pos) ^
450 (wr->pos + wr->len <= folio_pos(folio) + offset))) {
451 WARN_ON(1);
452 printk("invalidate range offset %llu length %zu\n",
453 folio_pos(folio) + offset, length);
454 printk("write range offset %llu length %zu\n",
455 wr->pos, wr->len);
456 }
457 return;
458 }
459
460 /*
461 * Above there are returns where wr is freed or where we WARN.
462 * Thus the following runs if wr was modified above.
463 */
464
465 orangefs_launder_folio(folio);
466}
467
468static bool orangefs_release_folio(struct folio *folio, gfp_t foo)
469{
470 return !folio_test_private(folio);
471}
472
473static void orangefs_free_folio(struct folio *folio)
474{
475 kfree(folio_detach_private(folio));
476}
477
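/*
 * Synchronously write a locked dirty folio to the server.  Besides being
 * the ->launder_folio method, this is used whenever a cached write range
 * must be pushed out before it can be replaced or reused.
 */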
478static int orangefs_launder_folio(struct folio *folio)
479{
480 int r = 0;
481 struct writeback_control wbc = {
482 .sync_mode = WB_SYNC_ALL,
483 .nr_to_write = 0,
484 };
485 folio_wait_writeback(folio);
486 if (folio_clear_dirty_for_io(folio)) {
487 r = orangefs_writepage_locked(&folio->page, &wbc);
488 folio_end_writeback(folio);
489 }
490 return r;
491}
492
493static ssize_t orangefs_direct_IO(struct kiocb *iocb,
494 struct iov_iter *iter)
495{
496 /*
497 * Comment from original do_readv_writev:
498 * Common entry point for read/write/readv/writev
499 * This function will dispatch it to either the direct I/O
500 * or buffered I/O path depending on the mount options and/or
501 * augmented/extended metadata attached to the file.
502 * Note: File extended attributes override any mount options.
503 */
504 struct file *file = iocb->ki_filp;
505 loff_t pos = iocb->ki_pos;
506 enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
507 ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
508 loff_t *offset = &pos;
509 struct inode *inode = file->f_mapping->host;
510 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
511 struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
512 size_t count = iov_iter_count(iter);
513 ssize_t total_count = 0;
514 ssize_t ret = -EINVAL;
515
516 gossip_debug(GOSSIP_FILE_DEBUG,
517 "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
518 __func__,
519 handle,
520 (int)count);
521
522 if (type == ORANGEFS_IO_WRITE) {
523 gossip_debug(GOSSIP_FILE_DEBUG,
524 "%s(%pU): proceeding with offset : %llu, "
525 "size %d\n",
526 __func__,
527 handle,
528 llu(*offset),
529 (int)count);
530 }
531
532 if (count == 0) {
533 ret = 0;
534 goto out;
535 }
536
537 while (iov_iter_count(iter)) {
538 size_t each_count = iov_iter_count(iter);
539 size_t amt_complete;
540
541 /* how much to transfer in this loop iteration */
542 if (each_count > orangefs_bufmap_size_query())
543 each_count = orangefs_bufmap_size_query();
544
545 gossip_debug(GOSSIP_FILE_DEBUG,
546 "%s(%pU): size of each_count(%d)\n",
547 __func__,
548 handle,
549 (int)each_count);
550 gossip_debug(GOSSIP_FILE_DEBUG,
551 "%s(%pU): BEFORE wait_for_io: offset is %d\n",
552 __func__,
553 handle,
554 (int)*offset);
555
556 ret = wait_for_direct_io(type, inode, offset, iter,
557 each_count, 0, NULL, NULL, file);
558 gossip_debug(GOSSIP_FILE_DEBUG,
559 "%s(%pU): return from wait_for_io:%d\n",
560 __func__,
561 handle,
562 (int)ret);
563
564 if (ret < 0)
565 goto out;
566
567 *offset += ret;
568 total_count += ret;
569 amt_complete = ret;
570
571 gossip_debug(GOSSIP_FILE_DEBUG,
572 "%s(%pU): AFTER wait_for_io: offset is %d\n",
573 __func__,
574 handle,
575 (int)*offset);
576
577 /*
 578	 * if we got a short I/O operation,
579 * fall out and return what we got so far
580 */
581 if (amt_complete < each_count)
582 break;
583 } /*end while */
584
585out:
586 if (total_count > 0)
587 ret = total_count;
588 if (ret > 0) {
589 if (type == ORANGEFS_IO_READ) {
590 file_accessed(file);
591 } else {
592 file_update_time(file);
593 if (*offset > i_size_read(inode))
594 i_size_write(inode, *offset);
595 }
596 }
597
598 gossip_debug(GOSSIP_FILE_DEBUG,
599 "%s(%pU): Value(%d) returned.\n",
600 __func__,
601 handle,
602 (int)ret);
603
604 return ret;
605}
606
607/** ORANGEFS2 implementation of address space operations */
608static const struct address_space_operations orangefs_address_operations = {
609 .writepage = orangefs_writepage,
610 .readahead = orangefs_readahead,
611 .read_folio = orangefs_read_folio,
612 .writepages = orangefs_writepages,
613 .dirty_folio = filemap_dirty_folio,
614 .write_begin = orangefs_write_begin,
615 .write_end = orangefs_write_end,
616 .invalidate_folio = orangefs_invalidate_folio,
617 .release_folio = orangefs_release_folio,
618 .free_folio = orangefs_free_folio,
619 .launder_folio = orangefs_launder_folio,
620 .direct_IO = orangefs_direct_IO,
621};
622
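/*
 * A shared mapping is about to dirty a page: record a whole-page write
 * range with the faulting task's credentials (mirroring write_begin),
 * laundering first if a different user's range is already attached.
 */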
623vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
624{
625 struct folio *folio = page_folio(vmf->page);
626 struct inode *inode = file_inode(vmf->vma->vm_file);
627 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
628 unsigned long *bitlock = &orangefs_inode->bitlock;
629 vm_fault_t ret;
630 struct orangefs_write_range *wr;
631
632 sb_start_pagefault(inode->i_sb);
633
634 if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
635 ret = VM_FAULT_RETRY;
636 goto out;
637 }
638
639 folio_lock(folio);
640 if (folio_test_dirty(folio) && !folio_test_private(folio)) {
641 /*
642 * Should be impossible. If it happens, launder the folio
643 * since we don't know what's dirty. This will WARN in
644 * orangefs_writepage_locked.
645 */
646 if (orangefs_launder_folio(folio)) {
647 ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
648 goto out;
649 }
650 }
651 if (folio_test_private(folio)) {
652 wr = folio_get_private(folio);
653 if (uid_eq(wr->uid, current_fsuid()) &&
654 gid_eq(wr->gid, current_fsgid())) {
655 wr->pos = page_offset(vmf->page);
656 wr->len = PAGE_SIZE;
657 goto okay;
658 } else {
659 if (orangefs_launder_folio(folio)) {
660 ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
661 goto out;
662 }
663 }
664 }
665 wr = kmalloc(sizeof *wr, GFP_KERNEL);
666 if (!wr) {
667 ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
668 goto out;
669 }
670 wr->pos = page_offset(vmf->page);
671 wr->len = PAGE_SIZE;
672 wr->uid = current_fsuid();
673 wr->gid = current_fsgid();
674 folio_attach_private(folio, wr);
675okay:
676
677 file_update_time(vmf->vma->vm_file);
678 if (folio->mapping != inode->i_mapping) {
679 folio_unlock(folio);
680 ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
681 goto out;
682 }
683
684 /*
685 * We mark the folio dirty already here so that when freeze is in
686 * progress, we are guaranteed that writeback during freezing will
687 * see the dirty folio and writeprotect it again.
688 */
689 folio_mark_dirty(folio);
690 folio_wait_stable(folio);
691 ret = VM_FAULT_LOCKED;
692out:
693 sb_end_pagefault(inode->i_sb);
694 return ret;
695}
696
697static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
698{
699 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
700 struct orangefs_kernel_op_s *new_op;
701 loff_t orig_size;
702 int ret = -EINVAL;
703
704 gossip_debug(GOSSIP_INODE_DEBUG,
705 "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
706 __func__,
707 get_khandle_from_ino(inode),
708 &orangefs_inode->refn.khandle,
709 orangefs_inode->refn.fs_id,
710 iattr->ia_size);
711
 712	/* Ensure that we have an up-to-date size, so we know if it changed. */
713 ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE);
714 if (ret == -ESTALE)
715 ret = -EIO;
716 if (ret) {
717 gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
718 __func__, ret);
719 return ret;
720 }
721 orig_size = i_size_read(inode);
722
723 /* This is truncate_setsize in a different order. */
724 truncate_pagecache(inode, iattr->ia_size);
725 i_size_write(inode, iattr->ia_size);
726 if (iattr->ia_size > orig_size)
727 pagecache_isize_extended(inode, orig_size, iattr->ia_size);
728
729 new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
730 if (!new_op)
731 return -ENOMEM;
732
733 new_op->upcall.req.truncate.refn = orangefs_inode->refn;
734 new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;
735
736 ret = service_operation(new_op,
737 __func__,
738 get_interruptible_flag(inode));
739
740 /*
741 * the truncate has no downcall members to retrieve, but
742 * the status value tells us if it went through ok or not
743 */
744 gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret);
745
746 op_release(new_op);
747
748 if (ret != 0)
749 return ret;
750
751 if (orig_size != i_size_read(inode))
752 iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
753
754 return ret;
755}
756
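/*
 * Attribute changes are not sent to the server here; they are recorded on
 * the in-memory inode (attr_valid plus the owning fsuid/fsgid) and pushed
 * out later by write_inode.  If another user's attributes are still
 * pending, the inode is written out first so credentials never mix.
 */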
757int __orangefs_setattr(struct inode *inode, struct iattr *iattr)
758{
759 int ret;
760
761 if (iattr->ia_valid & ATTR_MODE) {
762 if (iattr->ia_mode & (S_ISVTX)) {
763 if (is_root_handle(inode)) {
764 /*
765 * allow sticky bit to be set on root (since
766 * it shows up that way by default anyhow),
767 * but don't show it to the server
768 */
769 iattr->ia_mode -= S_ISVTX;
770 } else {
771 gossip_debug(GOSSIP_UTILS_DEBUG,
772 "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
773 ret = -EINVAL;
774 goto out;
775 }
776 }
777 if (iattr->ia_mode & (S_ISUID)) {
778 gossip_debug(GOSSIP_UTILS_DEBUG,
779 "Attempting to set setuid bit (not supported); returning EINVAL.\n");
780 ret = -EINVAL;
781 goto out;
782 }
783 }
784
785 if (iattr->ia_valid & ATTR_SIZE) {
786 ret = orangefs_setattr_size(inode, iattr);
787 if (ret)
788 goto out;
789 }
790
791again:
792 spin_lock(&inode->i_lock);
793 if (ORANGEFS_I(inode)->attr_valid) {
794 if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) &&
795 gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) {
796 ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
797 } else {
798 spin_unlock(&inode->i_lock);
799 write_inode_now(inode, 1);
800 goto again;
801 }
802 } else {
803 ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
804 ORANGEFS_I(inode)->attr_uid = current_fsuid();
805 ORANGEFS_I(inode)->attr_gid = current_fsgid();
806 }
807 setattr_copy(&nop_mnt_idmap, inode, iattr);
808 spin_unlock(&inode->i_lock);
809 mark_inode_dirty(inode);
810
811 ret = 0;
812out:
813 return ret;
814}
815
816int __orangefs_setattr_mode(struct dentry *dentry, struct iattr *iattr)
817{
818 int ret;
819 struct inode *inode = d_inode(dentry);
820
821 ret = __orangefs_setattr(inode, iattr);
822 /* change mode on a file that has ACLs */
823 if (!ret && (iattr->ia_valid & ATTR_MODE))
824 ret = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);
825 return ret;
826}
827
828/*
829 * Change attributes of an object referenced by dentry.
830 */
831int orangefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
832 struct iattr *iattr)
833{
834 int ret;
 835	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: called on %pd\n",
836 dentry);
837 ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
838 if (ret)
839 goto out;
840 ret = __orangefs_setattr_mode(dentry, iattr);
841 sync_inode_metadata(d_inode(dentry), 1);
842out:
843 gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n",
844 ret);
845 return ret;
846}
847
848/*
849 * Obtain attributes of an object given a dentry
850 */
851int orangefs_getattr(struct mnt_idmap *idmap, const struct path *path,
852 struct kstat *stat, u32 request_mask, unsigned int flags)
853{
854 int ret;
855 struct inode *inode = path->dentry->d_inode;
856
857 gossip_debug(GOSSIP_INODE_DEBUG,
858 "orangefs_getattr: called on %pd mask %u\n",
859 path->dentry, request_mask);
860
861 ret = orangefs_inode_getattr(inode,
862 request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
863 if (ret == 0) {
864 generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
865
 866		/* size was only refreshed if requested, so don't report a stale one */
867 if (!(request_mask & STATX_SIZE))
868 stat->result_mask &= ~STATX_SIZE;
869
870 generic_fill_statx_attr(inode, stat);
871 }
872 return ret;
873}
874
875int orangefs_permission(struct mnt_idmap *idmap,
876 struct inode *inode, int mask)
877{
878 int ret;
879
880 if (mask & MAY_NOT_BLOCK)
881 return -ECHILD;
882
883 gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);
884
885 /* Make sure the permission (and other common attrs) are up to date. */
886 ret = orangefs_inode_getattr(inode, 0);
887 if (ret < 0)
888 return ret;
889
890 return generic_permission(&nop_mnt_idmap, inode, mask);
891}
892
893int orangefs_update_time(struct inode *inode, int flags)
894{
895 struct iattr iattr;
896
897 gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
898 get_khandle_from_ino(inode));
899 flags = generic_update_time(inode, flags);
900 memset(&iattr, 0, sizeof iattr);
901 if (flags & S_ATIME)
902 iattr.ia_valid |= ATTR_ATIME;
903 if (flags & S_CTIME)
904 iattr.ia_valid |= ATTR_CTIME;
905 if (flags & S_MTIME)
906 iattr.ia_valid |= ATTR_MTIME;
907 return __orangefs_setattr(inode, &iattr);
908}
909
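/*
 * File attribute flags (immutable, append, noatime) live in the
 * "user.pvfs2.meta_hint" extended attribute on the server; get and set
 * simply round-trip that value.
 */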
910static int orangefs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
911{
912 u64 val = 0;
913 int ret;
914
915 gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__,
916 dentry);
917
918 ret = orangefs_inode_getxattr(d_inode(dentry),
919 "user.pvfs2.meta_hint",
920 &val, sizeof(val));
921 if (ret < 0 && ret != -ENODATA)
922 return ret;
923
924 gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val);
925
926 fileattr_fill_flags(fa, val);
927 return 0;
928}
929
930static int orangefs_fileattr_set(struct mnt_idmap *idmap,
931 struct dentry *dentry, struct fileattr *fa)
932{
933 u64 val = 0;
934
935 gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__,
936 dentry);
937 /*
938 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode is
939 * turned on for a file. The user is not allowed to turn on this bit,
940 * but the bit is present if the user first gets the flags and then
941 * updates the flags with some new settings. So, we ignore it in the
942 * following edit. bligon.
943 */
944 if (fileattr_has_fsx(fa) ||
945 (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL | ORANGEFS_MIRROR_FL))) {
946 gossip_err("%s: only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n",
947 __func__);
948 return -EOPNOTSUPP;
949 }
950 val = fa->flags;
951 gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val);
952 return orangefs_inode_setxattr(d_inode(dentry),
953 "user.pvfs2.meta_hint",
954 &val, sizeof(val), 0);
955}
956
957/* ORANGEFS2 implementation of VFS inode operations for files */
958static const struct inode_operations orangefs_file_inode_operations = {
959 .get_inode_acl = orangefs_get_acl,
960 .set_acl = orangefs_set_acl,
961 .setattr = orangefs_setattr,
962 .getattr = orangefs_getattr,
963 .listxattr = orangefs_listxattr,
964 .permission = orangefs_permission,
965 .update_time = orangefs_update_time,
966 .fileattr_get = orangefs_fileattr_get,
967 .fileattr_set = orangefs_fileattr_set,
968};
969
970static int orangefs_init_iops(struct inode *inode)
971{
972 inode->i_mapping->a_ops = &orangefs_address_operations;
973
974 switch (inode->i_mode & S_IFMT) {
975 case S_IFREG:
976 inode->i_op = &orangefs_file_inode_operations;
977 inode->i_fop = &orangefs_file_operations;
978 break;
979 case S_IFLNK:
980 inode->i_op = &orangefs_symlink_inode_operations;
981 break;
982 case S_IFDIR:
983 inode->i_op = &orangefs_dir_inode_operations;
984 inode->i_fop = &orangefs_dir_operations;
985 break;
986 default:
987 gossip_debug(GOSSIP_INODE_DEBUG,
988 "%s: unsupported mode\n",
989 __func__);
990 return -EINVAL;
991 }
992
993 return 0;
994}
995
996/*
997 * Given an ORANGEFS object identifier (fsid, handle), convert it into
 998 * an ino_t type that is used as the hash index under which the handle is
 999 * looked up in the VFS inode hash table.
1000 */
1001static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
1002{
1003 if (!ref)
1004 return 0;
1005 return orangefs_khandle_to_ino(&(ref->khandle));
1006}
1007
1008/*
1009 * Called to set up an inode from iget5_locked.
1010 */
1011static int orangefs_set_inode(struct inode *inode, void *data)
1012{
1013 struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
1014 ORANGEFS_I(inode)->refn.fs_id = ref->fs_id;
1015 ORANGEFS_I(inode)->refn.khandle = ref->khandle;
1016 ORANGEFS_I(inode)->attr_valid = 0;
1017 hash_init(ORANGEFS_I(inode)->xattr_cache);
1018 ORANGEFS_I(inode)->mapping_time = jiffies - 1;
1019 ORANGEFS_I(inode)->bitlock = 0;
1020 return 0;
1021}
1022
1023/*
1024 * Called to determine if handles match.
1025 */
1026static int orangefs_test_inode(struct inode *inode, void *data)
1027{
1028 struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
1029 struct orangefs_inode_s *orangefs_inode = NULL;
1030
1031 orangefs_inode = ORANGEFS_I(inode);
1032 /* test handles and fs_ids... */
1033 return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle),
1034 &(ref->khandle)) &&
1035 orangefs_inode->refn.fs_id == ref->fs_id);
1036}
1037
1038/*
 1039 * Front end to look up the inode cache maintained by the VFS using the ORANGEFS
1040 * file handle.
1041 *
1042 * @sb: the file system super block instance.
1043 * @ref: The ORANGEFS object for which we are trying to locate an inode.
1044 */
1045struct inode *orangefs_iget(struct super_block *sb,
1046 struct orangefs_object_kref *ref)
1047{
1048 struct inode *inode = NULL;
1049 unsigned long hash;
1050 int error;
1051
1052 hash = orangefs_handle_hash(ref);
1053 inode = iget5_locked(sb,
1054 hash,
1055 orangefs_test_inode,
1056 orangefs_set_inode,
1057 ref);
1058
1059 if (!inode)
1060 return ERR_PTR(-ENOMEM);
1061
1062 if (!(inode->i_state & I_NEW))
1063 return inode;
1064
1065 error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
1066 if (error) {
1067 iget_failed(inode);
1068 return ERR_PTR(error);
1069 }
1070
1071 inode->i_ino = hash; /* needed for stat etc */
1072 orangefs_init_iops(inode);
1073 unlock_new_inode(inode);
1074
1075 gossip_debug(GOSSIP_INODE_DEBUG,
1076 "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
1077 &ref->khandle,
1078 ref->fs_id,
1079 hash,
1080 inode->i_ino);
1081
1082 return inode;
1083}
1084
1085/*
1086 * Allocate an inode for a newly created file and insert it into the inode hash.
1087 */
1088struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
1089 umode_t mode, dev_t dev, struct orangefs_object_kref *ref)
1090{
1091 struct posix_acl *acl = NULL, *default_acl = NULL;
1092 unsigned long hash = orangefs_handle_hash(ref);
1093 struct inode *inode;
1094 int error;
1095
1096 gossip_debug(GOSSIP_INODE_DEBUG,
1097 "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
1098 __func__,
1099 sb,
1100 MAJOR(dev),
1101 MINOR(dev),
1102 mode);
1103
1104 inode = new_inode(sb);
1105 if (!inode)
1106 return ERR_PTR(-ENOMEM);
1107
1108 error = posix_acl_create(dir, &mode, &default_acl, &acl);
1109 if (error)
1110 goto out_iput;
1111
1112 orangefs_set_inode(inode, ref);
1113 inode->i_ino = hash; /* needed for stat etc */
1114
1115 error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
1116 if (error)
1117 goto out_iput;
1118
1119 orangefs_init_iops(inode);
1120 inode->i_rdev = dev;
1121
1122 if (default_acl) {
1123 error = __orangefs_set_acl(inode, default_acl,
1124 ACL_TYPE_DEFAULT);
1125 if (error)
1126 goto out_iput;
1127 }
1128
1129 if (acl) {
1130 error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
1131 if (error)
1132 goto out_iput;
1133 }
1134
1135 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
1136 if (error < 0)
1137 goto out_iput;
1138
1139 gossip_debug(GOSSIP_INODE_DEBUG,
1140 "Initializing ACL's for inode %pU\n",
1141 get_khandle_from_ino(inode));
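	/*
	 * posix_acl_create() above may have masked bits out of the requested
	 * mode; if what the server reports differs, record the effective mode
	 * so it is sent back on the next setattr flush.
	 */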
1142 if (mode != inode->i_mode) {
1143 struct iattr iattr = {
1144 .ia_mode = mode,
1145 .ia_valid = ATTR_MODE,
1146 };
1147 inode->i_mode = mode;
1148 __orangefs_setattr(inode, &iattr);
1149 __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
1150 }
1151 posix_acl_release(acl);
1152 posix_acl_release(default_acl);
1153 return inode;
1154
1155out_iput:
1156 iput(inode);
1157 posix_acl_release(acl);
1158 posix_acl_release(default_acl);
1159 return ERR_PTR(error);
1160}