1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/blkdev.h>
6#include <linux/sched/mm.h>
7#include <linux/atomic.h>
8#include <linux/vmalloc.h>
9#include "ctree.h"
10#include "volumes.h"
11#include "zoned.h"
12#include "rcu-string.h"
13#include "disk-io.h"
14#include "block-group.h"
15#include "transaction.h"
16#include "dev-replace.h"
17#include "space-info.h"
18#include "fs.h"
19#include "accessors.h"
20
21/* Maximum number of zones to report per blkdev_report_zones() call */
22#define BTRFS_REPORT_NR_ZONES 4096
23/* Invalid allocation pointer value for missing devices */
24#define WP_MISSING_DEV ((u64)-1)
25/* Pseudo write pointer value for conventional zone */
26#define WP_CONVENTIONAL ((u64)-2)
27
28/*
29 * Location of the first zone of superblock logging zone pairs.
30 *
31 * - primary superblock: 0B (zone 0)
32 * - first copy: 512G (zone starting at that offset)
33 * - second copy: 4T (zone starting at that offset)
34 */
35#define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL)
36#define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G)
37#define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G)
38
39#define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
40#define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
41
42/* Number of superblock log zones */
43#define BTRFS_NR_SB_LOG_ZONES 2
44
45/*
46 * Minimum number of active zones we need:
47 *
48 * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors
49 * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group
50 * - 1 zone for tree-log dedicated block group
51 * - 1 zone for relocation
52 */
53#define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5)
54
55/*
56 * Minimum / maximum supported zone size. Currently, SMR disks have a zone
57 * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range.
58 * We do not expect the zone size to become larger than 8GiB or smaller than
59 * 4MiB in the near future.
60 */
61#define BTRFS_MAX_ZONE_SIZE SZ_8G
62#define BTRFS_MIN_ZONE_SIZE SZ_4M
63
64#define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
65
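/*
 * A superblock log zone is considered full when it is in the FULL condition
 * or when there is no room left for one more superblock copy
 * (SUPER_INFO_SECTORS) below the zone capacity.
 */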
66static inline bool sb_zone_is_full(const struct blk_zone *zone)
67{
68 return (zone->cond == BLK_ZONE_COND_FULL) ||
69 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
70}
71
72static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
73{
74 struct blk_zone *zones = data;
75
76 memcpy(&zones[idx], zone, sizeof(*zone));
77
78 return 0;
79}
80
81static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
82 u64 *wp_ret)
83{
84 bool empty[BTRFS_NR_SB_LOG_ZONES];
85 bool full[BTRFS_NR_SB_LOG_ZONES];
86 sector_t sector;
87 int i;
88
89 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
90 ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL);
91 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY);
92 full[i] = sb_zone_is_full(&zones[i]);
93 }
94
95 /*
96 * Possible states of log buffer zones
97 *
98 *              Empty[0]  In use[0]  Full[0]
99 * Empty[1]            *          0        1
100 * In use[1]           x          x        1
101 * Full[1]             0          0        C
102 *
103 * Log position:
104 * *: Special case, no superblock is written
105 * 0: Use write pointer of zones[0]
106 * 1: Use write pointer of zones[1]
107 * C: Compare super blocks from zones[0] and zones[1], use the latest
108 * one determined by generation
109 * x: Invalid state
110 */
111
112 if (empty[0] && empty[1]) {
113 /* Special case to distinguish no superblock to read */
114 *wp_ret = zones[0].start << SECTOR_SHIFT;
115 return -ENOENT;
116 } else if (full[0] && full[1]) {
117 /* Compare two super blocks */
118 struct address_space *mapping = bdev->bd_inode->i_mapping;
119 struct page *page[BTRFS_NR_SB_LOG_ZONES];
120 struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
121 int i;
122
123 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
124 u64 bytenr;
125
126 bytenr = ((zones[i].start + zones[i].len)
127 << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
128
129 page[i] = read_cache_page_gfp(mapping,
130 bytenr >> PAGE_SHIFT, GFP_NOFS);
131 if (IS_ERR(page[i])) {
132 if (i == 1)
133 btrfs_release_disk_super(super[0]);
134 return PTR_ERR(page[i]);
135 }
136 super[i] = page_address(page[i]);
137 }
138
139 if (btrfs_super_generation(super[0]) >
140 btrfs_super_generation(super[1]))
141 sector = zones[1].start;
142 else
143 sector = zones[0].start;
144
145 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
146 btrfs_release_disk_super(super[i]);
147 } else if (!full[0] && (empty[1] || full[1])) {
148 sector = zones[0].wp;
149 } else if (full[0]) {
150 sector = zones[1].wp;
151 } else {
152 return -EUCLEAN;
153 }
154 *wp_ret = sector << SECTOR_SHIFT;
155 return 0;
156}
157
158/*
159 * Get the first zone number of the superblock mirror
160 */
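/*
 * For example, assuming a 256 MiB zone size (shift == 28): mirror 0 maps to
 * zone 0, mirror 1 to zone 1 << (39 - 28) == 2048 (512 GiB / 256 MiB), and
 * mirror 2 to zone 1 << (42 - 28) == 16384 (4 TiB / 256 MiB).
 */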
161static inline u32 sb_zone_number(int shift, int mirror)
162{
163 u64 zone;
164
165 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
166 switch (mirror) {
167 case 0: zone = 0; break;
168 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
169 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
170 }
171
172 ASSERT(zone <= U32_MAX);
173
174 return (u32)zone;
175}
176
177static inline sector_t zone_start_sector(u32 zone_number,
178 struct block_device *bdev)
179{
180 return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
181}
182
183static inline u64 zone_start_physical(u32 zone_number,
184 struct btrfs_zoned_device_info *zone_info)
185{
186 return (u64)zone_number << zone_info->zone_size_shift;
187}
188
189/*
190 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the
191 * block device into fixed-size chunks and fakes a conventional zone on each
192 * of them.
193 */
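/*
 * For example, assuming a 256 MiB emulated zone size, a 1 GiB non-zoned
 * device is reported as four conventional zones; the loop below stops early
 * once a fake zone reaches the end of the device.
 */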
194static int emulate_report_zones(struct btrfs_device *device, u64 pos,
195 struct blk_zone *zones, unsigned int nr_zones)
196{
197 const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
198 sector_t bdev_size = bdev_nr_sectors(device->bdev);
199 unsigned int i;
200
201 pos >>= SECTOR_SHIFT;
202 for (i = 0; i < nr_zones; i++) {
203 zones[i].start = i * zone_sectors + pos;
204 zones[i].len = zone_sectors;
205 zones[i].capacity = zone_sectors;
206 zones[i].wp = zones[i].start + zone_sectors;
207 zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
208 zones[i].cond = BLK_ZONE_COND_NOT_WP;
209
210 if (zones[i].wp >= bdev_size) {
211 i++;
212 break;
213 }
214 }
215
216 return i;
217}
218
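/*
 * Report zones for a device: on a non-zoned device the zones are emulated,
 * otherwise they are served from the in-memory zone cache when every
 * requested zone is cached, falling back to blkdev_report_zones() and
 * repopulating the cache on a miss.
 */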
219static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
220 struct blk_zone *zones, unsigned int *nr_zones)
221{
222 struct btrfs_zoned_device_info *zinfo = device->zone_info;
223 u32 zno;
224 int ret;
225
226 if (!*nr_zones)
227 return 0;
228
229 if (!bdev_is_zoned(device->bdev)) {
230 ret = emulate_report_zones(device, pos, zones, *nr_zones);
231 *nr_zones = ret;
232 return 0;
233 }
234
235 /* Check cache */
236 if (zinfo->zone_cache) {
237 unsigned int i;
238
239 ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
240 zno = pos >> zinfo->zone_size_shift;
241 /*
242 * We cannot report zones beyond the zone end. So, it is OK to
243 * cap *nr_zones at the end of the device.
244 */
245 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);
246
247 for (i = 0; i < *nr_zones; i++) {
248 struct blk_zone *zone_info;
249
250 zone_info = &zinfo->zone_cache[zno + i];
251 if (!zone_info->len)
252 break;
253 }
254
255 if (i == *nr_zones) {
256 /* Cache hit on all the zones */
257 memcpy(zones, zinfo->zone_cache + zno,
258 sizeof(*zinfo->zone_cache) * *nr_zones);
259 return 0;
260 }
261 }
262
263 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
264 copy_zone_info_cb, zones);
265 if (ret < 0) {
266 btrfs_err_in_rcu(device->fs_info,
267 "zoned: failed to read zone %llu on %s (devid %llu)",
268 pos, rcu_str_deref(device->name),
269 device->devid);
270 return ret;
271 }
272 *nr_zones = ret;
273 if (!ret)
274 return -EIO;
275
276 /* Populate cache */
277 if (zinfo->zone_cache)
278 memcpy(zinfo->zone_cache + zno, zones,
279 sizeof(*zinfo->zone_cache) * *nr_zones);
280
281 return 0;
282}
283
284/* The emulated zone size is determined from the size of the first device extent */
285static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
286{
287 struct btrfs_path *path;
288 struct btrfs_root *root = fs_info->dev_root;
289 struct btrfs_key key;
290 struct extent_buffer *leaf;
291 struct btrfs_dev_extent *dext;
292 int ret = 0;
293
294 key.objectid = 1;
295 key.type = BTRFS_DEV_EXTENT_KEY;
296 key.offset = 0;
297
298 path = btrfs_alloc_path();
299 if (!path)
300 return -ENOMEM;
301
302 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
303 if (ret < 0)
304 goto out;
305
306 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
307 ret = btrfs_next_leaf(root, path);
308 if (ret < 0)
309 goto out;
310 /* No dev extents at all? Not good */
311 if (ret > 0) {
312 ret = -EUCLEAN;
313 goto out;
314 }
315 }
316
317 leaf = path->nodes[0];
318 dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
319 fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
320 ret = 0;
321
322out:
323 btrfs_free_path(path);
324
325 return ret;
326}
327
328int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
329{
330 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
331 struct btrfs_device *device;
332 int ret = 0;
333
334 /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
335 if (!btrfs_fs_incompat(fs_info, ZONED))
336 return 0;
337
338 mutex_lock(&fs_devices->device_list_mutex);
339 list_for_each_entry(device, &fs_devices->devices, dev_list) {
340 /* We can skip reading of zone info for missing devices */
341 if (!device->bdev)
342 continue;
343
344 ret = btrfs_get_dev_zone_info(device, true);
345 if (ret)
346 break;
347 }
348 mutex_unlock(&fs_devices->device_list_mutex);
349
350 return ret;
351}
352
353int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
354{
355 struct btrfs_fs_info *fs_info = device->fs_info;
356 struct btrfs_zoned_device_info *zone_info = NULL;
357 struct block_device *bdev = device->bdev;
358 unsigned int max_active_zones;
359 unsigned int nactive;
360 sector_t nr_sectors;
361 sector_t sector = 0;
362 struct blk_zone *zones = NULL;
363 unsigned int i, nreported = 0, nr_zones;
364 sector_t zone_sectors;
365 char *model, *emulated;
366 int ret;
367
368 /*
369 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
370 * yet be set.
371 */
372 if (!btrfs_fs_incompat(fs_info, ZONED))
373 return 0;
374
375 if (device->zone_info)
376 return 0;
377
378 zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
379 if (!zone_info)
380 return -ENOMEM;
381
382 device->zone_info = zone_info;
383
384 if (!bdev_is_zoned(bdev)) {
385 if (!fs_info->zone_size) {
386 ret = calculate_emulated_zone_size(fs_info);
387 if (ret)
388 goto out;
389 }
390
391 ASSERT(fs_info->zone_size);
392 zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
393 } else {
394 zone_sectors = bdev_zone_sectors(bdev);
395 }
396
397 ASSERT(is_power_of_two_u64(zone_sectors));
398 zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
399
400 /* We reject devices with a zone size larger than 8GiB */
401 if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
402 btrfs_err_in_rcu(fs_info,
403 "zoned: %s: zone size %llu larger than supported maximum %llu",
404 rcu_str_deref(device->name),
405 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
406 ret = -EINVAL;
407 goto out;
408 } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) {
409 btrfs_err_in_rcu(fs_info,
410 "zoned: %s: zone size %llu smaller than supported minimum %u",
411 rcu_str_deref(device->name),
412 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
413 ret = -EINVAL;
414 goto out;
415 }
416
417 nr_sectors = bdev_nr_sectors(bdev);
418 zone_info->zone_size_shift = ilog2(zone_info->zone_size);
419 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
420 /*
421 * We also limit max_zone_append_size by max_segments * PAGE_SIZE.
422 * Technically, we can have multiple pages per segment. But, since
423 * btrfs adds the pages one by one to a bio, and btrfs cannot
424 * increase the metadata reservation even if it increases the number of
425 * extents, it is safe to stick with the limit.
426 *
427 * With zoned emulation, we can have a non-zoned device in zoned
428 * mode. In this case, we don't have a valid max zone append size. So,
429 * use max_segments * PAGE_SIZE as the pseudo max_zone_append_size.
430 */
431 if (bdev_is_zoned(bdev)) {
432 zone_info->max_zone_append_size = min_t(u64,
433 (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
434 (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
435 } else {
436 zone_info->max_zone_append_size =
437 (u64)bdev_max_segments(bdev) << PAGE_SHIFT;
438 }
439 if (!IS_ALIGNED(nr_sectors, zone_sectors))
440 zone_info->nr_zones++;
441
442 max_active_zones = bdev_max_active_zones(bdev);
443 if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) {
444 btrfs_err_in_rcu(fs_info,
445"zoned: %s: max active zones %u is too small, need at least %u active zones",
446 rcu_str_deref(device->name), max_active_zones,
447 BTRFS_MIN_ACTIVE_ZONES);
448 ret = -EINVAL;
449 goto out;
450 }
451 zone_info->max_active_zones = max_active_zones;
452
453 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
454 if (!zone_info->seq_zones) {
455 ret = -ENOMEM;
456 goto out;
457 }
458
459 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
460 if (!zone_info->empty_zones) {
461 ret = -ENOMEM;
462 goto out;
463 }
464
465 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
466 if (!zone_info->active_zones) {
467 ret = -ENOMEM;
468 goto out;
469 }
470
471 zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
472 if (!zones) {
473 ret = -ENOMEM;
474 goto out;
475 }
476
477 /*
478 * Enable zone cache only for a zoned device. On a non-zoned device, we
479 * fill the zone info with emulated CONVENTIONAL zones, so no need to
480 * use the cache.
481 */
482 if (populate_cache && bdev_is_zoned(device->bdev)) {
483 zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
484 zone_info->nr_zones);
485 if (!zone_info->zone_cache) {
486 btrfs_err_in_rcu(device->fs_info,
487 "zoned: failed to allocate zone cache for %s",
488 rcu_str_deref(device->name));
489 ret = -ENOMEM;
490 goto out;
491 }
492 }
493
494 /* Get zone types */
495 nactive = 0;
496 while (sector < nr_sectors) {
497 nr_zones = BTRFS_REPORT_NR_ZONES;
498 ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
499 &nr_zones);
500 if (ret)
501 goto out;
502
503 for (i = 0; i < nr_zones; i++) {
504 if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
505 __set_bit(nreported, zone_info->seq_zones);
506 switch (zones[i].cond) {
507 case BLK_ZONE_COND_EMPTY:
508 __set_bit(nreported, zone_info->empty_zones);
509 break;
510 case BLK_ZONE_COND_IMP_OPEN:
511 case BLK_ZONE_COND_EXP_OPEN:
512 case BLK_ZONE_COND_CLOSED:
513 __set_bit(nreported, zone_info->active_zones);
514 nactive++;
515 break;
516 }
517 nreported++;
518 }
519 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
520 }
521
522 if (nreported != zone_info->nr_zones) {
523 btrfs_err_in_rcu(device->fs_info,
524 "inconsistent number of zones on %s (%u/%u)",
525 rcu_str_deref(device->name), nreported,
526 zone_info->nr_zones);
527 ret = -EIO;
528 goto out;
529 }
530
531 if (max_active_zones) {
532 if (nactive > max_active_zones) {
533 btrfs_err_in_rcu(device->fs_info,
534 "zoned: %u active zones on %s exceeds max_active_zones %u",
535 nactive, rcu_str_deref(device->name),
536 max_active_zones);
537 ret = -EIO;
538 goto out;
539 }
540 atomic_set(&zone_info->active_zones_left,
541 max_active_zones - nactive);
542 /* Overcommit does not work well with active zone tracking. */
543 set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
544 }
545
546 /* Validate superblock log */
547 nr_zones = BTRFS_NR_SB_LOG_ZONES;
548 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
549 u32 sb_zone;
550 u64 sb_wp;
551 int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
552
553 sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
554 if (sb_zone + 1 >= zone_info->nr_zones)
555 continue;
556
557 ret = btrfs_get_dev_zones(device,
558 zone_start_physical(sb_zone, zone_info),
559 &zone_info->sb_zones[sb_pos],
560 &nr_zones);
561 if (ret)
562 goto out;
563
564 if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
565 btrfs_err_in_rcu(device->fs_info,
566 "zoned: failed to read super block log zone info at devid %llu zone %u",
567 device->devid, sb_zone);
568 ret = -EUCLEAN;
569 goto out;
570 }
571
572 /*
573 * If zones[0] is conventional, always use the beginning of the
574 * zone to record the superblock. No need to validate in that case.
575 */
576 if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
577 BLK_ZONE_TYPE_CONVENTIONAL)
578 continue;
579
580 ret = sb_write_pointer(device->bdev,
581 &zone_info->sb_zones[sb_pos], &sb_wp);
582 if (ret != -ENOENT && ret) {
583 btrfs_err_in_rcu(device->fs_info,
584 "zoned: super block log zone corrupted devid %llu zone %u",
585 device->devid, sb_zone);
586 ret = -EUCLEAN;
587 goto out;
588 }
589 }
590
591
592 kvfree(zones);
593
594 switch (bdev_zoned_model(bdev)) {
595 case BLK_ZONED_HM:
596 model = "host-managed zoned";
597 emulated = "";
598 break;
599 case BLK_ZONED_HA:
600 model = "host-aware zoned";
601 emulated = "";
602 break;
603 case BLK_ZONED_NONE:
604 model = "regular";
605 emulated = "emulated ";
606 break;
607 default:
608 /* Just in case */
609 btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
610 bdev_zoned_model(bdev),
611 rcu_str_deref(device->name));
612 ret = -EOPNOTSUPP;
613 goto out_free_zone_info;
614 }
615
616 btrfs_info_in_rcu(fs_info,
617 "%s block device %s, %u %szones of %llu bytes",
618 model, rcu_str_deref(device->name), zone_info->nr_zones,
619 emulated, zone_info->zone_size);
620
621 return 0;
622
623out:
624 kvfree(zones);
625out_free_zone_info:
626 btrfs_destroy_dev_zone_info(device);
627
628 return ret;
629}
630
631void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
632{
633 struct btrfs_zoned_device_info *zone_info = device->zone_info;
634
635 if (!zone_info)
636 return;
637
638 bitmap_free(zone_info->active_zones);
639 bitmap_free(zone_info->seq_zones);
640 bitmap_free(zone_info->empty_zones);
641 vfree(zone_info->zone_cache);
642 kfree(zone_info);
643 device->zone_info = NULL;
644}
645
646struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev)
647{
648 struct btrfs_zoned_device_info *zone_info;
649
650 zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
651 if (!zone_info)
652 return NULL;
653
654 zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
655 if (!zone_info->seq_zones)
656 goto out;
657
658 bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
659 zone_info->nr_zones);
660
661 zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
662 if (!zone_info->empty_zones)
663 goto out;
664
665 bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
666 zone_info->nr_zones);
667
668 zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
669 if (!zone_info->active_zones)
670 goto out;
671
672 bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
673 zone_info->nr_zones);
674 zone_info->zone_cache = NULL;
675
676 return zone_info;
677
678out:
679 bitmap_free(zone_info->seq_zones);
680 bitmap_free(zone_info->empty_zones);
681 bitmap_free(zone_info->active_zones);
682 kfree(zone_info);
683 return NULL;
684}
685
686int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
687 struct blk_zone *zone)
688{
689 unsigned int nr_zones = 1;
690 int ret;
691
692 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
693 if (ret != 0 || !nr_zones)
694 return ret ? ret : -EIO;
695
696 return 0;
697}
698
699static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info)
700{
701 struct btrfs_device *device;
702
703 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
704 if (device->bdev &&
705 bdev_zoned_model(device->bdev) == BLK_ZONED_HM) {
706 btrfs_err(fs_info,
707 "zoned: mode not enabled but zoned device found: %pg",
708 device->bdev);
709 return -EINVAL;
710 }
711 }
712
713 return 0;
714}
715
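/*
 * Validate the zoned setup at mount time: every device must share the same
 * zone size, that zone size must be aligned to BTRFS_STRIPE_LEN, mixed block
 * groups are rejected, and the smallest non-zero max_zone_append_size of all
 * devices bounds fs_info->max_extent_size.
 */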
716int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
717{
718 struct btrfs_device *device;
719 u64 zone_size = 0;
720 u64 max_zone_append_size = 0;
721 int ret;
722
723 /*
724 * Host-Managed devices can't be used without the ZONED flag. With the
725 * ZONED flag, all devices can be used, using zone emulation if required.
726 */
727 if (!btrfs_fs_incompat(fs_info, ZONED))
728 return btrfs_check_for_zoned_device(fs_info);
729
730 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
731 struct btrfs_zoned_device_info *zone_info = device->zone_info;
732
733 if (!device->bdev)
734 continue;
735
736 if (!zone_size) {
737 zone_size = zone_info->zone_size;
738 } else if (zone_info->zone_size != zone_size) {
739 btrfs_err(fs_info,
740 "zoned: unequal block device zone sizes: have %llu found %llu",
741 zone_info->zone_size, zone_size);
742 return -EINVAL;
743 }
744 if (!max_zone_append_size ||
745 (zone_info->max_zone_append_size &&
746 zone_info->max_zone_append_size < max_zone_append_size))
747 max_zone_append_size = zone_info->max_zone_append_size;
748 }
749
750 /*
751 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
752 * btrfs_create_chunk(). Since we want stripe_len == zone_size,
753 * check the alignment here.
754 */
755 if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
756 btrfs_err(fs_info,
757 "zoned: zone size %llu not aligned to stripe %u",
758 zone_size, BTRFS_STRIPE_LEN);
759 return -EINVAL;
760 }
761
762 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
763 btrfs_err(fs_info, "zoned: mixed block groups not supported");
764 return -EINVAL;
765 }
766
767 fs_info->zone_size = zone_size;
768 fs_info->max_zone_append_size = ALIGN_DOWN(max_zone_append_size,
769 fs_info->sectorsize);
770 fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
771 if (fs_info->max_zone_append_size < fs_info->max_extent_size)
772 fs_info->max_extent_size = fs_info->max_zone_append_size;
773
774 /*
775 * Check mount options here, because the zoned status (derived from
776 * fs_info->zone_size) might have just changed above.
777 */
778 ret = btrfs_check_mountopts_zoned(fs_info);
779 if (ret)
780 return ret;
781
782 btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
783 return 0;
784}
785
786int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
787{
788 if (!btrfs_is_zoned(info))
789 return 0;
790
791 /*
792 * Space cache writing is not COWed. Disable that to avoid write errors
793 * in sequential zones.
794 */
795 if (btrfs_test_opt(info, SPACE_CACHE)) {
796 btrfs_err(info, "zoned: space cache v1 is not supported");
797 return -EINVAL;
798 }
799
800 if (btrfs_test_opt(info, NODATACOW)) {
801 btrfs_err(info, "zoned: NODATACOW not supported");
802 return -EINVAL;
803 }
804
805 return 0;
806}
807
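/*
 * Resolve the superblock location within a log zone pair. For a conventional
 * zones[0] the fixed zone start is used. Otherwise, for WRITE the current
 * write pointer is returned (resetting the target zone first if it is not
 * empty), and for READ the location of the most recently written superblock,
 * i.e. one superblock before the write pointer, is returned.
 */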
808static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
809 int rw, u64 *bytenr_ret)
810{
811 u64 wp;
812 int ret;
813
814 if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
815 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
816 return 0;
817 }
818
819 ret = sb_write_pointer(bdev, zones, &wp);
820 if (ret != -ENOENT && ret < 0)
821 return ret;
822
823 if (rw == WRITE) {
824 struct blk_zone *reset = NULL;
825
826 if (wp == zones[0].start << SECTOR_SHIFT)
827 reset = &zones[0];
828 else if (wp == zones[1].start << SECTOR_SHIFT)
829 reset = &zones[1];
830
831 if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
832 ASSERT(sb_zone_is_full(reset));
833
834 ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
835 reset->start, reset->len,
836 GFP_NOFS);
837 if (ret)
838 return ret;
839
840 reset->cond = BLK_ZONE_COND_EMPTY;
841 reset->wp = reset->start;
842 }
843 } else if (ret != -ENOENT) {
844 /*
845 * For READ, we want the previously written superblock. Move the
846 * write pointer to the end of a zone, if it is at the head of a zone.
847 */
848 u64 zone_end = 0;
849
850 if (wp == zones[0].start << SECTOR_SHIFT)
851 zone_end = zones[1].start + zones[1].capacity;
852 else if (wp == zones[1].start << SECTOR_SHIFT)
853 zone_end = zones[0].start + zones[0].capacity;
854 if (zone_end)
855 wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT,
856 BTRFS_SUPER_INFO_SIZE);
857
858 wp -= BTRFS_SUPER_INFO_SIZE;
859 }
860
861 *bytenr_ret = wp;
862 return 0;
863
864}
865
866int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
867 u64 *bytenr_ret)
868{
869 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
870 sector_t zone_sectors;
871 u32 sb_zone;
872 int ret;
873 u8 zone_sectors_shift;
874 sector_t nr_sectors;
875 u32 nr_zones;
876
877 if (!bdev_is_zoned(bdev)) {
878 *bytenr_ret = btrfs_sb_offset(mirror);
879 return 0;
880 }
881
882 ASSERT(rw == READ || rw == WRITE);
883
884 zone_sectors = bdev_zone_sectors(bdev);
885 if (!is_power_of_2(zone_sectors))
886 return -EINVAL;
887 zone_sectors_shift = ilog2(zone_sectors);
888 nr_sectors = bdev_nr_sectors(bdev);
889 nr_zones = nr_sectors >> zone_sectors_shift;
890
891 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
892 if (sb_zone + 1 >= nr_zones)
893 return -ENOENT;
894
895 ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
896 BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
897 zones);
898 if (ret < 0)
899 return ret;
900 if (ret != BTRFS_NR_SB_LOG_ZONES)
901 return -EIO;
902
903 return sb_log_location(bdev, zones, rw, bytenr_ret);
904}
905
906int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
907 u64 *bytenr_ret)
908{
909 struct btrfs_zoned_device_info *zinfo = device->zone_info;
910 u32 zone_num;
911
912 /*
913 * For a zoned filesystem on a non-zoned block device, use the same
914 * super block locations as a regular filesystem. Doing so, the super
915 * block can always be retrieved and the zoned flag of the volume can
916 * be detected from the super block information.
917 */
918 if (!bdev_is_zoned(device->bdev)) {
919 *bytenr_ret = btrfs_sb_offset(mirror);
920 return 0;
921 }
922
923 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
924 if (zone_num + 1 >= zinfo->nr_zones)
925 return -ENOENT;
926
927 return sb_log_location(device->bdev,
928 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
929 rw, bytenr_ret);
930}
931
932static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
933 int mirror)
934{
935 u32 zone_num;
936
937 if (!zinfo)
938 return false;
939
940 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
941 if (zone_num + 1 >= zinfo->nr_zones)
942 return false;
943
944 if (!test_bit(zone_num, zinfo->seq_zones))
945 return false;
946
947 return true;
948}
949
950int btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
951{
952 struct btrfs_zoned_device_info *zinfo = device->zone_info;
953 struct blk_zone *zone;
954 int i;
955
956 if (!is_sb_log_zone(zinfo, mirror))
957 return 0;
958
959 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
960 for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
961 /* Advance to the next zone */
962 if (zone->cond == BLK_ZONE_COND_FULL) {
963 zone++;
964 continue;
965 }
966
967 if (zone->cond == BLK_ZONE_COND_EMPTY)
968 zone->cond = BLK_ZONE_COND_IMP_OPEN;
969
970 zone->wp += SUPER_INFO_SECTORS;
971
972 if (sb_zone_is_full(zone)) {
973 /*
974 * No room left to write a new superblock. Since
975 * superblock is written with REQ_SYNC, it is safe to
976 * finish the zone now.
977 *
978 * If the write pointer is exactly at the capacity,
979 * explicit ZONE_FINISH is not necessary.
980 */
981 if (zone->wp != zone->start + zone->capacity) {
982 int ret;
983
984 ret = blkdev_zone_mgmt(device->bdev,
985 REQ_OP_ZONE_FINISH, zone->start,
986 zone->len, GFP_NOFS);
987 if (ret)
988 return ret;
989 }
990
991 zone->wp = zone->start + zone->len;
992 zone->cond = BLK_ZONE_COND_FULL;
993 }
994 return 0;
995 }
996
997 /* All the zones are FULL. Should not reach here. */
998 ASSERT(0);
999 return -EIO;
1000}
1001
1002int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
1003{
1004 sector_t zone_sectors;
1005 sector_t nr_sectors;
1006 u8 zone_sectors_shift;
1007 u32 sb_zone;
1008 u32 nr_zones;
1009
1010 zone_sectors = bdev_zone_sectors(bdev);
1011 zone_sectors_shift = ilog2(zone_sectors);
1012 nr_sectors = bdev_nr_sectors(bdev);
1013 nr_zones = nr_sectors >> zone_sectors_shift;
1014
1015 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1016 if (sb_zone + 1 >= nr_zones)
1017 return -ENOENT;
1018
1019 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1020 zone_start_sector(sb_zone, bdev),
1021 zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
1022}
1023
1024/*
1025 * Find allocatable zones within a given region.
1026 *
1027 * @device:     the device to allocate a region on
1028 * @hole_start: the starting position of the hole to allocate from
1029 * @hole_end:   the end of the hole
1030 * @num_bytes:  size of the wanted region
1031 * @return:     position of an allocatable region
1032 *
1033 * The allocatable region must not contain any superblock locations.
1034 */
1035u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
1036 u64 hole_end, u64 num_bytes)
1037{
1038 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1039 const u8 shift = zinfo->zone_size_shift;
1040 u64 nzones = num_bytes >> shift;
1041 u64 pos = hole_start;
1042 u64 begin, end;
1043 bool have_sb;
1044 int i;
1045
1046 ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
1047 ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
1048
1049 while (pos < hole_end) {
1050 begin = pos >> shift;
1051 end = begin + nzones;
1052
1053 if (end > zinfo->nr_zones)
1054 return hole_end;
1055
1056 /* Check if zones in the region are all empty */
1057 if (btrfs_dev_is_sequential(device, pos) &&
1058 find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
1059 pos += zinfo->zone_size;
1060 continue;
1061 }
1062
1063 have_sb = false;
1064 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1065 u32 sb_zone;
1066 u64 sb_pos;
1067
1068 sb_zone = sb_zone_number(shift, i);
1069 if (!(end <= sb_zone ||
1070 sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
1071 have_sb = true;
1072 pos = zone_start_physical(
1073 sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1074 break;
1075 }
1076
1077 /* We also need to exclude regular superblock positions */
1078 sb_pos = btrfs_sb_offset(i);
1079 if (!(pos + num_bytes <= sb_pos ||
1080 sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
1081 have_sb = true;
1082 pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
1083 zinfo->zone_size);
1084 break;
1085 }
1086 }
1087 if (!have_sb)
1088 break;
1089 }
1090
1091 return pos;
1092}
1093
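/*
 * Lockless active zone accounting: take one slot from active_zones_left
 * before setting the per-zone bit, and give the slot back if another thread
 * set the bit first. Returns false only when the device is out of active
 * zone slots.
 */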
1094static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos)
1095{
1096 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1097 unsigned int zno = (pos >> zone_info->zone_size_shift);
1098
1099 /* We can use any number of zones */
1100 if (zone_info->max_active_zones == 0)
1101 return true;
1102
1103 if (!test_bit(zno, zone_info->active_zones)) {
1104 /* Active zone left? */
1105 if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
1106 return false;
1107 if (test_and_set_bit(zno, zone_info->active_zones)) {
1108 /* Someone already set the bit */
1109 atomic_inc(&zone_info->active_zones_left);
1110 }
1111 }
1112
1113 return true;
1114}
1115
1116static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos)
1117{
1118 struct btrfs_zoned_device_info *zone_info = device->zone_info;
1119 unsigned int zno = (pos >> zone_info->zone_size_shift);
1120
1121 /* We can use any number of zones */
1122 if (zone_info->max_active_zones == 0)
1123 return;
1124
1125 if (test_and_clear_bit(zno, zone_info->active_zones))
1126 atomic_inc(&zone_info->active_zones_left);
1127}
1128
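/*
 * Reset all device zones in the range [physical, physical + length) and mark
 * them empty and inactive in the in-memory bitmaps. On success *bytes is set
 * to the number of bytes that became writable again.
 */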
1129int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
1130 u64 length, u64 *bytes)
1131{
1132 int ret;
1133
1134 *bytes = 0;
1135 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
1136 physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
1137 GFP_NOFS);
1138 if (ret)
1139 return ret;
1140
1141 *bytes = length;
1142 while (length) {
1143 btrfs_dev_set_zone_empty(device, physical);
1144 btrfs_dev_clear_active_zone(device, physical);
1145 physical += device->zone_info->zone_size;
1146 length -= device->zone_info->zone_size;
1147 }
1148
1149 return 0;
1150}
1151
1152int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1153{
1154 struct btrfs_zoned_device_info *zinfo = device->zone_info;
1155 const u8 shift = zinfo->zone_size_shift;
1156 unsigned long begin = start >> shift;
1157 unsigned long end = (start + size) >> shift;
1158 u64 pos;
1159 int ret;
1160
1161 ASSERT(IS_ALIGNED(start, zinfo->zone_size));
1162 ASSERT(IS_ALIGNED(size, zinfo->zone_size));
1163
1164 if (end > zinfo->nr_zones)
1165 return -ERANGE;
1166
1167 /* All the zones are conventional */
1168 if (find_next_bit(zinfo->seq_zones, end, begin) == end)
1169 return 0;
1170
1171 /* All the zones are sequential and empty */
1172 if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
1173 find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
1174 return 0;
1175
1176 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1177 u64 reset_bytes;
1178
1179 if (!btrfs_dev_is_sequential(device, pos) ||
1180 btrfs_dev_is_empty_zone(device, pos))
1181 continue;
1182
1183 /* Free regions should be empty */
1184 btrfs_warn_in_rcu(
1185 device->fs_info,
1186 "zoned: resetting device %s (devid %llu) zone %llu for allocation",
1187 rcu_str_deref(device->name), device->devid, pos >> shift);
1188 WARN_ON_ONCE(1);
1189
1190 ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
1191 &reset_bytes);
1192 if (ret)
1193 return ret;
1194 }
1195
1196 return 0;
1197}
1198
1199/*
1200 * Calculate an allocation pointer from the extent allocation information
1201 * for a block group consisting of conventional zones. The pointer points
1202 * to the end of the highest addressed extent in the block group and is
1203 * used as the allocation offset.
1204 */
1205static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1206 u64 *offset_ret, bool new)
1207{
1208 struct btrfs_fs_info *fs_info = cache->fs_info;
1209 struct btrfs_root *root;
1210 struct btrfs_path *path;
1211 struct btrfs_key key;
1212 struct btrfs_key found_key;
1213 int ret;
1214 u64 length;
1215
1216 /*
1217 * Avoid tree lookups for a new block group, there's no use for it.
1218 * It must always be 0.
1219 *
1220 * Also, we have a lock chain of extent buffer lock -> chunk mutex.
1221 * For a new block group, this function is called from
1222 * btrfs_make_block_group() which is already taking the chunk mutex.
1223 * Thus, we cannot call calculate_alloc_pointer() which takes extent
1224 * buffer locks to avoid deadlock.
1225 */
1226 if (new) {
1227 *offset_ret = 0;
1228 return 0;
1229 }
1230
1231 path = btrfs_alloc_path();
1232 if (!path)
1233 return -ENOMEM;
1234
1235 key.objectid = cache->start + cache->length;
1236 key.type = 0;
1237 key.offset = 0;
1238
1239 root = btrfs_extent_root(fs_info, key.objectid);
1240 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1241 /* We should not find the exact match */
1242 if (!ret)
1243 ret = -EUCLEAN;
1244 if (ret < 0)
1245 goto out;
1246
1247 ret = btrfs_previous_extent_item(root, path, cache->start);
1248 if (ret) {
1249 if (ret == 1) {
1250 ret = 0;
1251 *offset_ret = 0;
1252 }
1253 goto out;
1254 }
1255
1256 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1257
1258 if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
1259 length = found_key.offset;
1260 else
1261 length = fs_info->nodesize;
1262
1263 if (!(found_key.objectid >= cache->start &&
1264 found_key.objectid + length <= cache->start + cache->length)) {
1265 ret = -EUCLEAN;
1266 goto out;
1267 }
1268 *offset_ret = found_key.objectid + length - cache->start;
1269 ret = 0;
1270
1271out:
1272 btrfs_free_path(path);
1273 return ret;
1274}
1275
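/*
 * Load the zoned allocation state of a block group: derive the per-stripe
 * allocation offset from the device zone write pointer (or, for conventional
 * zones, from the extent tree via calculate_alloc_pointer()) and check that
 * the offsets are consistent for the chunk profile. Only single and
 * metadata DUP profiles are supported here.
 */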
1276int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1277{
1278 struct btrfs_fs_info *fs_info = cache->fs_info;
1279 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1280 struct extent_map *em;
1281 struct map_lookup *map;
1282 struct btrfs_device *device;
1283 u64 logical = cache->start;
1284 u64 length = cache->length;
1285 int ret;
1286 int i;
1287 unsigned int nofs_flag;
1288 u64 *alloc_offsets = NULL;
1289 u64 *caps = NULL;
1290 u64 *physical = NULL;
1291 unsigned long *active = NULL;
1292 u64 last_alloc = 0;
1293 u32 num_sequential = 0, num_conventional = 0;
1294
1295 if (!btrfs_is_zoned(fs_info))
1296 return 0;
1297
1298 /* Sanity check */
1299 if (!IS_ALIGNED(length, fs_info->zone_size)) {
1300 btrfs_err(fs_info,
1301 "zoned: block group %llu len %llu unaligned to zone size %llu",
1302 logical, length, fs_info->zone_size);
1303 return -EIO;
1304 }
1305
1306 /* Get the chunk mapping */
1307 read_lock(&em_tree->lock);
1308 em = lookup_extent_mapping(em_tree, logical, length);
1309 read_unlock(&em_tree->lock);
1310
1311 if (!em)
1312 return -EINVAL;
1313
1314 map = em->map_lookup;
1315
1316 cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
1317 if (!cache->physical_map) {
1318 ret = -ENOMEM;
1319 goto out;
1320 }
1321
1322 alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
1323 if (!alloc_offsets) {
1324 ret = -ENOMEM;
1325 goto out;
1326 }
1327
1328 caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
1329 if (!caps) {
1330 ret = -ENOMEM;
1331 goto out;
1332 }
1333
1334 physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
1335 if (!physical) {
1336 ret = -ENOMEM;
1337 goto out;
1338 }
1339
1340 active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1341 if (!active) {
1342 ret = -ENOMEM;
1343 goto out;
1344 }
1345
1346 for (i = 0; i < map->num_stripes; i++) {
1347 bool is_sequential;
1348 struct blk_zone zone;
1349 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1350 int dev_replace_is_ongoing = 0;
1351
1352 device = map->stripes[i].dev;
1353 physical[i] = map->stripes[i].physical;
1354
1355 if (device->bdev == NULL) {
1356 alloc_offsets[i] = WP_MISSING_DEV;
1357 continue;
1358 }
1359
1360 is_sequential = btrfs_dev_is_sequential(device, physical[i]);
1361 if (is_sequential)
1362 num_sequential++;
1363 else
1364 num_conventional++;
1365
1366 /*
1367 * Consider a zone as active if we can allow any number of
1368 * active zones.
1369 */
1370 if (!device->zone_info->max_active_zones)
1371 __set_bit(i, active);
1372
1373 if (!is_sequential) {
1374 alloc_offsets[i] = WP_CONVENTIONAL;
1375 continue;
1376 }
1377
1378 /*
1379 * This zone will be used for allocation, so mark this zone
1380 * non-empty.
1381 */
1382 btrfs_dev_clear_zone_empty(device, physical[i]);
1383
1384 down_read(&dev_replace->rwsem);
1385 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
1386 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1387 btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
1388 up_read(&dev_replace->rwsem);
1389
1390 /*
1391 * The group is mapped to a sequential zone. Get the zone write
1392 * pointer to determine the allocation offset within the zone.
1393 */
1394 WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
1395 nofs_flag = memalloc_nofs_save();
1396 ret = btrfs_get_dev_zone(device, physical[i], &zone);
1397 memalloc_nofs_restore(nofs_flag);
1398 if (ret == -EIO || ret == -EOPNOTSUPP) {
1399 ret = 0;
1400 alloc_offsets[i] = WP_MISSING_DEV;
1401 continue;
1402 } else if (ret) {
1403 goto out;
1404 }
1405
1406 if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
1407 btrfs_err_in_rcu(fs_info,
1408 "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
1409 zone.start << SECTOR_SHIFT,
1410 rcu_str_deref(device->name), device->devid);
1411 ret = -EIO;
1412 goto out;
1413 }
1414
1415 caps[i] = (zone.capacity << SECTOR_SHIFT);
1416
1417 switch (zone.cond) {
1418 case BLK_ZONE_COND_OFFLINE:
1419 case BLK_ZONE_COND_READONLY:
1420 btrfs_err(fs_info,
1421 "zoned: offline/readonly zone %llu on device %s (devid %llu)",
1422 physical[i] >> device->zone_info->zone_size_shift,
1423 rcu_str_deref(device->name), device->devid);
1424 alloc_offsets[i] = WP_MISSING_DEV;
1425 break;
1426 case BLK_ZONE_COND_EMPTY:
1427 alloc_offsets[i] = 0;
1428 break;
1429 case BLK_ZONE_COND_FULL:
1430 alloc_offsets[i] = caps[i];
1431 break;
1432 default:
1433 /* Partially used zone */
1434 alloc_offsets[i] =
1435 ((zone.wp - zone.start) << SECTOR_SHIFT);
1436 __set_bit(i, active);
1437 break;
1438 }
1439 }
1440
1441 if (num_sequential > 0)
1442 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1443
1444 if (num_conventional > 0) {
1445 /* Zone capacity is always zone size in emulation */
1446 cache->zone_capacity = cache->length;
1447 ret = calculate_alloc_pointer(cache, &last_alloc, new);
1448 if (ret) {
1449 btrfs_err(fs_info,
1450 "zoned: failed to determine allocation offset of bg %llu",
1451 cache->start);
1452 goto out;
1453 } else if (map->num_stripes == num_conventional) {
1454 cache->alloc_offset = last_alloc;
1455 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1456 goto out;
1457 }
1458 }
1459
1460 switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
1461 case 0: /* single */
1462 if (alloc_offsets[0] == WP_MISSING_DEV) {
1463 btrfs_err(fs_info,
1464 "zoned: cannot recover write pointer for zone %llu",
1465 physical[0]);
1466 ret = -EIO;
1467 goto out;
1468 }
1469 cache->alloc_offset = alloc_offsets[0];
1470 cache->zone_capacity = caps[0];
1471 if (test_bit(0, active))
1472 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1473 break;
1474 case BTRFS_BLOCK_GROUP_DUP:
1475 if (map->type & BTRFS_BLOCK_GROUP_DATA) {
1476 btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
1477 ret = -EINVAL;
1478 goto out;
1479 }
1480 if (alloc_offsets[0] == WP_MISSING_DEV) {
1481 btrfs_err(fs_info,
1482 "zoned: cannot recover write pointer for zone %llu",
1483 physical[0]);
1484 ret = -EIO;
1485 goto out;
1486 }
1487 if (alloc_offsets[1] == WP_MISSING_DEV) {
1488 btrfs_err(fs_info,
1489 "zoned: cannot recover write pointer for zone %llu",
1490 physical[1]);
1491 ret = -EIO;
1492 goto out;
1493 }
1494 if (alloc_offsets[0] != alloc_offsets[1]) {
1495 btrfs_err(fs_info,
1496 "zoned: write pointer offset mismatch of zones in DUP profile");
1497 ret = -EIO;
1498 goto out;
1499 }
1500 if (test_bit(0, active) != test_bit(1, active)) {
1501 if (!btrfs_zone_activate(cache)) {
1502 ret = -EIO;
1503 goto out;
1504 }
1505 } else {
1506 if (test_bit(0, active))
1507 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
1508 &cache->runtime_flags);
1509 }
1510 cache->alloc_offset = alloc_offsets[0];
1511 cache->zone_capacity = min(caps[0], caps[1]);
1512 break;
1513 case BTRFS_BLOCK_GROUP_RAID1:
1514 case BTRFS_BLOCK_GROUP_RAID0:
1515 case BTRFS_BLOCK_GROUP_RAID10:
1516 case BTRFS_BLOCK_GROUP_RAID5:
1517 case BTRFS_BLOCK_GROUP_RAID6:
1518 /* non-single profiles are not supported yet */
1519 default:
1520 btrfs_err(fs_info, "zoned: profile %s not yet supported",
1521 btrfs_bg_type_to_raid_name(map->type));
1522 ret = -EINVAL;
1523 goto out;
1524 }
1525
1526out:
1527 if (cache->alloc_offset > fs_info->zone_size) {
1528 btrfs_err(fs_info,
1529 "zoned: invalid write pointer %llu in block group %llu",
1530 cache->alloc_offset, cache->start);
1531 ret = -EIO;
1532 }
1533
1534 if (cache->alloc_offset > cache->zone_capacity) {
1535 btrfs_err(fs_info,
1536"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
1537 cache->alloc_offset, cache->zone_capacity,
1538 cache->start);
1539 ret = -EIO;
1540 }
1541
1542 /* An extent is allocated after the write pointer */
1543 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1544 btrfs_err(fs_info,
1545 "zoned: got wrong write pointer in BG %llu: %llu > %llu",
1546 logical, last_alloc, cache->alloc_offset);
1547 ret = -EIO;
1548 }
1549
1550 if (!ret) {
1551 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1552 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1553 btrfs_get_block_group(cache);
1554 spin_lock(&fs_info->zone_active_bgs_lock);
1555 list_add_tail(&cache->active_bg_list,
1556 &fs_info->zone_active_bgs);
1557 spin_unlock(&fs_info->zone_active_bgs_lock);
1558 }
1559 } else {
1560 kfree(cache->physical_map);
1561 cache->physical_map = NULL;
1562 }
1563 bitmap_free(active);
1564 kfree(physical);
1565 kfree(caps);
1566 kfree(alloc_offsets);
1567 free_extent_map(em);
1568
1569 return ret;
1570}
1571
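/*
 * Example of the accounting below, assuming a 256 MiB zone with a 96 MiB
 * usable capacity, alloc_offset of 64 MiB and 40 MiB of used space:
 * unusable = (64 - 40) + (256 - 96) = 184 MiB, free = 96 - 64 = 32 MiB,
 * and used + free + unusable adds up to the 256 MiB block group length.
 */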
1572void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1573{
1574 u64 unusable, free;
1575
1576 if (!btrfs_is_zoned(cache->fs_info))
1577 return;
1578
1579 WARN_ON(cache->bytes_super != 0);
1580 unusable = (cache->alloc_offset - cache->used) +
1581 (cache->length - cache->zone_capacity);
1582 free = cache->zone_capacity - cache->alloc_offset;
1583
1584 /* We only need ->free_space in ALLOC_SEQ block groups */
1585 cache->cached = BTRFS_CACHE_FINISHED;
1586 cache->free_space_ctl->free_space = free;
1587 cache->zone_unusable = unusable;
1588}
1589
1590void btrfs_redirty_list_add(struct btrfs_transaction *trans,
1591 struct extent_buffer *eb)
1592{
1593 struct btrfs_fs_info *fs_info = eb->fs_info;
1594
1595 if (!btrfs_is_zoned(fs_info) ||
1596 btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
1597 !list_empty(&eb->release_list))
1598 return;
1599
1600 set_extent_buffer_dirty(eb);
1601 set_extent_bits_nowait(&trans->dirty_pages, eb->start,
1602 eb->start + eb->len - 1, EXTENT_DIRTY);
1603 memzero_extent_buffer(eb, 0, eb->len);
1604 set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
1605
1606 spin_lock(&trans->releasing_ebs_lock);
1607 list_add_tail(&eb->release_list, &trans->releasing_ebs);
1608 spin_unlock(&trans->releasing_ebs_lock);
1609 atomic_inc(&eb->refs);
1610}
1611
1612void btrfs_free_redirty_list(struct btrfs_transaction *trans)
1613{
1614 spin_lock(&trans->releasing_ebs_lock);
1615 while (!list_empty(&trans->releasing_ebs)) {
1616 struct extent_buffer *eb;
1617
1618 eb = list_first_entry(&trans->releasing_ebs,
1619 struct extent_buffer, release_list);
1620 list_del_init(&eb->release_list);
1621 free_extent_buffer(eb);
1622 }
1623 spin_unlock(&trans->releasing_ebs_lock);
1624}
1625
1626bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
1627{
1628 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1629 struct btrfs_block_group *cache;
1630 bool ret = false;
1631
1632 if (!btrfs_is_zoned(fs_info))
1633 return false;
1634
1635 if (!is_data_inode(&inode->vfs_inode))
1636 return false;
1637
1638 /*
1639 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
1640 * extent layout the relocation code has.
1641 * Furthermore we have set aside our own block group from which only the
1642 * relocation "process" can allocate and make sure only one process at a
1643 * time can add pages to an extent that gets relocated, so it's safe to
1644 * use regular REQ_OP_WRITE for this special case.
1645 */
1646 if (btrfs_is_data_reloc_root(inode->root))
1647 return false;
1648
1649 cache = btrfs_lookup_block_group(fs_info, start);
1650 ASSERT(cache);
1651 if (!cache)
1652 return false;
1653
1654 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1655 btrfs_put_block_group(cache);
1656
1657 return ret;
1658}
1659
1660void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
1661 struct bio *bio)
1662{
1663 struct btrfs_ordered_extent *ordered;
1664 const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
1665
1666 if (bio_op(bio) != REQ_OP_ZONE_APPEND)
1667 return;
1668
1669 ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
1670 if (WARN_ON(!ordered))
1671 return;
1672
1673 ordered->physical = physical;
1674 ordered->bdev = bio->bi_bdev;
1675
1676 btrfs_put_ordered_extent(ordered);
1677}
1678
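/*
 * A ZONE_APPEND bio only learns its final physical location at completion.
 * Map that physical location back to a logical address with
 * btrfs_rmap_block() and, if it differs from the originally assigned one,
 * rewrite the ordered extent, the cached extent map and the checksum
 * records to match.
 */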
1679void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
1680{
1681 struct btrfs_inode *inode = BTRFS_I(ordered->inode);
1682 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1683 struct extent_map_tree *em_tree;
1684 struct extent_map *em;
1685 struct btrfs_ordered_sum *sum;
1686 u64 orig_logical = ordered->disk_bytenr;
1687 u64 *logical = NULL;
1688 int nr, stripe_len;
1689
1690 /* Zoned devices should not have partitions. So, we can assume the partition start offset is 0. */
1691 ASSERT(!bdev_is_partition(ordered->bdev));
1692 if (WARN_ON(!ordered->bdev))
1693 return;
1694
1695 if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
1696 ordered->physical, &logical, &nr,
1697 &stripe_len)))
1698 goto out;
1699
1700 WARN_ON(nr != 1);
1701
1702 if (orig_logical == *logical)
1703 goto out;
1704
1705 ordered->disk_bytenr = *logical;
1706
1707 em_tree = &inode->extent_tree;
1708 write_lock(&em_tree->lock);
1709 em = search_extent_mapping(em_tree, ordered->file_offset,
1710 ordered->num_bytes);
1711 em->block_start = *logical;
1712 free_extent_map(em);
1713 write_unlock(&em_tree->lock);
1714
1715 list_for_each_entry(sum, &ordered->list, list) {
1716 if (*logical < orig_logical)
1717 sum->bytenr -= orig_logical - *logical;
1718 else
1719 sum->bytenr += *logical - orig_logical;
1720 }
1721
1722out:
1723 kfree(logical);
1724}
1725
1726bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
1727 struct extent_buffer *eb,
1728 struct btrfs_block_group **cache_ret)
1729{
1730 struct btrfs_block_group *cache;
1731 bool ret = true;
1732
1733 if (!btrfs_is_zoned(fs_info))
1734 return true;
1735
1736 cache = btrfs_lookup_block_group(fs_info, eb->start);
1737 if (!cache)
1738 return true;
1739
1740 if (cache->meta_write_pointer != eb->start) {
1741 btrfs_put_block_group(cache);
1742 cache = NULL;
1743 ret = false;
1744 } else {
1745 cache->meta_write_pointer = eb->start + eb->len;
1746 }
1747
1748 *cache_ret = cache;
1749
1750 return ret;
1751}
1752
1753void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
1754 struct extent_buffer *eb)
1755{
1756 if (!btrfs_is_zoned(eb->fs_info) || !cache)
1757 return;
1758
1759 ASSERT(cache->meta_write_pointer == eb->start + eb->len);
1760 cache->meta_write_pointer = eb->start;
1761}
1762
1763int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
1764{
1765 if (!btrfs_dev_is_sequential(device, physical))
1766 return -EOPNOTSUPP;
1767
1768 return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
1769 length >> SECTOR_SHIFT, GFP_NOFS, 0);
1770}
1771
1772static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
1773 struct blk_zone *zone)
1774{
1775 struct btrfs_io_context *bioc = NULL;
1776 u64 mapped_length = PAGE_SIZE;
1777 unsigned int nofs_flag;
1778 int nmirrors;
1779 int i, ret;
1780
1781 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
1782 &mapped_length, &bioc);
1783 if (ret || !bioc || mapped_length < PAGE_SIZE) {
1784 ret = -EIO;
1785 goto out_put_bioc;
1786 }
1787
1788 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1789 ret = -EINVAL;
1790 goto out_put_bioc;
1791 }
1792
1793 nofs_flag = memalloc_nofs_save();
1794 nmirrors = (int)bioc->num_stripes;
1795 for (i = 0; i < nmirrors; i++) {
1796 u64 physical = bioc->stripes[i].physical;
1797 struct btrfs_device *dev = bioc->stripes[i].dev;
1798
1799 /* Missing device */
1800 if (!dev->bdev)
1801 continue;
1802
1803 ret = btrfs_get_dev_zone(dev, physical, zone);
1804 /* Failing device */
1805 if (ret == -EIO || ret == -EOPNOTSUPP)
1806 continue;
1807 break;
1808 }
1809 memalloc_nofs_restore(nofs_flag);
1810out_put_bioc:
1811 btrfs_put_bioc(bioc);
1812 return ret;
1813}
1814
1815/*
1816 * Synchronize the write pointer in a zone at @physical_start on @tgt_dev, by
1817 * filling zeros from @physical_pos up to the write pointer of the dev-replace
1818 * source device.
1819 */
1820int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
1821 u64 physical_start, u64 physical_pos)
1822{
1823 struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
1824 struct blk_zone zone;
1825 u64 length;
1826 u64 wp;
1827 int ret;
1828
1829 if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
1830 return 0;
1831
1832 ret = read_zone_info(fs_info, logical, &zone);
1833 if (ret)
1834 return ret;
1835
1836 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
1837
1838 if (physical_pos == wp)
1839 return 0;
1840
1841 if (physical_pos > wp)
1842 return -EUCLEAN;
1843
1844 length = wp - physical_pos;
1845 return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
1846}
1847
1848struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
1849 u64 logical, u64 length)
1850{
1851 struct btrfs_device *device;
1852 struct extent_map *em;
1853 struct map_lookup *map;
1854
1855 em = btrfs_get_chunk_map(fs_info, logical, length);
1856 if (IS_ERR(em))
1857 return ERR_CAST(em);
1858
1859 map = em->map_lookup;
1860 /* We only support single profile for now */
1861 device = map->stripes[0].dev;
1862
1863 free_extent_map(em);
1864
1865 return device;
1866}
1867
1868/*
1869 * Activate block group and underlying device zones
1870 *
1871 * @block_group: the block group to activate
1872 *
1873 * Return: true on success, false otherwise
1874 */
1875bool btrfs_zone_activate(struct btrfs_block_group *block_group)
1876{
1877 struct btrfs_fs_info *fs_info = block_group->fs_info;
1878 struct btrfs_space_info *space_info = block_group->space_info;
1879 struct map_lookup *map;
1880 struct btrfs_device *device;
1881 u64 physical;
1882 bool ret;
1883 int i;
1884
1885 if (!btrfs_is_zoned(block_group->fs_info))
1886 return true;
1887
1888 map = block_group->physical_map;
1889
1890 spin_lock(&space_info->lock);
1891 spin_lock(&block_group->lock);
1892 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
1893 ret = true;
1894 goto out_unlock;
1895 }
1896
1897 /* No space left */
1898 if (btrfs_zoned_bg_is_full(block_group)) {
1899 ret = false;
1900 goto out_unlock;
1901 }
1902
1903 for (i = 0; i < map->num_stripes; i++) {
1904 device = map->stripes[i].dev;
1905 physical = map->stripes[i].physical;
1906
1907 if (device->zone_info->max_active_zones == 0)
1908 continue;
1909
1910 if (!btrfs_dev_set_active_zone(device, physical)) {
1911 /* Cannot activate the zone */
1912 ret = false;
1913 goto out_unlock;
1914 }
1915 }
1916
1917 /* Successfully activated all the zones */
1918 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
1919 space_info->active_total_bytes += block_group->length;
1920 spin_unlock(&block_group->lock);
1921 btrfs_try_granting_tickets(fs_info, space_info);
1922 spin_unlock(&space_info->lock);
1923
1924 /* For the active block group list */
1925 btrfs_get_block_group(block_group);
1926
1927 spin_lock(&fs_info->zone_active_bgs_lock);
1928 list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
1929 spin_unlock(&fs_info->zone_active_bgs_lock);
1930
1931 return true;
1932
1933out_unlock:
1934 spin_unlock(&block_group->lock);
1935 spin_unlock(&space_info->lock);
1936 return ret;
1937}
1938
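/*
 * Wait for writeback of every extent buffer that lives in the block group's
 * logical range, walking the buffer_radix tree, so that a following
 * ZONE_FINISH does not race with in-flight metadata writes.
 */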
1939static void wait_eb_writebacks(struct btrfs_block_group *block_group)
1940{
1941 struct btrfs_fs_info *fs_info = block_group->fs_info;
1942 const u64 end = block_group->start + block_group->length;
1943 struct radix_tree_iter iter;
1944 struct extent_buffer *eb;
1945 void __rcu **slot;
1946
1947 rcu_read_lock();
1948 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
1949 block_group->start >> fs_info->sectorsize_bits) {
1950 eb = radix_tree_deref_slot(slot);
1951 if (!eb)
1952 continue;
1953 if (radix_tree_deref_retry(eb)) {
1954 slot = radix_tree_iter_retry(&iter);
1955 continue;
1956 }
1957
1958 if (eb->start < block_group->start)
1959 continue;
1960 if (eb->start >= end)
1961 break;
1962
1963 slot = radix_tree_iter_resume(slot, &iter);
1964 rcu_read_unlock();
1965 wait_on_extent_buffer_writeback(eb);
1966 rcu_read_lock();
1967 }
1968 rcu_read_unlock();
1969}
1970
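/*
 * Finish the zones backing a block group. Unless the caller knows the group
 * is fully written, the group is first marked read-only and outstanding
 * reservations, ordered extents and extent buffer writeback are waited for.
 * The allocation offset is then pushed to the zone capacity,
 * REQ_OP_ZONE_FINISH is issued on every stripe with active zone tracking,
 * and the group is dropped from the active block group list.
 */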
1971static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
1972{
1973 struct btrfs_fs_info *fs_info = block_group->fs_info;
1974 struct map_lookup *map;
1975 const bool is_metadata = (block_group->flags &
1976 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
1977 int ret = 0;
1978 int i;
1979
1980 spin_lock(&block_group->lock);
1981 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
1982 spin_unlock(&block_group->lock);
1983 return 0;
1984 }
1985
1986 /* Check if we have unwritten allocated space */
1987 if (is_metadata &&
1988 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
1989 spin_unlock(&block_group->lock);
1990 return -EAGAIN;
1991 }
1992
1993	/*
1994	 * If we are sure that the block group is full (i.e. no more room is
1995	 * left for new allocations) and the IO for the last usable block has
1996	 * completed, we don't need to wait for the remaining IOs. This holds
1997	 * because we enforce sequential IO submission using the ZONE_APPEND
1998	 * command for data and block_group->meta_write_pointer for metadata.
1999	 */
2000 if (!fully_written) {
2001 spin_unlock(&block_group->lock);
2002
2003 ret = btrfs_inc_block_group_ro(block_group, false);
2004 if (ret)
2005 return ret;
2006
2007 /* Ensure all writes in this block group finish */
2008 btrfs_wait_block_group_reservations(block_group);
2009		/* No need to wait for NOCOW writers; zoned mode does not allow NOCOW. */
2010 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
2011 block_group->length);
2012 /* Wait for extent buffers to be written. */
2013 if (is_metadata)
2014 wait_eb_writebacks(block_group);
2015
2016 spin_lock(&block_group->lock);
2017
2018		/*
2019		 * Bail out if someone already deactivated the block group, or if
2020		 * there is still reserved space left in the block group.
2021		 */
2022 if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2023 &block_group->runtime_flags)) {
2024 spin_unlock(&block_group->lock);
2025 btrfs_dec_block_group_ro(block_group);
2026 return 0;
2027 }
2028
2029 if (block_group->reserved) {
2030 spin_unlock(&block_group->lock);
2031 btrfs_dec_block_group_ro(block_group);
2032 return -EAGAIN;
2033 }
2034 }
2035
2036 clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
2037 block_group->alloc_offset = block_group->zone_capacity;
2038 block_group->free_space_ctl->free_space = 0;
2039 btrfs_clear_treelog_bg(block_group);
2040 btrfs_clear_data_reloc_bg(block_group);
2041 spin_unlock(&block_group->lock);
2042
2043 map = block_group->physical_map;
2044 for (i = 0; i < map->num_stripes; i++) {
2045 struct btrfs_device *device = map->stripes[i].dev;
2046 const u64 physical = map->stripes[i].physical;
2047
2048 if (device->zone_info->max_active_zones == 0)
2049 continue;
2050
2051 ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
2052 physical >> SECTOR_SHIFT,
2053 device->zone_info->zone_size >> SECTOR_SHIFT,
2054 GFP_NOFS);
2055
2056 if (ret)
2057 return ret;
2058
2059 btrfs_dev_clear_active_zone(device, physical);
2060 }
2061
2062 if (!fully_written)
2063 btrfs_dec_block_group_ro(block_group);
2064
2065 spin_lock(&fs_info->zone_active_bgs_lock);
2066 ASSERT(!list_empty(&block_group->active_bg_list));
2067 list_del_init(&block_group->active_bg_list);
2068 spin_unlock(&fs_info->zone_active_bgs_lock);
2069
2070 /* For active_bg_list */
2071 btrfs_put_block_group(block_group);
2072
2073 clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2074
2075 return 0;
2076}
2077
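/*
 * Finish a block group even if it is not yet fully written. A no-op
 * (returning 0) on regular, non-zoned filesystems.
 */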
2078int btrfs_zone_finish(struct btrfs_block_group *block_group)
2079{
2080 if (!btrfs_is_zoned(block_group->fs_info))
2081 return 0;
2082
2083 return do_zone_finish(block_group, false);
2084}
2085
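/*
 * Check whether at least one device in @fs_devices can still activate a new
 * zone, i.e. it either has no active zone limit or has active zone resources
 * left. If no device qualifies, set BTRFS_FS_NEED_ZONE_FINISH; the flag is
 * cleared and its waiters are woken once do_zone_finish() deactivates a
 * block group.
 */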
2086bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
2087{
2088 struct btrfs_fs_info *fs_info = fs_devices->fs_info;
2089 struct btrfs_device *device;
2090 bool ret = false;
2091
2092 if (!btrfs_is_zoned(fs_info))
2093 return true;
2094
2095 /* Check if there is a device with active zones left */
2096 mutex_lock(&fs_info->chunk_mutex);
2097 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
2098 struct btrfs_zoned_device_info *zinfo = device->zone_info;
2099
2100 if (!device->bdev)
2101 continue;
2102
2103 if (!zinfo->max_active_zones ||
2104 atomic_read(&zinfo->active_zones_left)) {
2105 ret = true;
2106 break;
2107 }
2108 }
2109 mutex_unlock(&fs_info->chunk_mutex);
2110
2111 if (!ret)
2112 set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
2113
2114 return ret;
2115}
2116
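/*
 * Called when a write to a zoned block group completes. If the range ending
 * at @logical + @length leaves less than the minimum allocation unit
 * (sectorsize for data, nodesize for metadata) of room below the zone
 * capacity, finish the block group right away to release its active zone
 * resource.
 */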
2117void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
2118{
2119 struct btrfs_block_group *block_group;
2120 u64 min_alloc_bytes;
2121
2122 if (!btrfs_is_zoned(fs_info))
2123 return;
2124
2125 block_group = btrfs_lookup_block_group(fs_info, logical);
2126 ASSERT(block_group);
2127
2128 /* No MIXED_BG on zoned btrfs. */
2129 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
2130 min_alloc_bytes = fs_info->sectorsize;
2131 else
2132 min_alloc_bytes = fs_info->nodesize;
2133
2134 /* Bail out if we can allocate more data from this block group. */
2135 if (logical + length + min_alloc_bytes <=
2136 block_group->start + block_group->zone_capacity)
2137 goto out;
2138
2139 do_zone_finish(block_group, true);
2140
2141out:
2142 btrfs_put_block_group(block_group);
2143}
2144
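/*
 * Deferred work for btrfs_schedule_zone_finish_bg(): wait for the block
 * group's last extent buffer to finish writeback, then try to finish the
 * block group and drop the references taken when the work was queued.
 */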
2145static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
2146{
2147 struct btrfs_block_group *bg =
2148 container_of(work, struct btrfs_block_group, zone_finish_work);
2149
2150 wait_on_extent_buffer_writeback(bg->last_eb);
2151 free_extent_buffer(bg->last_eb);
2152 btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
2153 btrfs_put_block_group(bg);
2154}
2155
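/*
 * If writing @eb leaves no room for another extent buffer below the zone
 * capacity, queue work to finish the block group once @eb's writeback has
 * completed. Takes a reference on both the block group and the extent
 * buffer; both are dropped in the work function.
 */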
2156void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
2157 struct extent_buffer *eb)
2158{
2159 if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
2160 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2161 return;
2162
2163 if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
2164 btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
2165 bg->start);
2166 return;
2167 }
2168
2169 /* For the work */
2170 btrfs_get_block_group(bg);
2171 atomic_inc(&eb->refs);
2172 bg->last_eb = eb;
2173 INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
2174 queue_work(system_unbound_wq, &bg->zone_finish_work);
2175}
2176
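/*
 * Clear fs_info->data_reloc_bg if it currently points at @bg, so that the
 * block group is no longer treated as the dedicated data relocation block
 * group.
 */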
2177void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
2178{
2179 struct btrfs_fs_info *fs_info = bg->fs_info;
2180
2181 spin_lock(&fs_info->relocation_bg_lock);
2182 if (fs_info->data_reloc_bg == bg->start)
2183 fs_info->data_reloc_bg = 0;
2184 spin_unlock(&fs_info->relocation_bg_lock);
2185}
2186
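/*
 * Free the cached zone report (zone_info->zone_cache) of every device. The
 * cache only speeds up repeated zone queries while the filesystem is being
 * set up, so it can be dropped afterwards to save memory.
 */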
2187void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
2188{
2189 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2190 struct btrfs_device *device;
2191
2192 if (!btrfs_is_zoned(fs_info))
2193 return;
2194
2195 mutex_lock(&fs_devices->device_list_mutex);
2196 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2197 if (device->zone_info) {
2198 vfree(device->zone_info->zone_cache);
2199 device->zone_info->zone_cache = NULL;
2200 }
2201 }
2202 mutex_unlock(&fs_devices->device_list_mutex);
2203}
2204
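/*
 * Decide whether block group reclaim should run: return true once the ratio
 * of bytes used to total device bytes reaches fs_info->bg_reclaim_threshold
 * percent. A threshold of 0 disables reclaim. For example, with a 75%
 * threshold and 160GiB used out of 200GiB of device space, 160/200 = 80%,
 * so reclaim is triggered.
 */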
2205bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
2206{
2207 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2208 struct btrfs_device *device;
2209 u64 used = 0;
2210 u64 total = 0;
2211 u64 factor;
2212
2213 ASSERT(btrfs_is_zoned(fs_info));
2214
2215 if (fs_info->bg_reclaim_threshold == 0)
2216 return false;
2217
2218 mutex_lock(&fs_devices->device_list_mutex);
2219 list_for_each_entry(device, &fs_devices->devices, dev_list) {
2220 if (!device->bdev)
2221 continue;
2222
2223 total += device->disk_total_bytes;
2224 used += device->bytes_used;
2225 }
2226 mutex_unlock(&fs_devices->device_list_mutex);
2227
2228 factor = div64_u64(used * 100, total);
2229 return factor >= fs_info->bg_reclaim_threshold;
2230}
2231
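/*
 * Once the last relocation extent ending at @logical + @length is written,
 * clear BLOCK_GROUP_FLAG_ZONED_DATA_RELOC so that the block group becomes
 * available for regular data allocations again.
 */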
2232void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
2233 u64 length)
2234{
2235 struct btrfs_block_group *block_group;
2236
2237 if (!btrfs_is_zoned(fs_info))
2238 return;
2239
2240 block_group = btrfs_lookup_block_group(fs_info, logical);
2241	/* This should only be called on a former data relocation block group. */
2242 ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
2243
2244 spin_lock(&block_group->lock);
2245 if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
2246 goto out;
2247
2248 /* All relocation extents are written. */
2249 if (block_group->start + block_group->alloc_offset == logical + length) {
2250 /* Now, release this block group for further allocations. */
2251 clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
2252 &block_group->runtime_flags);
2253 }
2254
2255out:
2256 spin_unlock(&block_group->lock);
2257 btrfs_put_block_group(block_group);
2258}
2259
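/*
 * Pick the active block group with the least remaining space (skipping
 * SYSTEM block groups and block groups with reserved bytes) and finish it
 * to release an active zone resource.
 *
 * Returns 1 if a block group was finished, 0 if there was no candidate, or
 * a negative errno on failure.
 *
 * A caller that needs more active zone resources could, for instance, loop
 * (need_more_active_zones() is a hypothetical helper):
 *
 *	while (need_more_active_zones(fs_info)) {
 *		int ret = btrfs_zone_finish_one_bg(fs_info);
 *
 *		if (ret <= 0)
 *			break;
 *	}
 *
 * btrfs_zoned_activate_one_bg() below uses this pattern to make room for
 * activating another block group.
 */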
2260int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
2261{
2262 struct btrfs_block_group *block_group;
2263 struct btrfs_block_group *min_bg = NULL;
2264 u64 min_avail = U64_MAX;
2265 int ret;
2266
2267 spin_lock(&fs_info->zone_active_bgs_lock);
2268 list_for_each_entry(block_group, &fs_info->zone_active_bgs,
2269 active_bg_list) {
2270 u64 avail;
2271
2272 spin_lock(&block_group->lock);
2273 if (block_group->reserved ||
2274 (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
2275 spin_unlock(&block_group->lock);
2276 continue;
2277 }
2278
2279 avail = block_group->zone_capacity - block_group->alloc_offset;
2280 if (min_avail > avail) {
2281 if (min_bg)
2282 btrfs_put_block_group(min_bg);
2283 min_bg = block_group;
2284 min_avail = avail;
2285 btrfs_get_block_group(min_bg);
2286 }
2287 spin_unlock(&block_group->lock);
2288 }
2289 spin_unlock(&fs_info->zone_active_bgs_lock);
2290
2291 if (!min_bg)
2292 return 0;
2293
2294 ret = btrfs_zone_finish(min_bg);
2295 btrfs_put_block_group(min_bg);
2296
2297 return ret < 0 ? ret : 1;
2298}
2299
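/*
 * Try to activate one existing metadata or SYSTEM block group of
 * @space_info; data block groups are not handled here. If nothing can be
 * activated and @do_finish is set, finish the fullest active block group
 * via btrfs_zone_finish_one_bg() and retry until either an activation
 * succeeds or no more progress can be made.
 *
 * Returns 1 if a block group was activated, 0 otherwise, or a negative
 * errno on failure.
 */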
2300int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
2301 struct btrfs_space_info *space_info,
2302 bool do_finish)
2303{
2304 struct btrfs_block_group *bg;
2305 int index;
2306
2307 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
2308 return 0;
2309
2310 /* No more block groups to activate */
2311 if (space_info->active_total_bytes == space_info->total_bytes)
2312 return 0;
2313
2314 for (;;) {
2315 int ret;
2316 bool need_finish = false;
2317
2318 down_read(&space_info->groups_sem);
2319 for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
2320 list_for_each_entry(bg, &space_info->block_groups[index],
2321 list) {
2322 if (!spin_trylock(&bg->lock))
2323 continue;
2324 if (btrfs_zoned_bg_is_full(bg) ||
2325 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2326 &bg->runtime_flags)) {
2327 spin_unlock(&bg->lock);
2328 continue;
2329 }
2330 spin_unlock(&bg->lock);
2331
2332 if (btrfs_zone_activate(bg)) {
2333 up_read(&space_info->groups_sem);
2334 return 1;
2335 }
2336
2337 need_finish = true;
2338 }
2339 }
2340 up_read(&space_info->groups_sem);
2341
2342 if (!do_finish || !need_finish)
2343 break;
2344
2345 ret = btrfs_zone_finish_one_bg(fs_info);
2346 if (ret == 0)
2347 break;
2348 if (ret < 0)
2349 return ret;
2350 }
2351
2352 return 0;
2353}