Skip to content

Commit 70fc38f

Browse files
naota authored and kdave committed
btrfs-progs: zoned: factor out btrfs_load_zone_info()
Now that, we have zone capacity and (basic) zone activeness support. It's time to factor out btrfs_load_zone_info() as same as the kernel side. Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com> Signed-off-by: David Sterba <dsterba@suse.com>
1 parent edee42b commit 70fc38f

File tree

1 file changed

+71
-53
lines changed

1 file changed

+71
-53
lines changed

kernel-shared/zoned.c

Lines changed: 71 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -891,10 +891,76 @@ struct zone_info {
891891
u64 alloc_offset;
892892
};
893893

894+
/*
 * Load the zone information for one stripe of a block group mapping.
 *
 * @fs_info:  filesystem context (provides zone_size)
 * @zone_idx: index of the stripe in @map whose zone is loaded; also the bit
 *            set in @active when the zone counts as active
 * @info:     output: physical offset, allocation offset and zone capacity
 * @active:   bitmap of active stripes, updated by this function
 * @map:      chunk mapping whose stripes are inspected
 *
 * Returns 0 on success (including missing-device and conventional-zone
 * cases, which are reported through info->alloc_offset sentinels
 * WP_MISSING_DEV / WP_CONVENTIONAL), or -EIO on an unexpected
 * conventional zone.
 */
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
				struct zone_info *info, unsigned long *active,
				struct map_lookup *map)
{
	struct btrfs_device *device;
	struct blk_zone zone;

	info->physical = map->stripes[zone_idx].physical;

	device = map->stripes[zone_idx].dev;

	/* fd == -1 marks a missing device in btrfs-progs; not an error here. */
	if (device->fd == -1) {
		info->alloc_offset = WP_MISSING_DEV;
		return 0;
	}

	/* Consider a zone as active if we can allow any number of active zones. */
	if (!device->zone_info->max_active_zones)
		set_bit(zone_idx, active);

	/*
	 * A conventional zone has no write pointer; report the full zone
	 * size as capacity and let the caller handle the allocation offset.
	 */
	if (!btrfs_dev_is_sequential(device, info->physical)) {
		info->alloc_offset = WP_CONVENTIONAL;
		info->capacity = device->zone_info->zone_size;
		return 0;
	}

	/*
	 * The group is mapped to a sequential zone. Get the zone write
	 * pointer to determine the allocation offset within the zone.
	 */
	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
	zone = device->zone_info->zones[info->physical / fs_info->zone_size];

	/*
	 * btrfs_dev_is_sequential() said this zone is sequential, so a
	 * conventional zone in the cached report indicates corruption or a
	 * stale cache — hard error.
	 */
	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
		error("zoned: unexpected conventional zone %llu on device %s (devid %llu)",
		      zone.start << SECTOR_SHIFT, device->name,
		      device->devid);
		return -EIO;
	}

	/* zone.capacity is in 512-byte sectors; convert to bytes. */
	info->capacity = (zone.capacity << SECTOR_SHIFT);

	switch (zone.cond) {
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
		/* Unusable zone: treat like a missing device for allocation. */
		error(
	"zoned: offline/readonly zone %llu on device %s (devid %llu)",
		      info->physical / fs_info->zone_size, device->name,
		      device->devid);
		info->alloc_offset = WP_MISSING_DEV;
		break;
	case BLK_ZONE_COND_EMPTY:
		info->alloc_offset = 0;
		break;
	case BLK_ZONE_COND_FULL:
		info->alloc_offset = fs_info->zone_size;
		break;
	default:
		/* Partially used zone */
		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
		set_bit(zone_idx, active);
		break;
	}

	return 0;
}
960+
894961
int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
895962
struct btrfs_block_group *cache)
896963
{
897-
struct btrfs_device *device;
898964
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
899965
struct cache_extent *ce;
900966
struct map_lookup *map;
@@ -944,60 +1010,12 @@ int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
9441010
}
9451011

9461012
for (i = 0; i < map->num_stripes; i++) {
947-
struct zone_info *info = &zone_info[i];
948-
bool is_sequential;
949-
struct blk_zone zone;
950-
951-
device = map->stripes[i].dev;
952-
info->physical = map->stripes[i].physical;
953-
954-
if (device->fd == -1) {
955-
info->alloc_offset = WP_MISSING_DEV;
956-
continue;
957-
}
958-
959-
/* Consider a zone as active if we can allow any number of active zones. */
960-
if (!device->zone_info->max_active_zones)
961-
set_bit(i, active);
1013+
ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
1014+
if (ret)
1015+
goto out;
9621016

963-
is_sequential = btrfs_dev_is_sequential(device, info->physical);
964-
if (!is_sequential) {
1017+
if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
9651018
num_conventional++;
966-
info->alloc_offset = WP_CONVENTIONAL;
967-
info->capacity = device->zone_info->zone_size;
968-
continue;
969-
}
970-
971-
/*
972-
* The group is mapped to a sequential zone. Get the zone write
973-
* pointer to determine the allocation offset within the zone.
974-
*/
975-
WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
976-
zone = device->zone_info->zones[info->physical / fs_info->zone_size];
977-
978-
info->capacity = (zone.capacity << SECTOR_SHIFT);
979-
980-
switch (zone.cond) {
981-
case BLK_ZONE_COND_OFFLINE:
982-
case BLK_ZONE_COND_READONLY:
983-
error(
984-
"zoned: offline/readonly zone %llu on device %s (devid %llu)",
985-
info->physical / fs_info->zone_size, device->name,
986-
device->devid);
987-
info->alloc_offset = WP_MISSING_DEV;
988-
break;
989-
case BLK_ZONE_COND_EMPTY:
990-
info->alloc_offset = 0;
991-
break;
992-
case BLK_ZONE_COND_FULL:
993-
info->alloc_offset = fs_info->zone_size;
994-
break;
995-
default:
996-
/* Partially used zone */
997-
info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
998-
set_bit(i, active);
999-
break;
1000-
}
10011019
}
10021020

10031021
if (num_conventional > 0) {

0 commit comments

Comments
 (0)