Skip to content

Commit edee42b

Browse files
naotakdave
authored and committed
btrfs-progs: zoned: activate block group on loading
Introduce a "zone_is_active" member to struct btrfs_block_group and activate it on loading a block group. Note that the activeness check for extent allocation is currently not implemented. Checking activeness would require activating a non-active block group at extent allocation time, which in turn requires finishing a zone when the active zone limit is hit. Since mkfs should not hit the limit, implementing the zone-finishing code is not necessary at the moment. Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com> Signed-off-by: David Sterba <dsterba@suse.com>
1 parent b2b9099 commit edee42b

File tree

2 files changed

+16
-0
lines changed

2 files changed

+16
-0
lines changed

kernel-shared/ctree.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -286,6 +286,7 @@ struct btrfs_block_group {
286286
u64 alloc_offset;
287287
u64 write_offset;
288288
u64 zone_capacity;
289+
bool zone_is_active;
289290

290291
u64 global_root_id;
291292
};

kernel-shared/zoned.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -901,6 +901,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
901901
u64 logical = cache->start;
902902
u64 length = cache->length;
903903
struct zone_info *zone_info = NULL;
904+
unsigned long *active = NULL;
904905
int ret = 0;
905906
int i;
906907
u64 last_alloc = 0;
@@ -935,6 +936,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
935936
return -ENOMEM;
936937
}
937938

939+
active = bitmap_zalloc(map->num_stripes);
940+
if (!active) {
941+
free(zone_info);
942+
error_msg(ERROR_MSG_MEMORY, "active bitmap");
943+
return -ENOMEM;
944+
}
945+
938946
for (i = 0; i < map->num_stripes; i++) {
939947
struct zone_info *info = &zone_info[i];
940948
bool is_sequential;
@@ -948,6 +956,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
948956
continue;
949957
}
950958

959+
/* Consider a zone as active if we can allow any number of active zones. */
960+
if (!device->zone_info->max_active_zones)
961+
set_bit(i, active);
962+
951963
is_sequential = btrfs_dev_is_sequential(device, info->physical);
952964
if (!is_sequential) {
953965
num_conventional++;
@@ -983,6 +995,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
983995
default:
984996
/* Partially used zone */
985997
info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
998+
set_bit(i, active);
986999
break;
9871000
}
9881001
}
@@ -1008,8 +1021,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
10081021
ret = -EINVAL;
10091022
goto out;
10101023
}
1024+
/* SINGLE profile case. */
10111025
cache->alloc_offset = zone_info[0].alloc_offset;
10121026
cache->zone_capacity = zone_info[0].capacity;
1027+
cache->zone_is_active = test_bit(0, active);
10131028

10141029
out:
10151030
/* An extent is allocated after the write pointer */

0 commit comments

Comments
 (0)