Skip to content

Commit 19c5b60

Browse files
Christoph Hellwig authored and cmaiolino committed
xfs: split and refactor zone validation
Currently xfs_zone_validate mixes validating the software zone state in the XFS realtime group with validating the hardware state reported in struct blk_zone and deriving the write pointer from that. Move all code that works on the realtime group to xfs_init_zone, and only keep the hardware state validation in xfs_zone_validate. This makes the code more clear, and allows for better reuse in userspace. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Reviewed-by: Damien Le Moal <dlemoal@kernel.org> Signed-off-by: Carlos Maiolino <cem@kernel.org>
1 parent 776b76f commit 19c5b60

3 files changed

Lines changed: 68 additions & 114 deletions

File tree

fs/xfs/libxfs/xfs_zones.c

Lines changed: 39 additions & 110 deletions
Original file line numberDiff line numberDiff line change
@@ -15,173 +15,102 @@
1515
#include "xfs_zones.h"
1616

1717
static bool
18-
xfs_zone_validate_empty(
18+
xfs_validate_blk_zone_seq(
19+
struct xfs_mount *mp,
1920
struct blk_zone *zone,
20-
struct xfs_rtgroup *rtg,
21+
unsigned int zone_no,
2122
xfs_rgblock_t *write_pointer)
2223
{
23-
struct xfs_mount *mp = rtg_mount(rtg);
24-
25-
if (rtg_rmap(rtg)->i_used_blocks > 0) {
26-
xfs_warn(mp, "empty zone %u has non-zero used counter (0x%x).",
27-
rtg_rgno(rtg), rtg_rmap(rtg)->i_used_blocks);
28-
return false;
29-
}
30-
31-
*write_pointer = 0;
32-
return true;
33-
}
34-
35-
static bool
36-
xfs_zone_validate_wp(
37-
struct blk_zone *zone,
38-
struct xfs_rtgroup *rtg,
39-
xfs_rgblock_t *write_pointer)
40-
{
41-
struct xfs_mount *mp = rtg_mount(rtg);
42-
xfs_rtblock_t wp_fsb = xfs_daddr_to_rtb(mp, zone->wp);
43-
44-
if (rtg_rmap(rtg)->i_used_blocks > rtg->rtg_extents) {
45-
xfs_warn(mp, "zone %u has too large used counter (0x%x).",
46-
rtg_rgno(rtg), rtg_rmap(rtg)->i_used_blocks);
47-
return false;
48-
}
49-
50-
if (xfs_rtb_to_rgno(mp, wp_fsb) != rtg_rgno(rtg)) {
51-
xfs_warn(mp, "zone %u write pointer (0x%llx) outside of zone.",
52-
rtg_rgno(rtg), wp_fsb);
53-
return false;
54-
}
55-
56-
*write_pointer = xfs_rtb_to_rgbno(mp, wp_fsb);
57-
if (*write_pointer >= rtg->rtg_extents) {
58-
xfs_warn(mp, "zone %u has invalid write pointer (0x%x).",
59-
rtg_rgno(rtg), *write_pointer);
60-
return false;
61-
}
62-
63-
return true;
64-
}
65-
66-
static bool
67-
xfs_zone_validate_full(
68-
struct blk_zone *zone,
69-
struct xfs_rtgroup *rtg,
70-
xfs_rgblock_t *write_pointer)
71-
{
72-
struct xfs_mount *mp = rtg_mount(rtg);
73-
74-
if (rtg_rmap(rtg)->i_used_blocks > rtg->rtg_extents) {
75-
xfs_warn(mp, "zone %u has too large used counter (0x%x).",
76-
rtg_rgno(rtg), rtg_rmap(rtg)->i_used_blocks);
77-
return false;
78-
}
79-
80-
*write_pointer = rtg->rtg_extents;
81-
return true;
82-
}
83-
84-
static bool
85-
xfs_zone_validate_seq(
86-
struct blk_zone *zone,
87-
struct xfs_rtgroup *rtg,
88-
xfs_rgblock_t *write_pointer)
89-
{
90-
struct xfs_mount *mp = rtg_mount(rtg);
91-
9224
switch (zone->cond) {
9325
case BLK_ZONE_COND_EMPTY:
94-
return xfs_zone_validate_empty(zone, rtg, write_pointer);
26+
*write_pointer = 0;
27+
return true;
9528
case BLK_ZONE_COND_IMP_OPEN:
9629
case BLK_ZONE_COND_EXP_OPEN:
9730
case BLK_ZONE_COND_CLOSED:
9831
case BLK_ZONE_COND_ACTIVE:
99-
return xfs_zone_validate_wp(zone, rtg, write_pointer);
32+
if (zone->wp < zone->start ||
33+
zone->wp >= zone->start + zone->capacity) {
34+
xfs_warn(mp,
35+
"zone %u write pointer (%llu) outside of zone.",
36+
zone_no, zone->wp);
37+
return false;
38+
}
39+
40+
*write_pointer = XFS_BB_TO_FSB(mp, zone->wp - zone->start);
41+
return true;
10042
case BLK_ZONE_COND_FULL:
101-
return xfs_zone_validate_full(zone, rtg, write_pointer);
43+
*write_pointer = XFS_BB_TO_FSB(mp, zone->capacity);
44+
return true;
10245
case BLK_ZONE_COND_NOT_WP:
10346
case BLK_ZONE_COND_OFFLINE:
10447
case BLK_ZONE_COND_READONLY:
10548
xfs_warn(mp, "zone %u has unsupported zone condition 0x%x.",
106-
rtg_rgno(rtg), zone->cond);
49+
zone_no, zone->cond);
10750
return false;
10851
default:
10952
xfs_warn(mp, "zone %u has unknown zone condition 0x%x.",
110-
rtg_rgno(rtg), zone->cond);
53+
zone_no, zone->cond);
11154
return false;
11255
}
11356
}
11457

11558
static bool
116-
xfs_zone_validate_conv(
59+
xfs_validate_blk_zone_conv(
60+
struct xfs_mount *mp,
11761
struct blk_zone *zone,
118-
struct xfs_rtgroup *rtg)
62+
unsigned int zone_no)
11963
{
120-
struct xfs_mount *mp = rtg_mount(rtg);
121-
12264
switch (zone->cond) {
12365
case BLK_ZONE_COND_NOT_WP:
12466
return true;
12567
default:
12668
xfs_warn(mp,
12769
"conventional zone %u has unsupported zone condition 0x%x.",
128-
rtg_rgno(rtg), zone->cond);
70+
zone_no, zone->cond);
12971
return false;
13072
}
13173
}
13274

13375
bool
134-
xfs_zone_validate(
76+
xfs_validate_blk_zone(
77+
struct xfs_mount *mp,
13578
struct blk_zone *zone,
136-
struct xfs_rtgroup *rtg,
79+
unsigned int zone_no,
80+
uint32_t expected_size,
81+
uint32_t expected_capacity,
13782
xfs_rgblock_t *write_pointer)
13883
{
139-
struct xfs_mount *mp = rtg_mount(rtg);
140-
struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG];
141-
uint32_t expected_size;
142-
14384
/*
14485
* Check that the zone capacity matches the rtgroup size stored in the
14586
* superblock. Note that all zones including the last one must have a
14687
* uniform capacity.
14788
*/
148-
if (XFS_BB_TO_FSB(mp, zone->capacity) != g->blocks) {
89+
if (XFS_BB_TO_FSB(mp, zone->capacity) != expected_capacity) {
14990
xfs_warn(mp,
150-
"zone %u capacity (0x%llx) does not match RT group size (0x%x).",
151-
rtg_rgno(rtg), XFS_BB_TO_FSB(mp, zone->capacity),
152-
g->blocks);
91+
"zone %u capacity (%llu) does not match RT group size (%u).",
92+
zone_no, XFS_BB_TO_FSB(mp, zone->capacity),
93+
expected_capacity);
15394
return false;
15495
}
15596

156-
if (g->has_daddr_gaps) {
157-
expected_size = 1 << g->blklog;
158-
} else {
159-
if (zone->len != zone->capacity) {
160-
xfs_warn(mp,
161-
"zone %u has capacity != size ((0x%llx vs 0x%llx)",
162-
rtg_rgno(rtg),
163-
XFS_BB_TO_FSB(mp, zone->len),
164-
XFS_BB_TO_FSB(mp, zone->capacity));
165-
return false;
166-
}
167-
expected_size = g->blocks;
168-
}
169-
17097
if (XFS_BB_TO_FSB(mp, zone->len) != expected_size) {
17198
xfs_warn(mp,
172-
"zone %u length (0x%llx) does match geometry (0x%x).",
173-
rtg_rgno(rtg), XFS_BB_TO_FSB(mp, zone->len),
99+
"zone %u length (%llu) does not match geometry (%u).",
100+
zone_no, XFS_BB_TO_FSB(mp, zone->len),
174101
expected_size);
102+
return false;
175103
}
176104

177105
switch (zone->type) {
178106
case BLK_ZONE_TYPE_CONVENTIONAL:
179-
return xfs_zone_validate_conv(zone, rtg);
107+
return xfs_validate_blk_zone_conv(mp, zone, zone_no);
180108
case BLK_ZONE_TYPE_SEQWRITE_REQ:
181-
return xfs_zone_validate_seq(zone, rtg, write_pointer);
109+
return xfs_validate_blk_zone_seq(mp, zone, zone_no,
110+
write_pointer);
182111
default:
183112
xfs_warn(mp, "zoned %u has unsupported type 0x%x.",
184-
rtg_rgno(rtg), zone->type);
113+
zone_no, zone->type);
185114
return false;
186115
}
187116
}

fs/xfs/libxfs/xfs_zones.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,8 @@ struct blk_zone;
3737
*/
3838
#define XFS_DEFAULT_MAX_OPEN_ZONES 128
3939

40-
bool xfs_zone_validate(struct blk_zone *zone, struct xfs_rtgroup *rtg,
41-
xfs_rgblock_t *write_pointer);
40+
bool xfs_validate_blk_zone(struct xfs_mount *mp, struct blk_zone *zone,
41+
unsigned int zone_no, uint32_t expected_size,
42+
uint32_t expected_capacity, xfs_rgblock_t *write_pointer);
4243

4344
#endif /* _LIBXFS_ZONES_H */

fs/xfs/xfs_zone_alloc.c

Lines changed: 26 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -977,13 +977,15 @@ xfs_free_open_zones(
977977

978978
struct xfs_init_zones {
979979
struct xfs_mount *mp;
980+
uint32_t zone_size;
981+
uint32_t zone_capacity;
980982
uint64_t available;
981983
uint64_t reclaimable;
982984
};
983985

984986
/*
985987
* For sequential write required zones, we restart writing at the hardware write
986-
* pointer returned by xfs_zone_validate().
988+
* pointer returned by xfs_validate_blk_zone().
987989
*
988990
* For conventional zones or conventional devices we have to query the rmap to
989991
* find the highest recorded block and set the write pointer to the block after
@@ -1018,6 +1020,25 @@ xfs_init_zone(
10181020
uint32_t used = rtg_rmap(rtg)->i_used_blocks;
10191021
int error;
10201022

1023+
if (write_pointer > rtg->rtg_extents) {
1024+
xfs_warn(mp, "zone %u has invalid write pointer (0x%x).",
1025+
rtg_rgno(rtg), write_pointer);
1026+
return -EFSCORRUPTED;
1027+
}
1028+
1029+
if (used > rtg->rtg_extents) {
1030+
xfs_warn(mp,
1031+
"zone %u has used counter (0x%x) larger than zone capacity (0x%llx).",
1032+
rtg_rgno(rtg), used, rtg->rtg_extents);
1033+
return -EFSCORRUPTED;
1034+
}
1035+
1036+
if (write_pointer == 0 && used != 0) {
1037+
xfs_warn(mp, "empty zone %u has non-zero used counter (0x%x).",
1038+
rtg_rgno(rtg), used);
1039+
return -EFSCORRUPTED;
1040+
}
1041+
10211042
/*
10221043
* If there are no used blocks, but the zone is not in empty state yet
10231044
* we lost power before the zoned reset. In that case finish the work
@@ -1081,7 +1102,8 @@ xfs_get_zone_info_cb(
10811102
xfs_warn(mp, "realtime group not found for zone %u.", rgno);
10821103
return -EFSCORRUPTED;
10831104
}
1084-
if (!xfs_zone_validate(zone, rtg, &write_pointer)) {
1105+
if (!xfs_validate_blk_zone(mp, zone, idx, iz->zone_size,
1106+
iz->zone_capacity, &write_pointer)) {
10851107
xfs_rtgroup_rele(rtg);
10861108
return -EFSCORRUPTED;
10871109
}
@@ -1227,6 +1249,8 @@ xfs_mount_zones(
12271249
{
12281250
struct xfs_init_zones iz = {
12291251
.mp = mp,
1252+
.zone_capacity = mp->m_groups[XG_TYPE_RTG].blocks,
1253+
.zone_size = xfs_rtgroup_raw_size(mp),
12301254
};
12311255
struct xfs_buftarg *bt = mp->m_rtdev_targp;
12321256
xfs_extlen_t zone_blocks = mp->m_groups[XG_TYPE_RTG].blocks;

0 commit comments

Comments (0)