Skip to content

Commit 0879478

Browse files
author
Marc Zyngier
committed
KVM: arm64: Introduce data structure tracking both RES0 and RES1 bits
We have so far mostly tracked RES0 bits, but only made a few attempts at being just as strict for RES1 bits (probably because they are both rarer and harder to handle). Start scratching the surface by introducing a data structure tracking RES0 and RES1 bits at the same time.

Note that contrary to the usual idiom, this structure is mostly passed around by value -- the ABI handles it nicely, and the resulting code is much nicer.

Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Link: https://patch.msgid.link/20260202184329.2724080-5-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent a3c9200 commit 0879478

3 files changed

Lines changed: 160 additions & 138 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 16 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -626,13 +626,24 @@ enum vcpu_sysreg {
626626
NR_SYS_REGS /* Nothing after this line! */
627627
};
628628

629+
struct resx {
630+
u64 res0;
631+
u64 res1;
632+
};
633+
629634
struct kvm_sysreg_masks {
630-
struct {
631-
u64 res0;
632-
u64 res1;
633-
} mask[NR_SYS_REGS - __SANITISED_REG_START__];
635+
struct resx mask[NR_SYS_REGS - __SANITISED_REG_START__];
634636
};
635637

638+
static inline void __kvm_set_sysreg_resx(struct kvm_arch *arch,
639+
enum vcpu_sysreg sr, struct resx resx)
640+
{
641+
arch->sysreg_masks->mask[sr - __SANITISED_REG_START__] = resx;
642+
}
643+
644+
#define kvm_set_sysreg_resx(k, sr, resx) \
645+
__kvm_set_sysreg_resx(&(k)->arch, (sr), (resx))
646+
636647
struct fgt_masks {
637648
const char *str;
638649
u64 mask;
@@ -1607,7 +1618,7 @@ static inline bool kvm_arch_has_irq_bypass(void)
16071618
}
16081619

16091620
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
1610-
void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
1621+
struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg);
16111622
void check_feature_map(void);
16121623
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
16131624

arch/arm64/kvm/config.c

Lines changed: 80 additions & 68 deletions
Original file line number | Diff line number | Diff line change
@@ -1290,14 +1290,14 @@ static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map
12901290
}
12911291
}
12921292

1293-
static u64 __compute_fixed_bits(struct kvm *kvm,
1294-
const struct reg_bits_to_feat_map *map,
1295-
int map_size,
1296-
u64 *fixed_bits,
1297-
unsigned long require,
1298-
unsigned long exclude)
1293+
static struct resx __compute_fixed_bits(struct kvm *kvm,
1294+
const struct reg_bits_to_feat_map *map,
1295+
int map_size,
1296+
u64 *fixed_bits,
1297+
unsigned long require,
1298+
unsigned long exclude)
12991299
{
1300-
u64 val = 0;
1300+
struct resx resx = {};
13011301

13021302
for (int i = 0; i < map_size; i++) {
13031303
bool match;
@@ -1316,53 +1316,62 @@ static u64 __compute_fixed_bits(struct kvm *kvm,
13161316
match = idreg_feat_match(kvm, &map[i]);
13171317

13181318
if (!match || (map[i].flags & FIXED_VALUE))
1319-
val |= reg_feat_map_bits(&map[i]);
1319+
resx.res0 |= reg_feat_map_bits(&map[i]);
13201320
}
13211321

1322-
return val;
1322+
return resx;
13231323
}
13241324

1325-
static u64 compute_res0_bits(struct kvm *kvm,
1326-
const struct reg_bits_to_feat_map *map,
1327-
int map_size,
1328-
unsigned long require,
1329-
unsigned long exclude)
1325+
static struct resx compute_resx_bits(struct kvm *kvm,
1326+
const struct reg_bits_to_feat_map *map,
1327+
int map_size,
1328+
unsigned long require,
1329+
unsigned long exclude)
13301330
{
13311331
return __compute_fixed_bits(kvm, map, map_size, NULL,
13321332
require, exclude | FIXED_VALUE);
13331333
}
13341334

1335-
static u64 compute_reg_res0_bits(struct kvm *kvm,
1336-
const struct reg_feat_map_desc *r,
1337-
unsigned long require, unsigned long exclude)
1335+
static struct resx compute_reg_resx_bits(struct kvm *kvm,
1336+
const struct reg_feat_map_desc *r,
1337+
unsigned long require,
1338+
unsigned long exclude)
13381339
{
1339-
u64 res0;
1340+
struct resx resx, tmp;
13401341

1341-
res0 = compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
1342+
resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
13421343
require, exclude);
13431344

1344-
res0 |= compute_res0_bits(kvm, &r->feat_map, 1, require, exclude);
1345-
res0 |= ~reg_feat_map_bits(&r->feat_map);
1345+
tmp = compute_resx_bits(kvm, &r->feat_map, 1, require, exclude);
1346+
1347+
resx.res0 |= tmp.res0;
1348+
resx.res0 |= ~reg_feat_map_bits(&r->feat_map);
1349+
resx.res1 |= tmp.res1;
13461350

1347-
return res0;
1351+
return resx;
13481352
}
13491353

13501354
static u64 compute_fgu_bits(struct kvm *kvm, const struct reg_feat_map_desc *r)
13511355
{
1356+
struct resx resx;
1357+
13521358
/*
13531359
* If computing FGUs, we collect the unsupported feature bits as
1354-
* RES0 bits, but don't take the actual RES0 bits or register
1360+
* RESx bits, but don't take the actual RESx bits or register
13551361
* existence into account -- we're not computing bits for the
13561362
* register itself.
13571363
*/
1358-
return compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
1364+
resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
13591365
0, NEVER_FGU);
1366+
1367+
return resx.res0 | resx.res1;
13601368
}
13611369

1362-
static u64 compute_reg_fixed_bits(struct kvm *kvm,
1363-
const struct reg_feat_map_desc *r,
1364-
u64 *fixed_bits, unsigned long require,
1365-
unsigned long exclude)
1370+
static struct resx compute_reg_fixed_bits(struct kvm *kvm,
1371+
const struct reg_feat_map_desc *r,
1372+
u64 *fixed_bits,
1373+
unsigned long require,
1374+
unsigned long exclude)
13661375
{
13671376
return __compute_fixed_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
13681377
fixed_bits, require | FIXED_VALUE, exclude);
@@ -1405,91 +1414,94 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
14051414
kvm->arch.fgu[fgt] = val;
14061415
}
14071416

1408-
void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1)
1417+
struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg)
14091418
{
14101419
u64 fixed = 0, mask;
1420+
struct resx resx;
14111421

14121422
switch (reg) {
14131423
case HFGRTR_EL2:
1414-
*res0 = compute_reg_res0_bits(kvm, &hfgrtr_desc, 0, 0);
1415-
*res1 = HFGRTR_EL2_RES1;
1424+
resx = compute_reg_resx_bits(kvm, &hfgrtr_desc, 0, 0);
1425+
resx.res1 |= HFGRTR_EL2_RES1;
14161426
break;
14171427
case HFGWTR_EL2:
1418-
*res0 = compute_reg_res0_bits(kvm, &hfgwtr_desc, 0, 0);
1419-
*res1 = HFGWTR_EL2_RES1;
1428+
resx = compute_reg_resx_bits(kvm, &hfgwtr_desc, 0, 0);
1429+
resx.res1 |= HFGWTR_EL2_RES1;
14201430
break;
14211431
case HFGITR_EL2:
1422-
*res0 = compute_reg_res0_bits(kvm, &hfgitr_desc, 0, 0);
1423-
*res1 = HFGITR_EL2_RES1;
1432+
resx = compute_reg_resx_bits(kvm, &hfgitr_desc, 0, 0);
1433+
resx.res1 |= HFGITR_EL2_RES1;
14241434
break;
14251435
case HDFGRTR_EL2:
1426-
*res0 = compute_reg_res0_bits(kvm, &hdfgrtr_desc, 0, 0);
1427-
*res1 = HDFGRTR_EL2_RES1;
1436+
resx = compute_reg_resx_bits(kvm, &hdfgrtr_desc, 0, 0);
1437+
resx.res1 |= HDFGRTR_EL2_RES1;
14281438
break;
14291439
case HDFGWTR_EL2:
1430-
*res0 = compute_reg_res0_bits(kvm, &hdfgwtr_desc, 0, 0);
1431-
*res1 = HDFGWTR_EL2_RES1;
1440+
resx = compute_reg_resx_bits(kvm, &hdfgwtr_desc, 0, 0);
1441+
resx.res1 |= HDFGWTR_EL2_RES1;
14321442
break;
14331443
case HAFGRTR_EL2:
1434-
*res0 = compute_reg_res0_bits(kvm, &hafgrtr_desc, 0, 0);
1435-
*res1 = HAFGRTR_EL2_RES1;
1444+
resx = compute_reg_resx_bits(kvm, &hafgrtr_desc, 0, 0);
1445+
resx.res1 |= HAFGRTR_EL2_RES1;
14361446
break;
14371447
case HFGRTR2_EL2:
1438-
*res0 = compute_reg_res0_bits(kvm, &hfgrtr2_desc, 0, 0);
1439-
*res1 = HFGRTR2_EL2_RES1;
1448+
resx = compute_reg_resx_bits(kvm, &hfgrtr2_desc, 0, 0);
1449+
resx.res1 |= HFGRTR2_EL2_RES1;
14401450
break;
14411451
case HFGWTR2_EL2:
1442-
*res0 = compute_reg_res0_bits(kvm, &hfgwtr2_desc, 0, 0);
1443-
*res1 = HFGWTR2_EL2_RES1;
1452+
resx = compute_reg_resx_bits(kvm, &hfgwtr2_desc, 0, 0);
1453+
resx.res1 |= HFGWTR2_EL2_RES1;
14441454
break;
14451455
case HFGITR2_EL2:
1446-
*res0 = compute_reg_res0_bits(kvm, &hfgitr2_desc, 0, 0);
1447-
*res1 = HFGITR2_EL2_RES1;
1456+
resx = compute_reg_resx_bits(kvm, &hfgitr2_desc, 0, 0);
1457+
resx.res1 |= HFGITR2_EL2_RES1;
14481458
break;
14491459
case HDFGRTR2_EL2:
1450-
*res0 = compute_reg_res0_bits(kvm, &hdfgrtr2_desc, 0, 0);
1451-
*res1 = HDFGRTR2_EL2_RES1;
1460+
resx = compute_reg_resx_bits(kvm, &hdfgrtr2_desc, 0, 0);
1461+
resx.res1 |= HDFGRTR2_EL2_RES1;
14521462
break;
14531463
case HDFGWTR2_EL2:
1454-
*res0 = compute_reg_res0_bits(kvm, &hdfgwtr2_desc, 0, 0);
1455-
*res1 = HDFGWTR2_EL2_RES1;
1464+
resx = compute_reg_resx_bits(kvm, &hdfgwtr2_desc, 0, 0);
1465+
resx.res1 |= HDFGWTR2_EL2_RES1;
14561466
break;
14571467
case HCRX_EL2:
1458-
*res0 = compute_reg_res0_bits(kvm, &hcrx_desc, 0, 0);
1459-
*res1 = __HCRX_EL2_RES1;
1468+
resx = compute_reg_resx_bits(kvm, &hcrx_desc, 0, 0);
1469+
resx.res1 |= __HCRX_EL2_RES1;
14601470
break;
14611471
case HCR_EL2:
1462-
mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0);
1463-
*res0 = compute_reg_res0_bits(kvm, &hcr_desc, 0, 0);
1464-
*res0 |= (mask & ~fixed);
1465-
*res1 = HCR_EL2_RES1 | (mask & fixed);
1472+
mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0).res0;
1473+
resx = compute_reg_resx_bits(kvm, &hcr_desc, 0, 0);
1474+
resx.res0 |= (mask & ~fixed);
1475+
resx.res1 |= HCR_EL2_RES1 | (mask & fixed);
14661476
break;
14671477
case SCTLR2_EL1:
14681478
case SCTLR2_EL2:
1469-
*res0 = compute_reg_res0_bits(kvm, &sctlr2_desc, 0, 0);
1470-
*res1 = SCTLR2_EL1_RES1;
1479+
resx = compute_reg_resx_bits(kvm, &sctlr2_desc, 0, 0);
1480+
resx.res1 |= SCTLR2_EL1_RES1;
14711481
break;
14721482
case TCR2_EL2:
1473-
*res0 = compute_reg_res0_bits(kvm, &tcr2_el2_desc, 0, 0);
1474-
*res1 = TCR2_EL2_RES1;
1483+
resx = compute_reg_resx_bits(kvm, &tcr2_el2_desc, 0, 0);
1484+
resx.res1 |= TCR2_EL2_RES1;
14751485
break;
14761486
case SCTLR_EL1:
1477-
*res0 = compute_reg_res0_bits(kvm, &sctlr_el1_desc, 0, 0);
1478-
*res1 = SCTLR_EL1_RES1;
1487+
resx = compute_reg_resx_bits(kvm, &sctlr_el1_desc, 0, 0);
1488+
resx.res1 |= SCTLR_EL1_RES1;
14791489
break;
14801490
case MDCR_EL2:
1481-
*res0 = compute_reg_res0_bits(kvm, &mdcr_el2_desc, 0, 0);
1482-
*res1 = MDCR_EL2_RES1;
1491+
resx = compute_reg_resx_bits(kvm, &mdcr_el2_desc, 0, 0);
1492+
resx.res1 |= MDCR_EL2_RES1;
14831493
break;
14841494
case VTCR_EL2:
1485-
*res0 = compute_reg_res0_bits(kvm, &vtcr_el2_desc, 0, 0);
1486-
*res1 = VTCR_EL2_RES1;
1495+
resx = compute_reg_resx_bits(kvm, &vtcr_el2_desc, 0, 0);
1496+
resx.res1 |= VTCR_EL2_RES1;
14871497
break;
14881498
default:
14891499
WARN_ON_ONCE(1);
1490-
*res0 = *res1 = 0;
1500+
resx = (typeof(resx)){};
14911501
break;
14921502
}
1503+
1504+
return resx;
14931505
}
14941506

14951507
static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)

0 commit comments

Comments (0)