Skip to content

Commit 6cbf874

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Move hyp_pool locking out of refcount helpers
The hyp_page refcount helpers currently rely on the hyp_pool lock for serialization. However, this means the refcounts can't be changed from the buddy allocator core as it already holds the lock, which means pages have to go through odd transient states. For example, when a page is freed, its refcount is set to 0, and the lock is transiently released before the page can be attached to a free list in the buddy tree. This is currently harmless as the allocator checks the list node of each page to see if it is available for allocation or not, but it means the page refcount can't be trusted to represent the state of the page even if the pool lock is held. In order to fix this, remove the pool locking from the refcount helpers, and move all the logic to the buddy allocator. This will simplify the removal of the list node from struct hyp_page in a later patch. Signed-off-by: Quentin Perret <qperret@google.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20210608114518.748712-2-qperret@google.com
1 parent 8124c8a commit 6cbf874

2 files changed

Lines changed: 32 additions & 46 deletions

File tree

arch/arm64/kvm/hyp/include/nvhe/gfp.h

Lines changed: 0 additions & 35 deletions
Original file line number | Diff line number | Diff line change
@@ -22,41 +22,6 @@ struct hyp_pool {
2222
unsigned int max_order;
2323
};
2424

25-
static inline void hyp_page_ref_inc(struct hyp_page *p)
26-
{
27-
struct hyp_pool *pool = hyp_page_to_pool(p);
28-
29-
hyp_spin_lock(&pool->lock);
30-
p->refcount++;
31-
hyp_spin_unlock(&pool->lock);
32-
}
33-
34-
static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
35-
{
36-
struct hyp_pool *pool = hyp_page_to_pool(p);
37-
int ret;
38-
39-
hyp_spin_lock(&pool->lock);
40-
p->refcount--;
41-
ret = (p->refcount == 0);
42-
hyp_spin_unlock(&pool->lock);
43-
44-
return ret;
45-
}
46-
47-
static inline void hyp_set_page_refcounted(struct hyp_page *p)
48-
{
49-
struct hyp_pool *pool = hyp_page_to_pool(p);
50-
51-
hyp_spin_lock(&pool->lock);
52-
if (p->refcount) {
53-
hyp_spin_unlock(&pool->lock);
54-
BUG();
55-
}
56-
p->refcount = 1;
57-
hyp_spin_unlock(&pool->lock);
58-
}
59-
6025
/* Allocation */
6126
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
6227
void hyp_get_page(void *addr);

arch/arm64/kvm/hyp/nvhe/page_alloc.c

Lines changed: 32 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -93,15 +93,6 @@ static void __hyp_attach_page(struct hyp_pool *pool,
9393
list_add_tail(&p->node, &pool->free_area[order]);
9494
}
9595

96-
static void hyp_attach_page(struct hyp_page *p)
97-
{
98-
struct hyp_pool *pool = hyp_page_to_pool(p);
99-
100-
hyp_spin_lock(&pool->lock);
101-
__hyp_attach_page(pool, p);
102-
hyp_spin_unlock(&pool->lock);
103-
}
104-
10596
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
10697
struct hyp_page *p,
10798
unsigned int order)
@@ -125,19 +116,49 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
125116
return p;
126117
}
127118

119+
static inline void hyp_page_ref_inc(struct hyp_page *p)
120+
{
121+
p->refcount++;
122+
}
123+
124+
static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
125+
{
126+
p->refcount--;
127+
return (p->refcount == 0);
128+
}
129+
130+
static inline void hyp_set_page_refcounted(struct hyp_page *p)
131+
{
132+
BUG_ON(p->refcount);
133+
p->refcount = 1;
134+
}
135+
136+
/*
137+
* Changes to the buddy tree and page refcounts must be done with the hyp_pool
138+
* lock held. If a refcount change requires an update to the buddy tree (e.g.
139+
* hyp_put_page()), both operations must be done within the same critical
140+
* section to guarantee transient states (e.g. a page with null refcount but
141+
* not yet attached to a free list) can't be observed by well-behaved readers.
142+
*/
128143
void hyp_put_page(void *addr)
129144
{
130145
struct hyp_page *p = hyp_virt_to_page(addr);
146+
struct hyp_pool *pool = hyp_page_to_pool(p);
131147

148+
hyp_spin_lock(&pool->lock);
132149
if (hyp_page_ref_dec_and_test(p))
133-
hyp_attach_page(p);
150+
__hyp_attach_page(pool, p);
151+
hyp_spin_unlock(&pool->lock);
134152
}
135153

136154
void hyp_get_page(void *addr)
137155
{
138156
struct hyp_page *p = hyp_virt_to_page(addr);
157+
struct hyp_pool *pool = hyp_page_to_pool(p);
139158

159+
hyp_spin_lock(&pool->lock);
140160
hyp_page_ref_inc(p);
161+
hyp_spin_unlock(&pool->lock);
141162
}
142163

143164
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
@@ -159,8 +180,8 @@ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
159180
p = list_first_entry(&pool->free_area[i], struct hyp_page, node);
160181
p = __hyp_extract_page(pool, p, order);
161182

162-
hyp_spin_unlock(&pool->lock);
163183
hyp_set_page_refcounted(p);
184+
hyp_spin_unlock(&pool->lock);
164185

165186
return hyp_page_to_virt(p);
166187
}

0 commit comments

Comments (0)