Skip to content

Commit de51999

Browse files
soleen
authored and akpm00 committed
kho: allow memory preservation state updates after finalization
Currently, kho_preserve_* and kho_unpreserve_* return -EBUSY if KHO is finalized. This enforces a rigid "freeze" on the KHO memory state. With the introduction of re-entrant finalization, this restriction is no longer necessary. Users should be allowed to modify the preservation set (e.g., adding new pages or freeing old ones) even after an initial finalization. The intended workflow for updates is now: 1. Modify state (preserve/unpreserve). 2. Call kho_finalize() again to refresh the serialized metadata. Remove the kho_out.finalized checks to enable this dynamic behavior. This also allows to convert kho_unpreserve_* functions to void, as they do not return any error anymore. Link: https://lkml.kernel.org/r/20251114190002.3311679-13-pasha.tatashin@soleen.com Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com> Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Reviewed-by: Pratyush Yadav <pratyush@kernel.org> Cc: Alexander Graf <graf@amazon.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Baoquan He <bhe@redhat.com> Cc: Coiby Xu <coxu@redhat.com> Cc: Dave Vasilevsky <dave@vasilevsky.ca> Cc: Eric Biggers <ebiggers@google.com> Cc: Kees Cook <kees@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent d725595 commit de51999

2 files changed

Lines changed: 19 additions & 57 deletions

File tree

include/linux/kexec_handover.h

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -44,11 +44,11 @@ bool kho_is_enabled(void);
4444
bool is_kho_boot(void);
4545

4646
int kho_preserve_folio(struct folio *folio);
47-
int kho_unpreserve_folio(struct folio *folio);
47+
void kho_unpreserve_folio(struct folio *folio);
4848
int kho_preserve_pages(struct page *page, unsigned int nr_pages);
49-
int kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
49+
void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
5050
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
51-
int kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
51+
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
5252
void *kho_alloc_preserve(size_t size);
5353
void kho_unpreserve_free(void *mem);
5454
void kho_restore_free(void *mem);
@@ -79,31 +79,22 @@ static inline int kho_preserve_folio(struct folio *folio)
7979
return -EOPNOTSUPP;
8080
}
8181

82-
static inline int kho_unpreserve_folio(struct folio *folio)
83-
{
84-
return -EOPNOTSUPP;
85-
}
82+
static inline void kho_unpreserve_folio(struct folio *folio) { }
8683

8784
static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
8885
{
8986
return -EOPNOTSUPP;
9087
}
9188

92-
static inline int kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
93-
{
94-
return -EOPNOTSUPP;
95-
}
89+
static inline void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) { }
9690

9791
static inline int kho_preserve_vmalloc(void *ptr,
9892
struct kho_vmalloc *preservation)
9993
{
10094
return -EOPNOTSUPP;
10195
}
10296

103-
static inline int kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
104-
{
105-
return -EOPNOTSUPP;
106-
}
97+
static inline void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) { }
10798

10899
static inline void *kho_alloc_preserve(size_t size)
109100
{

kernel/liveupdate/kexec_handover.c

Lines changed: 13 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -185,10 +185,6 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
185185
const unsigned long pfn_high = pfn >> order;
186186

187187
might_sleep();
188-
189-
if (kho_out.finalized)
190-
return -EBUSY;
191-
192188
physxa = xa_load(&track->orders, order);
193189
if (!physxa) {
194190
int err;
@@ -807,20 +803,14 @@ EXPORT_SYMBOL_GPL(kho_preserve_folio);
807803
* Instructs KHO to unpreserve a folio that was preserved by
808804
* kho_preserve_folio() before. The provided @folio (pfn and order)
809805
* must exactly match a previously preserved folio.
810-
*
811-
* Return: 0 on success, error code on failure
812806
*/
813-
int kho_unpreserve_folio(struct folio *folio)
807+
void kho_unpreserve_folio(struct folio *folio)
814808
{
815809
const unsigned long pfn = folio_pfn(folio);
816810
const unsigned int order = folio_order(folio);
817811
struct kho_mem_track *track = &kho_out.track;
818812

819-
if (kho_out.finalized)
820-
return -EBUSY;
821-
822813
__kho_unpreserve_order(track, pfn, order);
823-
return 0;
824814
}
825815
EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
826816

@@ -877,21 +867,14 @@ EXPORT_SYMBOL_GPL(kho_preserve_pages);
877867
* This must be called with the same @page and @nr_pages as the corresponding
878868
* kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
879869
* preserved blocks is not supported.
880-
*
881-
* Return: 0 on success, error code on failure
882870
*/
883-
int kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
871+
void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
884872
{
885873
struct kho_mem_track *track = &kho_out.track;
886874
const unsigned long start_pfn = page_to_pfn(page);
887875
const unsigned long end_pfn = start_pfn + nr_pages;
888876

889-
if (kho_out.finalized)
890-
return -EBUSY;
891-
892877
__kho_unpreserve(track, start_pfn, end_pfn);
893-
894-
return 0;
895878
}
896879
EXPORT_SYMBOL_GPL(kho_unpreserve_pages);
897880

@@ -976,20 +959,6 @@ static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
976959
}
977960
}
978961

979-
static void kho_vmalloc_free_chunks(struct kho_vmalloc *kho_vmalloc)
980-
{
981-
struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first);
982-
983-
while (chunk) {
984-
struct kho_vmalloc_chunk *tmp = chunk;
985-
986-
kho_vmalloc_unpreserve_chunk(chunk, kho_vmalloc->order);
987-
988-
chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
989-
free_page((unsigned long)tmp);
990-
}
991-
}
992-
993962
/**
994963
* kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
995964
* @ptr: pointer to the area in vmalloc address space
@@ -1051,7 +1020,7 @@ int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
10511020
return 0;
10521021

10531022
err_free:
1054-
kho_vmalloc_free_chunks(preservation);
1023+
kho_unpreserve_vmalloc(preservation);
10551024
return err;
10561025
}
10571026
EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
@@ -1062,17 +1031,19 @@ EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
10621031
*
10631032
* Instructs KHO to unpreserve the area in vmalloc address space that was
10641033
* previously preserved with kho_preserve_vmalloc().
1065-
*
1066-
* Return: 0 on success, error code on failure
10671034
*/
1068-
int kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
1035+
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
10691036
{
1070-
if (kho_out.finalized)
1071-
return -EBUSY;
1037+
struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
10721038

1073-
kho_vmalloc_free_chunks(preservation);
1039+
while (chunk) {
1040+
struct kho_vmalloc_chunk *tmp = chunk;
10741041

1075-
return 0;
1042+
kho_vmalloc_unpreserve_chunk(chunk, preservation->order);
1043+
1044+
chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1045+
free_page((unsigned long)tmp);
1046+
}
10761047
}
10771048
EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);
10781049

@@ -1221,7 +1192,7 @@ void kho_unpreserve_free(void *mem)
12211192
return;
12221193

12231194
folio = virt_to_folio(mem);
1224-
WARN_ON_ONCE(kho_unpreserve_folio(folio));
1195+
kho_unpreserve_folio(folio);
12251196
folio_put(folio);
12261197
}
12271198
EXPORT_SYMBOL_GPL(kho_unpreserve_free);

0 commit comments

Comments
 (0)