Skip to content

Commit e6f7982

Browse files
Alexei Starovoitov authored and Andrii Nakryiko committed
mm: Introduce VM_SPARSE kind and vm_area_[un]map_pages().
vmap/vmalloc APIs are used to map a set of pages into contiguous kernel virtual space. get_vm_area() with appropriate flag is used to request an area of kernel address range. It's used for vmalloc, vmap, ioremap, xen use cases. - vmalloc use case dominates the usage. Such vm areas have VM_ALLOC flag. - the areas created by vmap() function should be tagged with VM_MAP. - ioremap areas are tagged with VM_IOREMAP. BPF would like to extend the vmap API to implement a lazily-populated sparse, yet contiguous kernel virtual space. Introduce VM_SPARSE flag and vm_area_map_pages(area, start_addr, count, pages) API to map a set of pages within a given area. It has the same sanity checks as vmap() does. It also checks that get_vm_area() was created with VM_SPARSE flag which identifies such areas in /proc/vmallocinfo and returns zero pages on read through /proc/kcore. The next commits will introduce bpf_arena which is a sparsely populated shared memory region between bpf program and user space process. It will map privately-managed pages into a sparse vm area with the following steps: // request virtual memory region during bpf prog verification area = get_vm_area(area_size, VM_SPARSE); // on demand vm_area_map_pages(area, kaddr, kend, pages); vm_area_unmap_pages(area, kaddr, kend); // after bpf program is detached and unloaded free_vm_area(area); Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com> Link: https://lore.kernel.org/bpf/20240305030516.41519-3-alexei.starovoitov@gmail.com
1 parent 3e49a86 commit e6f7982

2 files changed

Lines changed: 62 additions & 2 deletions

File tree

include/linux/vmalloc.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ struct iov_iter; /* in uio.h */
3535
#else
3636
#define VM_DEFER_KMEMLEAK 0
3737
#endif
38+
#define VM_SPARSE 0x00001000 /* sparse vm_area. not all pages are present. */
3839

3940
/* bits [20..32] reserved for arch specific ioremap internals */
4041

@@ -232,6 +233,10 @@ static inline bool is_vm_area_hugepages(const void *addr)
232233
}
233234

234235
#ifdef CONFIG_MMU
236+
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
237+
unsigned long end, struct page **pages);
238+
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
239+
unsigned long end);
235240
void vunmap_range(unsigned long addr, unsigned long end);
236241
static inline void set_vm_flush_reset_perms(void *addr)
237242
{

mm/vmalloc.c

Lines changed: 57 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -648,6 +648,58 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
648648
return err;
649649
}
650650

651+
/*
 * Validate that [start, end) is a sane, in-bounds range inside a VM_SPARSE
 * vm_area before mapping or unmapping pages in it.
 *
 * Returns 0 on success, or a negative errno:
 *   -EINVAL  area has an incompatible flag (VM_FLUSH_RESET_PERMS or
 *            VM_NO_GUARD) or was not created with VM_SPARSE
 *   -E2BIG   the requested range spans more pages than exist in RAM
 *   -ERANGE  [start, end) falls outside the area's address range
 *
 * May sleep; callers must be in process context.
 */
static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	/* Sparse areas are long-lived and unmapped piecemeal; the
	 * flush-on-free and no-guard semantics don't compose with that. */
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	/* Only areas explicitly created with get_vm_area(.., VM_SPARSE)
	 * may be partially [un]mapped through this API. */
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	/* A range larger than all of RAM cannot be backed by pages. */
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	/* The range must lie entirely within the reserved area. */
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}
668+
669+
/**
670+
* vm_area_map_pages - map pages inside given sparse vm_area
671+
* @area: vm_area
672+
* @start: start address inside vm_area
673+
* @end: end address inside vm_area
674+
* @pages: pages to map (always PAGE_SIZE pages)
675+
*/
676+
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
677+
unsigned long end, struct page **pages)
678+
{
679+
int err;
680+
681+
err = check_sparse_vm_area(area, start, end);
682+
if (err)
683+
return err;
684+
685+
return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
686+
}
687+
688+
/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 *
 * Remove the kernel page-table entries for [@start, @end) within @area.
 * @area must have been created with the VM_SPARSE flag. Invalid ranges
 * are rejected silently (after a one-time WARN in the checker).
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	int err = check_sparse_vm_area(area, start, end);

	if (!err)
		vunmap_range(start, end);
}
702+
651703
int is_vmalloc_or_module_addr(const void *x)
652704
{
653705
/*
@@ -3822,9 +3874,9 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
38223874

38233875
if (flags & VMAP_RAM)
38243876
copied = vmap_ram_vread_iter(iter, addr, n, flags);
3825-
else if (!(vm && (vm->flags & VM_IOREMAP)))
3877+
else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
38263878
copied = aligned_vread_iter(iter, addr, n);
3827-
else /* IOREMAP area is treated as memory hole */
3879+
else /* IOREMAP | SPARSE area is treated as memory hole */
38283880
copied = zero_iter(iter, n);
38293881

38303882
addr += copied;
@@ -4415,6 +4467,9 @@ static int s_show(struct seq_file *m, void *p)
44154467
if (v->flags & VM_IOREMAP)
44164468
seq_puts(m, " ioremap");
44174469

4470+
if (v->flags & VM_SPARSE)
4471+
seq_puts(m, " sparse");
4472+
44184473
if (v->flags & VM_ALLOC)
44194474
seq_puts(m, " vmalloc");
44204475

0 commit comments

Comments
 (0)