Skip to content

Commit dd55dd0

Browse files
committed
LoongArch: Adjust memory management for 32BIT/64BIT
Adjust memory management for both 32BIT and 64BIT, including: address space definition, DMW CSR definition, page table bits definition, boot time detection of VA/PA bits, page table init, tlb exception handling, copy_page/clear_page/dump_tlb libraries, etc.

Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Yawei Li <liyawei@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent 7b2afea commit dd55dd0

12 files changed

Lines changed: 421 additions & 196 deletions

File tree

arch/loongarch/include/asm/addrspace.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,20 @@ extern unsigned long vm_map_base;
3838
#endif
3939

4040
#ifndef WRITECOMBINE_BASE
41+
#ifdef CONFIG_32BIT
42+
#define WRITECOMBINE_BASE CSR_DMW0_BASE
43+
#else
4144
#define WRITECOMBINE_BASE CSR_DMW2_BASE
4245
#endif
46+
#endif
4347

48+
#ifdef CONFIG_32BIT
49+
#define DMW_PABITS 29
50+
#define TO_PHYS_MASK ((_UL(1) << _UL(DMW_PABITS)) - 1)
51+
#else
4452
#define DMW_PABITS 48
4553
#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1)
54+
#endif
4655

4756
/*
4857
* Memory above this physical address will be considered highmem.
@@ -112,7 +121,11 @@ extern unsigned long vm_map_base;
112121
/*
113122
* Returns the physical address of a KPRANGEx / XKPRANGE address
114123
*/
124+
#ifdef CONFIG_32BIT
125+
#define PHYSADDR(a) ((_ACAST32_(a)) & TO_PHYS_MASK)
126+
#else
115127
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
128+
#endif
116129

117130
/*
118131
* On LoongArch, I/O ports mappring is following:

arch/loongarch/include/asm/cpu-features.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,13 @@
2020
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
2121

2222
#ifdef CONFIG_32BIT
23-
# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
2423
# define cpu_vabits 31
2524
# define cpu_pabits 31
2625
#endif
2726

2827
#ifdef CONFIG_64BIT
29-
# define cpu_has_64bits 1
3028
# define cpu_vabits cpu_data[0].vabits
3129
# define cpu_pabits cpu_data[0].pabits
32-
# define __NEED_ADDRBITS_PROBE
3330
#endif
3431

3532
/*

arch/loongarch/include/asm/loongarch.h

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -912,6 +912,26 @@
912912
#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
913913

914914
/* Direct Map window 0/1/2/3 */
915+
916+
#ifdef CONFIG_32BIT
917+
918+
#define CSR_DMW0_PLV0 (1 << 0)
919+
#define CSR_DMW0_VSEG (0x4)
920+
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
921+
#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0)
922+
923+
#define CSR_DMW1_PLV0 (1 << 0)
924+
#define CSR_DMW1_MAT (1 << 4)
925+
#define CSR_DMW1_VSEG (0x5)
926+
#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS)
927+
#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0)
928+
929+
#define CSR_DMW2_INIT 0x0
930+
931+
#define CSR_DMW3_INIT 0x0
932+
933+
#else
934+
915935
#define CSR_DMW0_PLV0 _CONST64_(1 << 0)
916936
#define CSR_DMW0_VSEG _CONST64_(0x8000)
917937
#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS)
@@ -931,6 +951,8 @@
931951

932952
#define CSR_DMW3_INIT 0x0
933953

954+
#endif
955+
934956
/* Performance Counter registers */
935957
#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
936958
#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
@@ -1388,8 +1410,10 @@ __BUILD_CSR_OP(tlbidx)
13881410
#define ENTRYLO_C_SHIFT 4
13891411
#define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT)
13901412
#define ENTRYLO_G (_ULCAST_(1) << 6)
1413+
#ifdef CONFIG_64BIT
13911414
#define ENTRYLO_NR (_ULCAST_(1) << 61)
13921415
#define ENTRYLO_NX (_ULCAST_(1) << 62)
1416+
#endif
13931417

13941418
/* Values for PageSize register */
13951419
#define PS_4K 0x0000000c

arch/loongarch/include/asm/page.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
#include <vdso/page.h>
1212

13-
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
13+
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - PTRLOG)
1414
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
1515
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
1616
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

arch/loongarch/include/asm/pgtable-bits.h

Lines changed: 34 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,26 @@
66
#define _ASM_PGTABLE_BITS_H
77

88
/* Page table bits */
9+
10+
#ifdef CONFIG_32BIT
11+
#define _PAGE_VALID_SHIFT 0
12+
#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
13+
#define _PAGE_DIRTY_SHIFT 1
14+
#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */
15+
#define _CACHE_SHIFT 4 /* 4~5, two bits */
16+
#define _PAGE_GLOBAL_SHIFT 6
17+
#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */
18+
#define _PAGE_PRESENT_SHIFT 7
19+
#define _PAGE_PFN_SHIFT 8
20+
#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
21+
#define _PAGE_SWP_EXCLUSIVE_SHIFT 13
22+
#define _PAGE_PFN_END_SHIFT 28
23+
#define _PAGE_WRITE_SHIFT 29
24+
#define _PAGE_MODIFIED_SHIFT 30
25+
#define _PAGE_PRESENT_INVALID_SHIFT 31
26+
#endif
27+
28+
#ifdef CONFIG_64BIT
929
#define _PAGE_VALID_SHIFT 0
1030
#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */
1131
#define _PAGE_DIRTY_SHIFT 1
@@ -18,25 +38,31 @@
1838
#define _PAGE_MODIFIED_SHIFT 9
1939
#define _PAGE_PROTNONE_SHIFT 10
2040
#define _PAGE_SPECIAL_SHIFT 11
21-
#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
2241
#define _PAGE_PFN_SHIFT 12
42+
#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */
2343
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
2444
#define _PAGE_PFN_END_SHIFT 48
2545
#define _PAGE_PRESENT_INVALID_SHIFT 60
2646
#define _PAGE_NO_READ_SHIFT 61
2747
#define _PAGE_NO_EXEC_SHIFT 62
2848
#define _PAGE_RPLV_SHIFT 63
49+
#endif
2950

3051
/* Used by software */
3152
#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
3253
#define _PAGE_PRESENT_INVALID (_ULCAST_(1) << _PAGE_PRESENT_INVALID_SHIFT)
3354
#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
3455
#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
3556
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
57+
#ifdef CONFIG_32BIT
58+
#define _PAGE_PROTNONE 0
59+
#define _PAGE_SPECIAL 0
60+
#else
3661
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
3762
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
63+
#endif
3864

39-
/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
65+
/* We borrow bit 13/23 to store the exclusive marker in swap PTEs. */
4066
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
4167

4268
/* Used by TLB hardware (placed in EntryLo*) */
@@ -46,9 +72,15 @@
4672
#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
4773
#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
4874
#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
75+
#ifdef CONFIG_32BIT
76+
#define _PAGE_NO_READ 0
77+
#define _PAGE_NO_EXEC 0
78+
#define _PAGE_RPLV 0
79+
#else
4980
#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
5081
#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
5182
#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
83+
#endif
5284
#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
5385
#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
5486

arch/loongarch/include/asm/pgtable.h

Lines changed: 57 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111

1212
#include <linux/compiler.h>
1313
#include <asm/addrspace.h>
14+
#include <asm/asm.h>
1415
#include <asm/page.h>
1516
#include <asm/pgtable-bits.h>
1617

@@ -23,37 +24,45 @@
2324
#endif
2425

2526
#if CONFIG_PGTABLE_LEVELS == 2
26-
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
27+
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
2728
#elif CONFIG_PGTABLE_LEVELS == 3
28-
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
29+
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
2930
#define PMD_SIZE (1UL << PMD_SHIFT)
3031
#define PMD_MASK (~(PMD_SIZE-1))
31-
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
32+
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
3233
#elif CONFIG_PGTABLE_LEVELS == 4
33-
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
34+
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
3435
#define PMD_SIZE (1UL << PMD_SHIFT)
3536
#define PMD_MASK (~(PMD_SIZE-1))
36-
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
37+
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
3738
#define PUD_SIZE (1UL << PUD_SHIFT)
3839
#define PUD_MASK (~(PUD_SIZE-1))
39-
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
40+
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
4041
#endif
4142

4243
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
4344
#define PGDIR_MASK (~(PGDIR_SIZE-1))
4445

45-
#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
46+
#ifdef CONFIG_32BIT
47+
#define VA_BITS 32
48+
#else
49+
#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
50+
#endif
4651

47-
#define PTRS_PER_PGD (PAGE_SIZE >> 3)
52+
#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG)
4853
#if CONFIG_PGTABLE_LEVELS > 3
49-
#define PTRS_PER_PUD (PAGE_SIZE >> 3)
54+
#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG)
5055
#endif
5156
#if CONFIG_PGTABLE_LEVELS > 2
52-
#define PTRS_PER_PMD (PAGE_SIZE >> 3)
57+
#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG)
5358
#endif
54-
#define PTRS_PER_PTE (PAGE_SIZE >> 3)
59+
#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG)
5560

61+
#ifdef CONFIG_32BIT
62+
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
63+
#else
5664
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
65+
#endif
5766

5867
#ifndef __ASSEMBLER__
5968

@@ -74,11 +83,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
7483

7584
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
7685

77-
/*
78-
* TLB refill handlers may also map the vmalloc area into xkvrange.
79-
* Avoid the first couple of pages so NULL pointer dereferences will
80-
* still reliably trap.
81-
*/
86+
#ifdef CONFIG_32BIT
87+
88+
#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
89+
#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE))
90+
91+
#endif
92+
93+
#ifdef CONFIG_64BIT
94+
8295
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
8396
#define MODULES_END (MODULES_VADDR + SZ_256M)
8497

@@ -106,6 +119,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
106119
#define KFENCE_AREA_START (VMEMMAP_END + 1)
107120
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
108121

122+
#endif
123+
109124
#define ptep_get(ptep) READ_ONCE(*(ptep))
110125
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
111126

@@ -277,7 +292,16 @@ extern void kernel_pte_init(void *addr);
277292
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
278293
* are !pte_none() && !pte_present().
279294
*
280-
* Format of swap PTEs:
295+
* Format of 32bit swap PTEs:
296+
*
297+
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
298+
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
299+
* <------------ offset -------------> E <- type -> <-- zeroes -->
300+
*
301+
* E is the exclusive marker that is not stored in swap entries.
302+
* The zero'ed bits include _PAGE_PRESENT.
303+
*
304+
* Format of 64bit swap PTEs:
281305
*
282306
* 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
283307
* 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
@@ -290,16 +314,27 @@ extern void kernel_pte_init(void *addr);
290314
* E is the exclusive marker that is not stored in swap entries.
291315
* The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
292316
*/
317+
318+
#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
319+
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
320+
#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
321+
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
322+
293323
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
294-
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
324+
{
325+
pte_t pte;
326+
pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
327+
return pte;
328+
}
295329

296-
#define __swp_type(x) (((x).val >> 16) & 0x7f)
297-
#define __swp_offset(x) ((x).val >> 24)
330+
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
331+
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
298332
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
299-
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
333+
300334
#define __swp_entry_to_pte(x) __pte((x).val)
301-
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
302335
#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE)
336+
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
337+
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
303338

304339
static inline bool pte_swp_exclusive(pte_t pte)
305340
{

arch/loongarch/kernel/cpu-probe.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base);
106106

107107
static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
108108
{
109-
#ifdef __NEED_ADDRBITS_PROBE
109+
#ifdef CONFIG_32BIT
110+
c->pabits = cpu_pabits;
111+
c->vabits = cpu_vabits;
112+
vm_map_base = KVRANGE;
113+
#else
110114
c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
111115
c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
112116
vm_map_base = 0UL - (1UL << c->vabits);

arch/loongarch/lib/dump_tlb.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,24 +73,32 @@ static void dump_tlb(int first, int last)
7373
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
7474

7575
/* NR/NX are in awkward places, so mask them off separately */
76+
#ifdef CONFIG_64BIT
7677
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
78+
#endif
7779
pa = pa & PAGE_MASK;
7880
pr_cont("\n\t[");
81+
#ifdef CONFIG_64BIT
7982
pr_cont("nr=%d nx=%d ",
8083
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
8184
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
85+
#endif
8286
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
8387
pwidth, pa, c0,
8488
(entrylo0 & ENTRYLO_D) ? 1 : 0,
8589
(entrylo0 & ENTRYLO_V) ? 1 : 0,
8690
(entrylo0 & ENTRYLO_G) ? 1 : 0,
8791
(entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT);
8892
/* NR/NX are in awkward places, so mask them off separately */
93+
#ifdef CONFIG_64BIT
8994
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
95+
#endif
9096
pa = pa & PAGE_MASK;
97+
#ifdef CONFIG_64BIT
9198
pr_cont("nr=%d nx=%d ",
9299
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
93100
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
101+
#endif
94102
pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
95103
pwidth, pa, c1,
96104
(entrylo1 & ENTRYLO_D) ? 1 : 0,

arch/loongarch/mm/init.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(invalid_pmd_table);
224224
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
225225
EXPORT_SYMBOL(invalid_pte_table);
226226

227-
#ifdef CONFIG_EXECMEM
227+
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
228228
static struct execmem_info execmem_info __ro_after_init;
229229

230230
struct execmem_info __init *execmem_arch_setup(void)
@@ -242,4 +242,4 @@ struct execmem_info __init *execmem_arch_setup(void)
242242

243243
return &execmem_info;
244244
}
245-
#endif /* CONFIG_EXECMEM */
245+
#endif /* CONFIG_EXECMEM && MODULES_VADDR */

0 commit comments

Comments (0)