@@ -392,80 +392,90 @@ void nested_vmx_check_supported(void)
392392 }
393393}
394394
395- void nested_pg_map (struct vmx_pages * vmx , struct kvm_vm * vm ,
396- uint64_t nested_paddr , uint64_t paddr )
395+ static void nested_create_pte (struct kvm_vm * vm ,
396+ struct eptPageTableEntry * pte ,
397+ uint64_t nested_paddr ,
398+ uint64_t paddr ,
399+ int current_level ,
400+ int target_level )
401+ {
402+ if (!pte -> readable ) {
403+ pte -> writable = true;
404+ pte -> readable = true;
405+ pte -> executable = true;
406+ pte -> page_size = (current_level == target_level );
407+ if (pte -> page_size )
408+ pte -> address = paddr >> vm -> page_shift ;
409+ else
410+ pte -> address = vm_alloc_page_table (vm ) >> vm -> page_shift ;
411+ } else {
412+ /*
413+ * Entry already present. Assert that the caller doesn't want
414+ * a hugepage at this level, and that there isn't a hugepage at
415+ * this level.
416+ */
417+ TEST_ASSERT (current_level != target_level ,
418+ "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n" ,
419+ current_level , nested_paddr );
420+ TEST_ASSERT (!pte -> page_size ,
421+ "Cannot create page table at level: %u, nested_paddr: 0x%lx\n" ,
422+ current_level , nested_paddr );
423+ }
424+ }
425+
426+
427+ void __nested_pg_map (struct vmx_pages * vmx , struct kvm_vm * vm ,
428+ uint64_t nested_paddr , uint64_t paddr , int target_level )
397429{
398- uint16_t index [4 ];
399- struct eptPageTableEntry * pml4e ;
430+ const uint64_t page_size = PG_LEVEL_SIZE (target_level );
431+ struct eptPageTableEntry * pt = vmx -> eptp_hva , * pte ;
432+ uint16_t index ;
400433
401434 TEST_ASSERT (vm -> mode == VM_MODE_PXXV48_4K , "Attempt to use "
402435 "unknown or unsupported guest mode, mode: 0x%x" , vm -> mode );
403436
404- TEST_ASSERT ((nested_paddr % vm -> page_size ) == 0 ,
437+ TEST_ASSERT ((nested_paddr % page_size ) == 0 ,
405438 "Nested physical address not on page boundary,\n"
406- " nested_paddr: 0x%lx vm-> page_size: 0x%x " ,
407- nested_paddr , vm -> page_size );
439+ " nested_paddr: 0x%lx page_size: 0x%lx " ,
440+ nested_paddr , page_size );
408441 TEST_ASSERT ((nested_paddr >> vm -> page_shift ) <= vm -> max_gfn ,
409442 "Physical address beyond beyond maximum supported,\n"
410443 " nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x" ,
411444 paddr , vm -> max_gfn , vm -> page_size );
412- TEST_ASSERT ((paddr % vm -> page_size ) == 0 ,
445+ TEST_ASSERT ((paddr % page_size ) == 0 ,
413446 "Physical address not on page boundary,\n"
414- " paddr: 0x%lx vm-> page_size: 0x%x " ,
415- paddr , vm -> page_size );
447+ " paddr: 0x%lx page_size: 0x%lx " ,
448+ paddr , page_size );
416449 TEST_ASSERT ((paddr >> vm -> page_shift ) <= vm -> max_gfn ,
417450 "Physical address beyond beyond maximum supported,\n"
418451 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x" ,
419452 paddr , vm -> max_gfn , vm -> page_size );
420453
421- index [0 ] = (nested_paddr >> 12 ) & 0x1ffu ;
422- index [1 ] = (nested_paddr >> 21 ) & 0x1ffu ;
423- index [2 ] = (nested_paddr >> 30 ) & 0x1ffu ;
424- index [3 ] = (nested_paddr >> 39 ) & 0x1ffu ;
425-
426- /* Allocate page directory pointer table if not present. */
427- pml4e = vmx -> eptp_hva ;
428- if (!pml4e [index [3 ]].readable ) {
429- pml4e [index [3 ]].address = vm_alloc_page_table (vm ) >> vm -> page_shift ;
430- pml4e [index [3 ]].writable = true;
431- pml4e [index [3 ]].readable = true;
432- pml4e [index [3 ]].executable = true;
433- }
454+ for (int level = PG_LEVEL_512G ; level >= PG_LEVEL_4K ; level -- ) {
455+ index = (nested_paddr >> PG_LEVEL_SHIFT (level )) & 0x1ffu ;
456+ pte = & pt [index ];
434457
435- /* Allocate page directory table if not present. */
436- struct eptPageTableEntry * pdpe ;
437- pdpe = addr_gpa2hva (vm , pml4e [index [3 ]].address * vm -> page_size );
438- if (!pdpe [index [2 ]].readable ) {
439- pdpe [index [2 ]].address = vm_alloc_page_table (vm ) >> vm -> page_shift ;
440- pdpe [index [2 ]].writable = true;
441- pdpe [index [2 ]].readable = true;
442- pdpe [index [2 ]].executable = true;
443- }
458+ nested_create_pte (vm , pte , nested_paddr , paddr , level , target_level );
444459
445- /* Allocate page table if not present. */
446- struct eptPageTableEntry * pde ;
447- pde = addr_gpa2hva (vm , pdpe [index [2 ]].address * vm -> page_size );
448- if (!pde [index [1 ]].readable ) {
449- pde [index [1 ]].address = vm_alloc_page_table (vm ) >> vm -> page_shift ;
450- pde [index [1 ]].writable = true;
451- pde [index [1 ]].readable = true;
452- pde [index [1 ]].executable = true;
453- }
460+ if (pte -> page_size )
461+ break ;
454462
455- /* Fill in page table entry. */
456- struct eptPageTableEntry * pte ;
457- pte = addr_gpa2hva (vm , pde [index [1 ]].address * vm -> page_size );
458- pte [index [0 ]].address = paddr >> vm -> page_shift ;
459- pte [index [0 ]].writable = true;
460- pte [index [0 ]].readable = true;
461- pte [index [0 ]].executable = true;
463+ pt = addr_gpa2hva (vm , pte -> address * vm -> page_size );
464+ }
462465
463466 /*
464467 * For now mark these as accessed and dirty because the only
465468 * testcase we have needs that. Can be reconsidered later.
466469 */
467- pte [index [0 ]].accessed = true;
468- pte [index [0 ]].dirty = true;
470+ pte -> accessed = true;
471+ pte -> dirty = true;
472+
473+ }
474+
/* Map a single 4K page in the EPT: nested_paddr -> paddr. */
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		   uint64_t nested_paddr, uint64_t paddr)
{
	__nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}
470480
471481/*
0 commit comments