 }
}

/* Fixture for tests that write *valid* vector CSR values via ptrace. */
FIXTURE(v_csr_valid)
{
};
657+
/* No per-test setup required; the child process is created inside the test. */
FIXTURE_SETUP(v_csr_valid)
{
}
661+
/* No per-test teardown required; the tracee is killed at the end of the test. */
FIXTURE_TEARDOWN(v_csr_valid)
{
}
665+
/* modifications of the initial vsetvli settings */
FIXTURE_VARIANT(v_csr_valid)
{
	unsigned long vstart;	/* vstart value to apply via SETREGSET */
	unsigned long vl;	/* vl value to apply (0 = leave as read) */
	unsigned long vtype;	/* vtype encoding to apply */
	unsigned long vcsr;	/* vcsr value to apply */
	unsigned long vlenb_mul; /* multiplier applied to the reported vlenb */
	unsigned long vlenb_min; /* skip if vlenb (bytes) is below this */
	unsigned long vlenb_max; /* skip if vlenb (bytes) is above this (0 = no cap) */
	unsigned long spec;	/* vector spec the variant targets (1.0 or XTheadVector 0.7) */
};
678+
679+ /* valid for VLEN >= 128: LMUL= 1/4, SEW = 32 */
680+ FIXTURE_VARIANT_ADD (v_csr_valid , frac_lmul1 )
681+ {
682+ .vstart = 0x0 ,
683+ .vl = 0x0 ,
684+ .vtype = 0x16 ,
685+ .vcsr = 0x0 ,
686+ .vlenb_mul = 0x1 ,
687+ .vlenb_min = 0x10 ,
688+ .vlenb_max = 0x0 ,
689+ .spec = VECTOR_1_0 ,
690+ };
691+
692+ /* valid for VLEN >= 16: LMUL= 2, SEW = 32 */
693+ FIXTURE_VARIANT_ADD (v_csr_valid , int_lmul1 )
694+ {
695+ .vstart = 0x0 ,
696+ .vl = 0x0 ,
697+ .vtype = 0x11 ,
698+ .vcsr = 0x0 ,
699+ .vlenb_mul = 0x1 ,
700+ .vlenb_min = 0x2 ,
701+ .vlenb_max = 0x0 ,
702+ .spec = VECTOR_1_0 ,
703+ };
704+
705+ /* valid for XTheadVector VLEN >= 16: LMUL= 2, SEW = 32 */
706+ FIXTURE_VARIANT_ADD (v_csr_valid , int_lmul2 )
707+ {
708+ .vstart = 0x0 ,
709+ .vl = 0x0 ,
710+ .vtype = 0x9 ,
711+ .vcsr = 0x0 ,
712+ .vlenb_mul = 0x1 ,
713+ .vlenb_min = 0x2 ,
714+ .vlenb_max = 0x0 ,
715+ .spec = XTHEAD_VECTOR_0_7 ,
716+ };
717+
718+ /* valid for VLEN >= 32: LMUL= 2, SEW = 32, VL = 2 */
719+ FIXTURE_VARIANT_ADD (v_csr_valid , int_lmul3 )
720+ {
721+ .vstart = 0x0 ,
722+ .vl = 0x2 ,
723+ .vtype = 0x11 ,
724+ .vcsr = 0x0 ,
725+ .vlenb_mul = 0x1 ,
726+ .vlenb_min = 0x4 ,
727+ .vlenb_max = 0x0 ,
728+ .spec = VECTOR_1_0 ,
729+ };
730+
731+ TEST_F (v_csr_valid , ptrace_v_valid_values )
732+ {
733+ unsigned long vlenb ;
734+ pid_t pid ;
735+
736+ if (!is_vector_supported () && !is_xtheadvector_supported ())
737+ SKIP (return , "Vectors not supported" );
738+
739+ if (is_vector_supported () && !vector_test (variant -> spec ))
740+ SKIP (return , "Test not supported for Vector" );
741+
742+ if (is_xtheadvector_supported () && !xthead_test (variant -> spec ))
743+ SKIP (return , "Test not supported for XTheadVector" );
744+
745+ vlenb = get_vr_len ();
746+
747+ if (variant -> vlenb_min ) {
748+ if (vlenb < variant -> vlenb_min )
749+ SKIP (return , "This test does not support VLEN < %lu\n" ,
750+ variant -> vlenb_min * 8 );
751+ }
752+ if (variant -> vlenb_max ) {
753+ if (vlenb > variant -> vlenb_max )
754+ SKIP (return , "This test does not support VLEN > %lu\n" ,
755+ variant -> vlenb_max * 8 );
756+ }
757+
758+ chld_lock = 1 ;
759+ pid = fork ();
760+ ASSERT_LE (0 , pid )
761+ TH_LOG ("fork: %m" );
762+
763+ if (pid == 0 ) {
764+ unsigned long vl ;
765+
766+ while (chld_lock == 1 )
767+ asm volatile ("" : : "g" (chld_lock ) : "memory" );
768+
769+ if (is_xtheadvector_supported ()) {
770+ asm volatile (
771+ // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
772+ // vsetvli t4, x0, e16, m2, d1
773+ ".4byte 0b00000000010100000111111011010111\n"
774+ "mv %[new_vl], t4\n"
775+ : [new_vl ] "=r" (vl ) : : "t4" );
776+ } else {
777+ asm volatile (
778+ ".option push\n"
779+ ".option arch, +zve32x\n"
780+ "vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
781+ ".option pop\n"
782+ : [new_vl ] "=r" (vl ) : : );
783+ }
784+
785+ asm volatile (
786+ ".option push\n"
787+ ".option norvc\n"
788+ ".option arch, +zve32x\n"
789+ "ebreak\n" /* breakpoint 1: apply new V state using ptrace */
790+ "nop\n"
791+ "ebreak\n" /* breakpoint 2: V state clean - context will not be saved */
792+ "vmv.v.i v0, -1\n"
793+ "ebreak\n" /* breakpoint 3: V state dirty - context will be saved */
794+ ".option pop\n" );
795+ } else {
796+ struct __riscv_v_regset_state * regset_data ;
797+ struct user_regs_struct regs ;
798+ size_t regset_size ;
799+ struct iovec iov ;
800+ int status ;
801+
802+ /* attach */
803+
804+ ASSERT_EQ (0 , ptrace (PTRACE_ATTACH , pid , NULL , NULL ));
805+ ASSERT_EQ (pid , waitpid (pid , & status , 0 ));
806+ ASSERT_TRUE (WIFSTOPPED (status ));
807+
808+ /* unlock */
809+
810+ ASSERT_EQ (0 , ptrace (PTRACE_POKEDATA , pid , & chld_lock , 0 ));
811+
812+ /* resume and wait for the 1st ebreak */
813+
814+ ASSERT_EQ (0 , ptrace (PTRACE_CONT , pid , NULL , NULL ));
815+ ASSERT_EQ (pid , waitpid (pid , & status , 0 ));
816+ ASSERT_TRUE (WIFSTOPPED (status ));
817+
818+ /* read tracee vector csr regs using ptrace GETREGSET */
819+
820+ regset_size = sizeof (* regset_data ) + vlenb * 32 ;
821+ regset_data = calloc (1 , regset_size );
822+
823+ iov .iov_base = regset_data ;
824+ iov .iov_len = regset_size ;
825+
826+ ASSERT_EQ (0 , ptrace (PTRACE_GETREGSET , pid , NT_RISCV_VECTOR , & iov ));
827+
828+ /* verify initial vsetvli settings */
829+
830+ if (is_xtheadvector_supported ())
831+ EXPECT_EQ (5UL , regset_data -> vtype );
832+ else
833+ EXPECT_EQ (9UL , regset_data -> vtype );
834+
835+ EXPECT_EQ (regset_data -> vlenb , regset_data -> vl );
836+ EXPECT_EQ (vlenb , regset_data -> vlenb );
837+ EXPECT_EQ (0UL , regset_data -> vstart );
838+ EXPECT_EQ (0UL , regset_data -> vcsr );
839+
840+ /* apply valid settings from fixture variants */
841+
842+ regset_data -> vlenb *= variant -> vlenb_mul ;
843+ regset_data -> vstart = variant -> vstart ;
844+ regset_data -> vtype = variant -> vtype ;
845+ regset_data -> vcsr = variant -> vcsr ;
846+ regset_data -> vl = variant -> vl ;
847+
848+ iov .iov_base = regset_data ;
849+ iov .iov_len = regset_size ;
850+
851+ ASSERT_EQ (0 , ptrace (PTRACE_SETREGSET , pid , NT_RISCV_VECTOR , & iov ));
852+
853+ /* skip 1st ebreak, then resume and wait for the 2nd ebreak */
854+
855+ iov .iov_base = & regs ;
856+ iov .iov_len = sizeof (regs );
857+
858+ ASSERT_EQ (0 , ptrace (PTRACE_GETREGSET , pid , NT_PRSTATUS , & iov ));
859+ regs .pc += 4 ;
860+ ASSERT_EQ (0 , ptrace (PTRACE_SETREGSET , pid , NT_PRSTATUS , & iov ));
861+
862+ ASSERT_EQ (0 , ptrace (PTRACE_CONT , pid , NULL , NULL ));
863+ ASSERT_EQ (pid , waitpid (pid , & status , 0 ));
864+ ASSERT_TRUE (WIFSTOPPED (status ));
865+
866+ /* read tracee vector csr regs using ptrace GETREGSET */
867+
868+ iov .iov_base = regset_data ;
869+ iov .iov_len = regset_size ;
870+
871+ ASSERT_EQ (0 , ptrace (PTRACE_GETREGSET , pid , NT_RISCV_VECTOR , & iov ));
872+
873+ /* verify vector csr regs from tracee context */
874+
875+ EXPECT_EQ (regset_data -> vstart , variant -> vstart );
876+ EXPECT_EQ (regset_data -> vtype , variant -> vtype );
877+ EXPECT_EQ (regset_data -> vcsr , variant -> vcsr );
878+ EXPECT_EQ (regset_data -> vl , variant -> vl );
879+ EXPECT_EQ (regset_data -> vlenb , vlenb );
880+
881+ /* skip 2nd ebreak, then resume and wait for the 3rd ebreak */
882+
883+ iov .iov_base = & regs ;
884+ iov .iov_len = sizeof (regs );
885+
886+ ASSERT_EQ (0 , ptrace (PTRACE_GETREGSET , pid , NT_PRSTATUS , & iov ));
887+ regs .pc += 4 ;
888+ ASSERT_EQ (0 , ptrace (PTRACE_SETREGSET , pid , NT_PRSTATUS , & iov ));
889+
890+ ASSERT_EQ (0 , ptrace (PTRACE_CONT , pid , NULL , NULL ));
891+ ASSERT_EQ (pid , waitpid (pid , & status , 0 ));
892+ ASSERT_TRUE (WIFSTOPPED (status ));
893+
894+ /* read tracee vector csr regs using ptrace GETREGSET */
895+
896+ iov .iov_base = regset_data ;
897+ iov .iov_len = regset_size ;
898+
899+ ASSERT_EQ (0 , ptrace (PTRACE_GETREGSET , pid , NT_RISCV_VECTOR , & iov ));
900+
901+ /* verify vector csr regs from tracee context */
902+
903+ EXPECT_EQ (regset_data -> vstart , variant -> vstart );
904+ EXPECT_EQ (regset_data -> vtype , variant -> vtype );
905+ EXPECT_EQ (regset_data -> vcsr , variant -> vcsr );
906+ EXPECT_EQ (regset_data -> vl , variant -> vl );
907+ EXPECT_EQ (regset_data -> vlenb , vlenb );
908+
909+ /* cleanup */
910+
911+ ASSERT_EQ (0 , kill (pid , SIGKILL ));
912+ }
913+ }
914+
654915TEST_HARNESS_MAIN