@@ -334,4 +334,321 @@ TEST(ptrace_v_syscall_clobbering)
334334 }
335335}
336336
FIXTURE(v_csr_invalid)
{
	/* No per-test state: all parameters live in the fixture variants. */
};
340+
FIXTURE_SETUP(v_csr_invalid)
{
	/* Nothing to set up. */
}
344+
FIXTURE_TEARDOWN(v_csr_invalid)
{
	/* Nothing to tear down. */
}
348+
/* Extension bits used by a variant's .spec to say where it applies. */
#define VECTOR_1_0		BIT(0)
#define XTHEAD_VECTOR_0_7	BIT(1)

/*
 * Fix: the parameter list must follow the macro name with no intervening
 * whitespace, otherwise these become object-like macros expanding to
 * "(x ) (...)" and every call site breaks.
 */
#define vector_test(x)	((x) & VECTOR_1_0)	/* variant applies to V 1.0 */
#define xthead_test(x)	((x) & XTHEAD_VECTOR_0_7) /* variant applies to XTheadVector */
354+
/* Modifications applied on top of the tracee's initial vsetvli settings. */
FIXTURE_VARIANT(v_csr_invalid)
{
	unsigned long vstart;		/* value written into vstart */
	unsigned long vl;		/* value written into vl */
	unsigned long vtype;		/* value written into vtype */
	unsigned long vcsr;		/* value written into vcsr */
	unsigned long vlenb_mul;	/* multiplier applied to the read vlenb */
	unsigned long vlenb_min;	/* skip when vlenb is below this (0 = no bound) */
	unsigned long vlenb_max;	/* skip when vlenb is above this (0 = no bound) */
	unsigned long spec;		/* which vector extensions the variant targets */
};
367+
368+ /* unexpected vlenb value */
369+ FIXTURE_VARIANT_ADD (v_csr_invalid , new_vlenb )
370+ {
371+ .vstart = 0x0 ,
372+ .vl = 0x0 ,
373+ .vtype = 0x3 ,
374+ .vcsr = 0x0 ,
375+ .vlenb_mul = 0x2 ,
376+ .vlenb_min = 0x0 ,
377+ .vlenb_max = 0x0 ,
378+ .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7 ,
379+ };
380+
381+ /* invalid reserved bits in vcsr */
382+ FIXTURE_VARIANT_ADD (v_csr_invalid , vcsr_invalid_reserved_bits )
383+ {
384+ .vstart = 0x0 ,
385+ .vl = 0x0 ,
386+ .vtype = 0x3 ,
387+ .vcsr = 0x1UL << 8 ,
388+ .vlenb_mul = 0x1 ,
389+ .vlenb_min = 0x0 ,
390+ .vlenb_max = 0x0 ,
391+ .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7 ,
392+ };
393+
394+ /* invalid reserved bits in vtype */
395+ FIXTURE_VARIANT_ADD (v_csr_invalid , vtype_invalid_reserved_bits )
396+ {
397+ .vstart = 0x0 ,
398+ .vl = 0x0 ,
399+ .vtype = (0x1UL << 8 ) | 0x3 ,
400+ .vcsr = 0x0 ,
401+ .vlenb_mul = 0x1 ,
402+ .vlenb_min = 0x0 ,
403+ .vlenb_max = 0x0 ,
404+ .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7 ,
405+ };
406+
407+ /* set vill bit */
408+ FIXTURE_VARIANT_ADD (v_csr_invalid , invalid_vill_bit )
409+ {
410+ .vstart = 0x0 ,
411+ .vl = 0x0 ,
412+ .vtype = (0x1UL << (__riscv_xlen - 1 )) | 0x3 ,
413+ .vcsr = 0x0 ,
414+ .vlenb_mul = 0x1 ,
415+ .vlenb_min = 0x0 ,
416+ .vlenb_max = 0x0 ,
417+ .spec = VECTOR_1_0 | XTHEAD_VECTOR_0_7 ,
418+ };
419+
420+ /* reserved vsew value: vsew > 3 */
421+ FIXTURE_VARIANT_ADD (v_csr_invalid , reserved_vsew )
422+ {
423+ .vstart = 0x0 ,
424+ .vl = 0x0 ,
425+ .vtype = 0x4UL << 3 ,
426+ .vcsr = 0x0 ,
427+ .vlenb_mul = 0x1 ,
428+ .vlenb_min = 0x0 ,
429+ .vlenb_max = 0x0 ,
430+ .spec = VECTOR_1_0 ,
431+ };
432+
433+ /* XTheadVector: unsupported non-zero VEDIV value */
434+ FIXTURE_VARIANT_ADD (v_csr_invalid , reserved_vediv )
435+ {
436+ .vstart = 0x0 ,
437+ .vl = 0x0 ,
438+ .vtype = 0x3UL << 5 ,
439+ .vcsr = 0x0 ,
440+ .vlenb_mul = 0x1 ,
441+ .vlenb_min = 0x0 ,
442+ .vlenb_max = 0x0 ,
443+ .spec = XTHEAD_VECTOR_0_7 ,
444+ };
445+
446+ /* reserved vlmul value: vlmul == 4 */
447+ FIXTURE_VARIANT_ADD (v_csr_invalid , reserved_vlmul )
448+ {
449+ .vstart = 0x0 ,
450+ .vl = 0x0 ,
451+ .vtype = 0x4 ,
452+ .vcsr = 0x0 ,
453+ .vlenb_mul = 0x1 ,
454+ .vlenb_min = 0x0 ,
455+ .vlenb_max = 0x0 ,
456+ .spec = VECTOR_1_0 ,
457+ };
458+
459+ /* invalid fractional LMUL for VLEN <= 256: LMUL= 1/8, SEW = 64 */
460+ FIXTURE_VARIANT_ADD (v_csr_invalid , frac_lmul1 )
461+ {
462+ .vstart = 0x0 ,
463+ .vl = 0x0 ,
464+ .vtype = 0x1d ,
465+ .vcsr = 0x0 ,
466+ .vlenb_mul = 0x1 ,
467+ .vlenb_min = 0x0 ,
468+ .vlenb_max = 0x20 ,
469+ .spec = VECTOR_1_0 ,
470+ };
471+
472+ /* invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */
473+ FIXTURE_VARIANT_ADD (v_csr_invalid , int_lmul1 )
474+ {
475+ .vstart = 0x0 ,
476+ .vl = 0x0 ,
477+ .vtype = 0x19 ,
478+ .vcsr = 0x0 ,
479+ .vlenb_mul = 0x1 ,
480+ .vlenb_min = 0x0 ,
481+ .vlenb_max = 0x2 ,
482+ .spec = VECTOR_1_0 ,
483+ };
484+
485+ /* XTheadVector: invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */
486+ FIXTURE_VARIANT_ADD (v_csr_invalid , int_lmul2 )
487+ {
488+ .vstart = 0x0 ,
489+ .vl = 0x0 ,
490+ .vtype = 0xd ,
491+ .vcsr = 0x0 ,
492+ .vlenb_mul = 0x1 ,
493+ .vlenb_min = 0x0 ,
494+ .vlenb_max = 0x2 ,
495+ .spec = XTHEAD_VECTOR_0_7 ,
496+ };
497+
498+ /* invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */
499+ FIXTURE_VARIANT_ADD (v_csr_invalid , vl1 )
500+ {
501+ .vstart = 0x0 ,
502+ .vl = 0x8 ,
503+ .vtype = 0x19 ,
504+ .vcsr = 0x0 ,
505+ .vlenb_mul = 0x1 ,
506+ .vlenb_min = 0x0 ,
507+ .vlenb_max = 0x10 ,
508+ .spec = VECTOR_1_0 ,
509+ };
510+
511+ /* XTheadVector: invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */
512+ FIXTURE_VARIANT_ADD (v_csr_invalid , vl2 )
513+ {
514+ .vstart = 0x0 ,
515+ .vl = 0x8 ,
516+ .vtype = 0xd ,
517+ .vcsr = 0x0 ,
518+ .vlenb_mul = 0x1 ,
519+ .vlenb_min = 0x0 ,
520+ .vlenb_max = 0x10 ,
521+ .spec = XTHEAD_VECTOR_0_7 ,
522+ };
523+
524+ TEST_F (v_csr_invalid , ptrace_v_invalid_values )
525+ {
526+ unsigned long vlenb ;
527+ pid_t pid ;
528+
529+ if (!is_vector_supported () && !is_xtheadvector_supported ())
530+ SKIP (return , "Vectors not supported" );
531+
532+ if (is_vector_supported () && !vector_test (variant -> spec ))
533+ SKIP (return , "Test not supported for Vector" );
534+
535+ if (is_xtheadvector_supported () && !xthead_test (variant -> spec ))
536+ SKIP (return , "Test not supported for XTheadVector" );
537+
538+ vlenb = get_vr_len ();
539+
540+ if (variant -> vlenb_min ) {
541+ if (vlenb < variant -> vlenb_min )
542+ SKIP (return , "This test does not support VLEN < %lu\n" ,
543+ variant -> vlenb_min * 8 );
544+ }
545+
546+ if (variant -> vlenb_max ) {
547+ if (vlenb > variant -> vlenb_max )
548+ SKIP (return , "This test does not support VLEN > %lu\n" ,
549+ variant -> vlenb_max * 8 );
550+ }
551+
552+ chld_lock = 1 ;
553+ pid = fork ();
554+ ASSERT_LE (0 , pid )
555+ TH_LOG ("fork: %m" );
556+
557+ if (pid == 0 ) {
558+ unsigned long vl ;
559+
560+ while (chld_lock == 1 )
561+ asm volatile ("" : : "g" (chld_lock ) : "memory" );
562+
563+ if (is_xtheadvector_supported ()) {
564+ asm volatile (
565+ // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
566+ // vsetvli t4, x0, e16, m2, d1
567+ ".4byte 0b00000000010100000111111011010111\n"
568+ "mv %[new_vl], t4\n"
569+ : [new_vl ] "=r" (vl ) : : "t4" );
570+ } else {
571+ asm volatile (
572+ ".option push\n"
573+ ".option arch, +zve32x\n"
574+ "vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
575+ ".option pop\n"
576+ : [new_vl ] "=r" (vl ) : : );
577+ }
578+
579+ while (1 ) {
580+ asm volatile (
581+ ".option push\n"
582+ ".option norvc\n"
583+ "ebreak\n"
584+ "nop\n"
585+ ".option pop\n" );
586+ }
587+ } else {
588+ struct __riscv_v_regset_state * regset_data ;
589+ size_t regset_size ;
590+ struct iovec iov ;
591+ int status ;
592+ int ret ;
593+
594+ /* attach */
595+
596+ ASSERT_EQ (0 , ptrace (PTRACE_ATTACH , pid , NULL , NULL ));
597+ ASSERT_EQ (pid , waitpid (pid , & status , 0 ));
598+ ASSERT_TRUE (WIFSTOPPED (status ));
599+
600+ /* unlock */
601+
602+ ASSERT_EQ (0 , ptrace (PTRACE_POKEDATA , pid , & chld_lock , 0 ));
603+
604+ /* resume and wait for the 1st ebreak */
605+
606+ ASSERT_EQ (0 , ptrace (PTRACE_CONT , pid , NULL , NULL ));
607+ ASSERT_EQ (pid , waitpid (pid , & status , 0 ));
608+ ASSERT_TRUE (WIFSTOPPED (status ));
609+
610+ /* read tracee vector csr regs using ptrace GETREGSET */
611+
612+ regset_size = sizeof (* regset_data ) + vlenb * 32 ;
613+ regset_data = calloc (1 , regset_size );
614+
615+ iov .iov_base = regset_data ;
616+ iov .iov_len = regset_size ;
617+
618+ ASSERT_EQ (0 , ptrace (PTRACE_GETREGSET , pid , NT_RISCV_VECTOR , & iov ));
619+
620+ /* verify initial vsetvli settings */
621+
622+ if (is_xtheadvector_supported ())
623+ EXPECT_EQ (5UL , regset_data -> vtype );
624+ else
625+ EXPECT_EQ (9UL , regset_data -> vtype );
626+
627+ EXPECT_EQ (regset_data -> vlenb , regset_data -> vl );
628+ EXPECT_EQ (vlenb , regset_data -> vlenb );
629+ EXPECT_EQ (0UL , regset_data -> vstart );
630+ EXPECT_EQ (0UL , regset_data -> vcsr );
631+
632+ /* apply invalid settings from fixture variants */
633+
634+ regset_data -> vlenb *= variant -> vlenb_mul ;
635+ regset_data -> vstart = variant -> vstart ;
636+ regset_data -> vtype = variant -> vtype ;
637+ regset_data -> vcsr = variant -> vcsr ;
638+ regset_data -> vl = variant -> vl ;
639+
640+ iov .iov_base = regset_data ;
641+ iov .iov_len = regset_size ;
642+
643+ errno = 0 ;
644+ ret = ptrace (PTRACE_SETREGSET , pid , NT_RISCV_VECTOR , & iov );
645+ ASSERT_EQ (errno , EINVAL );
646+ ASSERT_EQ (ret , -1 );
647+
648+ /* cleanup */
649+
650+ ASSERT_EQ (0 , kill (pid , SIGKILL ));
651+ }
652+ }
653+
337654TEST_HARNESS_MAIN
0 commit comments