@@ -770,3 +770,122 @@ int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_fl
770770 bpf_cpumask_release (mask2 );
771771 return 0 ;
772772}
773+
774+ SEC ("tp_btf/task_newtask" )
775+ int BPF_PROG (test_populate_reject_small_mask , struct task_struct * task , u64 clone_flags )
776+ {
777+ struct bpf_cpumask * local ;
778+ u8 toofewbits ;
779+ int ret ;
780+
781+ if (!is_test_task ())
782+ return 0 ;
783+
784+ local = create_cpumask ();
785+ if (!local )
786+ return 0 ;
787+
788+ /* The kfunc should prevent this operation */
789+ ret = bpf_cpumask_populate ((struct cpumask * )local , & toofewbits , sizeof (toofewbits ));
790+ if (ret != - EACCES )
791+ err = 2 ;
792+
793+ bpf_cpumask_release (local );
794+
795+ return 0 ;
796+ }
797+
/* Mask is guaranteed to be large enough for bpf_cpumask_t. */
#define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t))

/*
 * Source buffer shared by the bpf_cpumask_populate() tests below. The
 * extra u64 word lets test_populate_reject_unaligned offset into the
 * array by one byte while still covering CPUMASK_TEST_MASKLEN bytes.
 */
u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1];
/* Weak __kconfig extern: falls back to 0 when the running kernel's
 * config does not define the symbol.
 */
extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak;
804+
805+ SEC ("tp_btf/task_newtask" )
806+ int BPF_PROG (test_populate_reject_unaligned , struct task_struct * task , u64 clone_flags )
807+ {
808+ struct bpf_cpumask * mask ;
809+ char * src ;
810+ int ret ;
811+
812+ if (!is_test_task ())
813+ return 0 ;
814+
815+ /* Skip if unaligned accesses are fine for this arch. */
816+ if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS )
817+ return 0 ;
818+
819+ mask = bpf_cpumask_create ();
820+ if (!mask ) {
821+ err = 1 ;
822+ return 0 ;
823+ }
824+
825+ /* Misalign the source array by a byte. */
826+ src = & ((char * )bits )[1 ];
827+
828+ ret = bpf_cpumask_populate ((struct cpumask * )mask , src , CPUMASK_TEST_MASKLEN );
829+ if (ret != - EINVAL )
830+ err = 2 ;
831+
832+ bpf_cpumask_release (mask );
833+
834+ return 0 ;
835+ }
836+
837+
838+ SEC ("tp_btf/task_newtask" )
839+ int BPF_PROG (test_populate , struct task_struct * task , u64 clone_flags )
840+ {
841+ struct bpf_cpumask * mask ;
842+ bool bit ;
843+ int ret ;
844+ int i ;
845+
846+ if (!is_test_task ())
847+ return 0 ;
848+
849+ /* Set only odd bits. */
850+ __builtin_memset (bits , 0xaa , CPUMASK_TEST_MASKLEN );
851+
852+ mask = bpf_cpumask_create ();
853+ if (!mask ) {
854+ err = 1 ;
855+ return 0 ;
856+ }
857+
858+ /* Pass the entire bits array, the kfunc will only copy the valid bits. */
859+ ret = bpf_cpumask_populate ((struct cpumask * )mask , bits , CPUMASK_TEST_MASKLEN );
860+ if (ret ) {
861+ err = 2 ;
862+ goto out ;
863+ }
864+
865+ /*
866+ * Test is there to appease the verifier. We cannot directly
867+ * access NR_CPUS, the upper bound for nr_cpus, so we infer
868+ * it from the size of cpumask_t.
869+ */
870+ if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8 ) {
871+ err = 3 ;
872+ goto out ;
873+ }
874+
875+ bpf_for (i , 0 , nr_cpus ) {
876+ /* Odd-numbered bits should be set, even ones unset. */
877+ bit = bpf_cpumask_test_cpu (i , (const struct cpumask * )mask );
878+ if (bit == (i % 2 != 0 ))
879+ continue ;
880+
881+ err = 4 ;
882+ break ;
883+ }
884+
885+ out :
886+ bpf_cpumask_release (mask );
887+
888+ return 0 ;
889+ }
890+
/* Keep the helper macro scoped to the bpf_cpumask_populate() tests. */
#undef CPUMASK_TEST_MASKLEN