@@ -151,6 +151,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/* from vcn4 and above, only unified queue is used */
+	adev->vcn.using_unified_queue =
+		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
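Note: the hunk above moves the unified-queue check out of a per-call helper and into software init, caching the answer on the device structure so submission paths only read a boolean. A minimal standalone sketch of that compute-once-then-cache pattern, using simplified stand-ins for the driver types and for amdgpu_ip_version()/IP_VERSION() (all names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the packed IP-version encoding. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* Simplified per-device VCN state (not the real struct amdgpu_vcn). */
struct vcn_state {
	unsigned int ip_version;
	bool using_unified_queue;	/* cached once at init */
};

static void vcn_sw_init(struct vcn_state *vcn)
{
	/* from vcn4 and above, only unified queue is used */
	vcn->using_unified_queue = vcn->ip_version >= IP_VERSION(4, 0, 0);
}

int main(void)
{
	struct vcn_state vcn = { .ip_version = IP_VERSION(4, 0, 2) };

	vcn_sw_init(&vcn);
	/* Hot submission paths now just read the cached flag. */
	printf("unified queue: %s\n", vcn.using_unified_queue ? "yes" : "no");
	return 0;
}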
@@ -279,18 +283,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool ret = false;
-
-	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
-		ret = true;
-
-	return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;
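With the helper above deleted, callers read the cached bit directly. That presumes a companion header change (not shown in this diff) adding the field to struct amdgpu_vcn in amdgpu_vcn.h, roughly along these lines:

/* Sketch of the assumed amdgpu_vcn.h change; surrounding members are
 * elided and this is not the full structure definition. */
struct amdgpu_vcn {
	/* ... existing members ... */
	bool using_unified_queue;
};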
@@ -728,12 +720,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	uint32_t *ib_checksum;
 	uint32_t ib_pack_in_dw;
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -746,7 +737,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib->length_dw = 0;
 
 	/* single queue headers */
-	if (sq) {
+	if (adev->vcn.using_unified_queue) {
 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 				+ 4 + 2; /* engine info + decoding ib in dw */
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -765,7 +756,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
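For context on the unified-queue branches in this function and the encoder paths below: when the flag is set, the IB is grown by eight dwords and bracketed by a header written up front (amdgpu_vcn_unified_ring_ib_header()) and a checksum patched in after the message is packed (amdgpu_vcn_unified_ring_ib_checksum()). A rough standalone sketch of that write-placeholder-then-patch shape, with a made-up header layout and a simple additive checksum that are not the driver's actual format:

#include <stdint.h>
#include <stdio.h>

/* Write a two-dword header and return a pointer to the checksum slot
 * so it can be patched once the payload is known (illustration only). */
static uint32_t *ib_header(uint32_t *ib, uint32_t *len, uint32_t pack_in_dw)
{
	ib[(*len)++] = pack_in_dw;	/* payload size in dwords */
	ib[(*len)++] = 0;		/* checksum placeholder */
	return &ib[*len - 1];
}

/* Patch the placeholder with a simple sum over the packed payload. */
static void ib_checksum(uint32_t *checksum, const uint32_t *payload,
			uint32_t pack_in_dw)
{
	uint32_t sum = 0, i;

	for (i = 0; i < pack_in_dw; i++)
		sum += payload[i];
	*checksum = sum;
}

int main(void)
{
	uint32_t ib[16], len = 0, *csum;

	csum = ib_header(ib, &len, 4);
	ib[len++] = 0x00000018;		/* packed message payload */
	ib[len++] = 0x00000001;
	ib[len++] = 0x00000000;
	ib[len++] = 0x00000002;
	ib_checksum(csum, &ib[2], 4);
	printf("checksum = 0x%08x\n", *csum);
	return 0;
}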
@@ -855,15 +846,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 					 struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -877,7 +868,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -899,7 +890,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -922,15 +913,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 					  struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -944,7 +935,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -966,7 +957,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);