@@ -327,6 +327,47 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
327327 return SDW_CMD_OK ;
328328}
329329
330+ static void amd_sdw_fill_slave_status (struct amd_sdw_manager * amd_manager , u16 index , u32 status )
331+ {
332+ switch (status ) {
333+ case SDW_SLAVE_ATTACHED :
334+ case SDW_SLAVE_UNATTACHED :
335+ case SDW_SLAVE_ALERT :
336+ amd_manager -> status [index ] = status ;
337+ break ;
338+ default :
339+ amd_manager -> status [index ] = SDW_SLAVE_RESERVED ;
340+ break ;
341+ }
342+ }
343+
344+ static void amd_sdw_process_ping_status (u64 response , struct amd_sdw_manager * amd_manager )
345+ {
346+ u64 slave_stat ;
347+ u32 val ;
348+ u16 dev_index ;
349+
350+ /* slave status response */
351+ slave_stat = FIELD_GET (AMD_SDW_MCP_SLAVE_STAT_0_3 , response );
352+ slave_stat |= FIELD_GET (AMD_SDW_MCP_SLAVE_STAT_4_11 , response ) << 8 ;
353+ dev_dbg (amd_manager -> dev , "slave_stat:0x%llx\n" , slave_stat );
354+ for (dev_index = 0 ; dev_index <= SDW_MAX_DEVICES ; ++ dev_index ) {
355+ val = (slave_stat >> (dev_index * 2 )) & AMD_SDW_MCP_SLAVE_STATUS_MASK ;
356+ dev_dbg (amd_manager -> dev , "val:0x%x\n" , val );
357+ amd_sdw_fill_slave_status (amd_manager , dev_index , val );
358+ }
359+ }
360+
361+ static void amd_sdw_read_and_process_ping_status (struct amd_sdw_manager * amd_manager )
362+ {
363+ u64 response ;
364+
365+ mutex_lock (& amd_manager -> bus .msg_lock );
366+ response = amd_sdw_send_cmd_get_resp (amd_manager , 0 , 0 );
367+ mutex_unlock (& amd_manager -> bus .msg_lock );
368+ amd_sdw_process_ping_status (response , amd_manager );
369+ }
370+
330371static u32 amd_sdw_read_ping_status (struct sdw_bus * bus )
331372{
332373 struct amd_sdw_manager * amd_manager = to_amd_sdw (bus );
@@ -723,6 +764,89 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
723764 dais , num_dais );
724765}
725766
767+ static void amd_sdw_update_slave_status_work (struct work_struct * work )
768+ {
769+ struct amd_sdw_manager * amd_manager =
770+ container_of (work , struct amd_sdw_manager , amd_sdw_work );
771+ int retry_count = 0 ;
772+
773+ if (amd_manager -> status [0 ] == SDW_SLAVE_ATTACHED ) {
774+ writel (0 , amd_manager -> mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7 );
775+ writel (0 , amd_manager -> mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11 );
776+ }
777+
778+ update_status :
779+ sdw_handle_slave_status (& amd_manager -> bus , amd_manager -> status );
780+ /*
781+ * During the peripheral enumeration sequence, the SoundWire manager interrupts
782+ * are masked. Once the device number programming is done for all peripherals,
783+ * interrupts will be unmasked. Read the peripheral device status from ping command
784+ * and process the response. This sequence will ensure all peripheral devices enumerated
785+ * and initialized properly.
786+ */
787+ if (amd_manager -> status [0 ] == SDW_SLAVE_ATTACHED ) {
788+ if (retry_count ++ < SDW_MAX_DEVICES ) {
789+ writel (AMD_SDW_IRQ_MASK_0TO7 , amd_manager -> mmio +
790+ ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7 );
791+ writel (AMD_SDW_IRQ_MASK_8TO11 , amd_manager -> mmio +
792+ ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11 );
793+ amd_sdw_read_and_process_ping_status (amd_manager );
794+ goto update_status ;
795+ } else {
796+ dev_err_ratelimited (amd_manager -> dev ,
797+ "Device0 detected after %d iterations\n" ,
798+ retry_count );
799+ }
800+ }
801+ }
802+
803+ static void amd_sdw_update_slave_status (u32 status_change_0to7 , u32 status_change_8to11 ,
804+ struct amd_sdw_manager * amd_manager )
805+ {
806+ u64 slave_stat ;
807+ u32 val ;
808+ int dev_index ;
809+
810+ if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED )
811+ memset (amd_manager -> status , 0 , sizeof (amd_manager -> status ));
812+ slave_stat = status_change_0to7 ;
813+ slave_stat |= FIELD_GET (AMD_SDW_MCP_SLAVE_STATUS_8TO_11 , status_change_8to11 ) << 32 ;
814+ dev_dbg (amd_manager -> dev , "status_change_0to7:0x%x status_change_8to11:0x%x\n" ,
815+ status_change_0to7 , status_change_8to11 );
816+ if (slave_stat ) {
817+ for (dev_index = 0 ; dev_index <= SDW_MAX_DEVICES ; ++ dev_index ) {
818+ if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK (dev_index )) {
819+ val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK (dev_index )) &
820+ AMD_SDW_MCP_SLAVE_STATUS_MASK ;
821+ amd_sdw_fill_slave_status (amd_manager , dev_index , val );
822+ }
823+ }
824+ }
825+ }
826+
/*
 * Workqueue handler for SoundWire manager interrupt events.
 *
 * Reads both state-change status registers, decodes them (either via a
 * ping command when a peripheral request interrupt is flagged, or
 * directly from the register contents otherwise), schedules the
 * slave-status update work if anything changed, and finally clears the
 * status registers.
 *
 * NOTE(review): the registers are cleared only after the work is
 * scheduled — presumably so no state change is acknowledged before it
 * has been captured; confirm against the ACP hardware programming model.
 */
static void amd_sdw_irq_thread(struct work_struct *work)
{
	struct amd_sdw_manager *amd_manager =
		container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
	u32 status_change_8to11;
	u32 status_change_0to7;

	/* Snapshot both status registers before acting on them */
	status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
	dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
		amd_manager->instance, status_change_0to7, status_change_8to11);
	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
		/* Peripheral request: get fresh status via an in-band ping */
		amd_sdw_read_and_process_ping_status(amd_manager);
	} else {
		/* Check for the updated status on peripheral device */
		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
	}
	if (status_change_8to11 || status_change_0to7)
		schedule_work(&amd_manager->amd_sdw_work);
	/* Acknowledge the interrupt sources last */
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
	writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
}
849+
726850static void amd_sdw_probe_work (struct work_struct * work )
727851{
728852 struct amd_sdw_manager * amd_manager = container_of (work , struct amd_sdw_manager ,
@@ -815,6 +939,8 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
815939 return ret ;
816940 }
817941 dev_set_drvdata (dev , amd_manager );
942+ INIT_WORK (& amd_manager -> amd_sdw_irq_thread , amd_sdw_irq_thread );
943+ INIT_WORK (& amd_manager -> amd_sdw_work , amd_sdw_update_slave_status_work );
818944 INIT_WORK (& amd_manager -> probe_work , amd_sdw_probe_work );
819945 /*
820946 * Instead of having lengthy probe sequence, use deferred probe.
0 commit comments