1919#include "xe_gt_sriov_printk.h"
2020#include "xe_guc_buf.h"
2121#include "xe_guc_ct.h"
22+ #include "xe_migrate.h"
2223#include "xe_mmio.h"
2324#include "xe_sriov.h"
2425#include "xe_sriov_packet.h"
@@ -505,6 +506,206 @@ int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
505506 return pf_restore_vf_mmio_mig_data (gt , vfid , data );
506507}
507508
509+ static ssize_t pf_migration_vram_size (struct xe_gt * gt , unsigned int vfid )
510+ {
511+ if (!xe_gt_is_main_type (gt ))
512+ return 0 ;
513+
514+ return xe_gt_sriov_pf_config_get_lmem (gt , vfid );
515+ }
516+
/*
 * Copy @size bytes between the VF's LMEM object and a sysmem staging buffer:
 * VRAM -> sysmem when @save is true, sysmem -> VRAM otherwise.
 *
 * Both buffer objects are locked via drm_exec before the copy is queued.
 * Returns the fence of the queued copy (caller waits and puts it), or an
 * ERR_PTR() if locking either object failed.
 *
 * NOTE(review): the xe_migrate_vram_copy_chunk() result is returned as-is —
 * presumably a fence or ERR_PTR(); confirm it can never be NULL, since
 * callers pass it straight to dma_fence_wait_timeout().
 */
static struct dma_fence *__pf_save_restore_vram(struct xe_gt *gt, unsigned int vfid,
						struct xe_bo *vram, u64 vram_offset,
						struct xe_bo *sysmem, u64 sysmem_offset,
						size_t size, bool save)
{
	struct dma_fence *ret = NULL;
	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		/* on contention the retry macro restarts the locking loop */
		err = drm_exec_lock_obj(&exec, &vram->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (err) {
			ret = ERR_PTR(err);
			goto err;
		}

		err = drm_exec_lock_obj(&exec, &sysmem->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (err) {
			ret = ERR_PTR(err);
			goto err;
		}
	}

	ret = xe_migrate_vram_copy_chunk(vram, vram_offset, sysmem, sysmem_offset, size,
					 save ? XE_MIGRATE_COPY_TO_SRAM : XE_MIGRATE_COPY_TO_VRAM);

err:
	drm_exec_fini(&exec);

	return ret;
}
551+
552+ #define PF_VRAM_SAVE_RESTORE_TIMEOUT (5 * HZ)
553+ static int pf_save_vram_chunk (struct xe_gt * gt , unsigned int vfid ,
554+ struct xe_bo * src_vram , u64 src_vram_offset ,
555+ size_t size )
556+ {
557+ struct xe_sriov_packet * data ;
558+ struct dma_fence * fence ;
559+ int ret ;
560+
561+ data = xe_sriov_packet_alloc (gt_to_xe (gt ));
562+ if (!data )
563+ return - ENOMEM ;
564+
565+ ret = xe_sriov_packet_init (data , gt -> tile -> id , gt -> info .id ,
566+ XE_SRIOV_PACKET_TYPE_VRAM , src_vram_offset ,
567+ size );
568+ if (ret )
569+ goto fail ;
570+
571+ fence = __pf_save_restore_vram (gt , vfid ,
572+ src_vram , src_vram_offset ,
573+ data -> bo , 0 , size , true);
574+
575+ ret = dma_fence_wait_timeout (fence , false, PF_VRAM_SAVE_RESTORE_TIMEOUT );
576+ dma_fence_put (fence );
577+ if (!ret ) {
578+ ret = - ETIME ;
579+ goto fail ;
580+ }
581+
582+ pf_dump_mig_data (gt , vfid , data , "VRAM data save" );
583+
584+ ret = xe_gt_sriov_pf_migration_save_produce (gt , vfid , data );
585+ if (ret )
586+ goto fail ;
587+
588+ return 0 ;
589+
590+ fail :
591+ xe_sriov_packet_free (data );
592+ return ret ;
593+ }
594+
595+ #define VF_VRAM_STATE_CHUNK_MAX_SIZE SZ_512M
596+ static int pf_save_vf_vram_mig_data (struct xe_gt * gt , unsigned int vfid )
597+ {
598+ struct xe_gt_sriov_migration_data * migration = pf_pick_gt_migration (gt , vfid );
599+ loff_t * offset = & migration -> save .vram_offset ;
600+ struct xe_bo * vram ;
601+ size_t vram_size , chunk_size ;
602+ int ret ;
603+
604+ vram = xe_gt_sriov_pf_config_get_lmem_obj (gt , vfid );
605+ if (!vram )
606+ return - ENXIO ;
607+
608+ vram_size = xe_bo_size (vram );
609+
610+ xe_gt_assert (gt , * offset < vram_size );
611+
612+ chunk_size = min (vram_size - * offset , VF_VRAM_STATE_CHUNK_MAX_SIZE );
613+
614+ ret = pf_save_vram_chunk (gt , vfid , vram , * offset , chunk_size );
615+ if (ret )
616+ goto fail ;
617+
618+ * offset += chunk_size ;
619+
620+ xe_bo_put (vram );
621+
622+ if (* offset < vram_size )
623+ return - EAGAIN ;
624+
625+ return 0 ;
626+
627+ fail :
628+ xe_bo_put (vram );
629+ xe_gt_sriov_err (gt , "Failed to save VF%u VRAM data (%pe)\n" , vfid , ERR_PTR (ret ));
630+ return ret ;
631+ }
632+
633+ static int pf_restore_vf_vram_mig_data (struct xe_gt * gt , unsigned int vfid ,
634+ struct xe_sriov_packet * data )
635+ {
636+ u64 end = data -> hdr .offset + data -> hdr .size ;
637+ struct dma_fence * fence ;
638+ struct xe_bo * vram ;
639+ size_t size ;
640+ int ret = 0 ;
641+
642+ vram = xe_gt_sriov_pf_config_get_lmem_obj (gt , vfid );
643+ if (!vram )
644+ return - ENXIO ;
645+
646+ size = xe_bo_size (vram );
647+
648+ if (end > size || end < data -> hdr .size ) {
649+ ret = - EINVAL ;
650+ goto err ;
651+ }
652+
653+ pf_dump_mig_data (gt , vfid , data , "VRAM data restore" );
654+
655+ fence = __pf_save_restore_vram (gt , vfid , vram , data -> hdr .offset ,
656+ data -> bo , 0 , data -> hdr .size , false);
657+ ret = dma_fence_wait_timeout (fence , false, PF_VRAM_SAVE_RESTORE_TIMEOUT );
658+ dma_fence_put (fence );
659+ if (!ret ) {
660+ ret = - ETIME ;
661+ goto err ;
662+ }
663+
664+ return 0 ;
665+ err :
666+ xe_bo_put (vram );
667+ xe_gt_sriov_err (gt , "Failed to restore VF%u VRAM data (%pe)\n" , vfid , ERR_PTR (ret ));
668+ return ret ;
669+ }
670+
671+ /**
672+ * xe_gt_sriov_pf_migration_vram_save() - Save VF VRAM migration data.
673+ * @gt: the &xe_gt
674+ * @vfid: the VF identifier (can't be 0)
675+ *
676+ * This function is for PF only.
677+ *
678+ * Return: 0 on success or a negative error code on failure.
679+ */
680+ int xe_gt_sriov_pf_migration_vram_save (struct xe_gt * gt , unsigned int vfid )
681+ {
682+ xe_gt_assert (gt , IS_SRIOV_PF (gt_to_xe (gt )));
683+ xe_gt_assert (gt , vfid != PFID );
684+ xe_gt_assert (gt , vfid <= xe_sriov_pf_get_totalvfs (gt_to_xe (gt )));
685+
686+ return pf_save_vf_vram_mig_data (gt , vfid );
687+ }
688+
689+ /**
690+ * xe_gt_sriov_pf_migration_vram_restore() - Restore VF VRAM migration data.
691+ * @gt: the &xe_gt
692+ * @vfid: the VF identifier (can't be 0)
693+ * @data: the &xe_sriov_packet containing migration data
694+ *
695+ * This function is for PF only.
696+ *
697+ * Return: 0 on success or a negative error code on failure.
698+ */
699+ int xe_gt_sriov_pf_migration_vram_restore (struct xe_gt * gt , unsigned int vfid ,
700+ struct xe_sriov_packet * data )
701+ {
702+ xe_gt_assert (gt , IS_SRIOV_PF (gt_to_xe (gt )));
703+ xe_gt_assert (gt , vfid != PFID );
704+ xe_gt_assert (gt , vfid <= xe_sriov_pf_get_totalvfs (gt_to_xe (gt )));
705+
706+ return pf_restore_vf_vram_mig_data (gt , vfid , data );
707+ }
708+
508709/**
509710 * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
510711 * @gt: the &xe_gt
@@ -544,6 +745,13 @@ ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid)
544745 size += sizeof (struct xe_sriov_packet_hdr );
545746 total += size ;
546747
748+ size = pf_migration_vram_size (gt , vfid );
749+ if (size < 0 )
750+ return size ;
751+ if (size > 0 )
752+ size += sizeof (struct xe_sriov_packet_hdr );
753+ total += size ;
754+
547755 return total ;
548756}
549757
@@ -606,6 +814,7 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid)
606814 struct xe_gt_sriov_migration_data * migration = pf_pick_gt_migration (gt , vfid );
607815
608816 migration -> save .data_remaining = 0 ;
817+ migration -> save .vram_offset = 0 ;
609818
610819 xe_gt_assert (gt , pf_migration_guc_size (gt , vfid ) > 0 );
611820 pf_migration_save_data_todo (gt , vfid , XE_SRIOV_PACKET_TYPE_GUC );
@@ -615,6 +824,9 @@ void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid)
615824
616825 xe_gt_assert (gt , pf_migration_mmio_size (gt , vfid ) > 0 );
617826 pf_migration_save_data_todo (gt , vfid , XE_SRIOV_PACKET_TYPE_MMIO );
827+
828+ if (pf_migration_vram_size (gt , vfid ) > 0 )
829+ pf_migration_save_data_todo (gt , vfid , XE_SRIOV_PACKET_TYPE_VRAM );
618830}
619831
620832/**
0 commit comments