Skip to content

Commit 91da591

Browse files
Ellen Pan authored and Alex Deucher committed
drm/amdgpu: Add logic for VF data exchange region to init from dynamic crit_region offsets
1. Added VF logic to initialize the data exchange region using the offsets from the dynamic (v2) critical region table.

Signed-off-by: Ellen Pan <yunru.pan@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
1 parent b4a8fcc commit 91da591

1 file changed

Lines changed: 85 additions & 19 deletions

File tree

drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

Lines changed: 85 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -218,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
218218
&adev->virt.mm_table.gpu_addr,
219219
(void *)&adev->virt.mm_table.cpu_addr);
220220
if (r) {
221-
DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
221+
dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r);
222222
return r;
223223
}
224224

225225
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
226-
DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
226+
dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
227227
adev->virt.mm_table.gpu_addr,
228228
adev->virt.mm_table.cpu_addr);
229229
return 0;
@@ -403,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
403403
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
404404
AMDGPU_GPU_PAGE_SIZE,
405405
&bo, NULL))
406-
DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
406+
dev_dbg(adev->dev,
407+
"RAS WARN: reserve vram for retired page %llx fail\n",
408+
bp);
407409
data->bps_bo[i] = bo;
408410
}
409411
data->last_reserved = i + 1;
@@ -671,24 +673,50 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
671673
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
672674
}
673675

676+
/*
 * Copy the PF/VF data exchange region from VRAM into @pfvf_data.
 *
 * The region's placement comes from the dynamic (v2) critical region
 * table: the AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID entry supplies the byte
 * offset and the size in KiB (converted to bytes here via << 10).
 *
 * Returns 0 on success, or -EINVAL when the offset or size is not
 * 4-byte aligned, since the VRAM access path works on dword granularity.
 */
static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
{
	uint32_t regn_offset;
	uint32_t regn_size;

	regn_offset = adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
	regn_size = adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;

	dev_info(adev->dev,
		 "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
		 regn_offset, regn_size);

	/* Reject regions the dword-based VRAM accessor cannot copy exactly. */
	if (!IS_ALIGNED(regn_offset, 4) || !IS_ALIGNED(regn_size, 4)) {
		dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n");
		return -EINVAL;
	}

	amdgpu_device_vram_access(adev, (uint64_t)regn_offset, pfvf_data,
				  regn_size, false);

	return 0;
}
699+
674700
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
675701
{
676702
if (adev->virt.vf2pf_update_interval_ms != 0) {
677-
DRM_INFO("clean up the vf2pf work item\n");
703+
dev_info(adev->dev, "clean up the vf2pf work item\n");
678704
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
679705
adev->virt.vf2pf_update_interval_ms = 0;
680706
}
681707
}
682708

683709
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
684710
{
711+
uint32_t *pfvf_data = NULL;
712+
685713
adev->virt.fw_reserve.p_pf2vf = NULL;
686714
adev->virt.fw_reserve.p_vf2pf = NULL;
687715
adev->virt.vf2pf_update_interval_ms = 0;
688716
adev->virt.vf2pf_update_retry_cnt = 0;
689717

690718
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
691-
DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
719+
dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
692720
} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
693721
/* go through this logic in ip_init and reset to init workqueue*/
694722
amdgpu_virt_exchange_data(adev);
@@ -697,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
697725
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
698726
} else if (adev->bios != NULL) {
699727
/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
700-
adev->virt.fw_reserve.p_pf2vf =
701-
(struct amd_sriov_msg_pf2vf_info_header *)
702-
(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
728+
if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
729+
pfvf_data =
730+
kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
731+
GFP_KERNEL);
732+
if (!pfvf_data) {
733+
dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
734+
return;
735+
}
703736

704-
amdgpu_virt_read_pf2vf_data(adev);
737+
if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
738+
goto free_pfvf_data;
739+
740+
adev->virt.fw_reserve.p_pf2vf =
741+
(struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
742+
743+
amdgpu_virt_read_pf2vf_data(adev);
744+
745+
free_pfvf_data:
746+
kfree(pfvf_data);
747+
pfvf_data = NULL;
748+
adev->virt.fw_reserve.p_pf2vf = NULL;
749+
} else {
750+
adev->virt.fw_reserve.p_pf2vf =
751+
(struct amd_sriov_msg_pf2vf_info_header *)
752+
(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
753+
754+
amdgpu_virt_read_pf2vf_data(adev);
755+
}
705756
}
706757
}
707758

@@ -714,14 +765,29 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
714765

715766
if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
716767
if (adev->mman.fw_vram_usage_va) {
717-
adev->virt.fw_reserve.p_pf2vf =
718-
(struct amd_sriov_msg_pf2vf_info_header *)
719-
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
720-
adev->virt.fw_reserve.p_vf2pf =
721-
(struct amd_sriov_msg_vf2pf_info_header *)
722-
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
723-
adev->virt.fw_reserve.ras_telemetry =
724-
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
768+
if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
769+
adev->virt.fw_reserve.p_pf2vf =
770+
(struct amd_sriov_msg_pf2vf_info_header *)
771+
(adev->mman.fw_vram_usage_va +
772+
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
773+
adev->virt.fw_reserve.p_vf2pf =
774+
(struct amd_sriov_msg_vf2pf_info_header *)
775+
(adev->mman.fw_vram_usage_va +
776+
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
777+
(AMD_SRIOV_MSG_SIZE_KB << 10));
778+
adev->virt.fw_reserve.ras_telemetry =
779+
(adev->mman.fw_vram_usage_va +
780+
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
781+
} else {
782+
adev->virt.fw_reserve.p_pf2vf =
783+
(struct amd_sriov_msg_pf2vf_info_header *)
784+
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
785+
adev->virt.fw_reserve.p_vf2pf =
786+
(struct amd_sriov_msg_vf2pf_info_header *)
787+
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
788+
adev->virt.fw_reserve.ras_telemetry =
789+
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
790+
}
725791
} else if (adev->mman.drv_vram_usage_va) {
726792
adev->virt.fw_reserve.p_pf2vf =
727793
(struct amd_sriov_msg_pf2vf_info_header *)
@@ -829,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
829895
break;
830896
default: /* other chip doesn't support SRIOV */
831897
is_sriov = false;
832-
DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
898+
dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
833899
break;
834900
}
835901
}
@@ -1510,7 +1576,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
15101576
case AMDGPU_RAS_BLOCK__MPIO:
15111577
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
15121578
default:
1513-
DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
1579+
dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
15141580
block);
15151581
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
15161582
}

0 commit comments

Comments
 (0)