Skip to content

Commit bf2084a

Browse files
xiaogang-chen-amd authored and alexdeucher committed
drm/amdkfd: Use huge page size to check split svm range alignment
When splitting svm ranges that have been mapped using huge pages, the huge page size (2MB) should be used to check split range alignment, not prange->granularity, which is the migration granularity. Fixes: 7ef6b2d ("drm/amdkfd: remap unaligned svm ranges that have split") Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com> Reviewed-by: Philip Yang <Philip.Yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> (cherry picked from commit 448ee45)
1 parent c7685d1 commit bf2084a

1 file changed

Lines changed: 32 additions & 14 deletions

File tree

drivers/gpu/drm/amd/amdkfd/kfd_svm.c

Lines changed: 32 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1144,30 +1144,48 @@ static int
11441144
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
11451145
struct list_head *insert_list, struct list_head *remap_list)
11461146
{
1147+
unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1148+
unsigned long start_align = ALIGN(prange->start, 512);
1149+
bool huge_page_mapping = last_align_down > start_align;
11471150
struct svm_range *tail = NULL;
1148-
int r = svm_range_split(prange, prange->start, new_last, &tail);
1151+
int r;
11491152

1150-
if (!r) {
1151-
list_add(&tail->list, insert_list);
1152-
if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
1153-
list_add(&tail->update_list, remap_list);
1154-
}
1155-
return r;
1153+
r = svm_range_split(prange, prange->start, new_last, &tail);
1154+
1155+
if (r)
1156+
return r;
1157+
1158+
list_add(&tail->list, insert_list);
1159+
1160+
if (huge_page_mapping && tail->start > start_align &&
1161+
tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
1162+
list_add(&tail->update_list, remap_list);
1163+
1164+
return 0;
11561165
}
11571166

11581167
static int
11591168
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
11601169
struct list_head *insert_list, struct list_head *remap_list)
11611170
{
1171+
unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
1172+
unsigned long start_align = ALIGN(prange->start, 512);
1173+
bool huge_page_mapping = last_align_down > start_align;
11621174
struct svm_range *head = NULL;
1163-
int r = svm_range_split(prange, new_start, prange->last, &head);
1175+
int r;
11641176

1165-
if (!r) {
1166-
list_add(&head->list, insert_list);
1167-
if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
1168-
list_add(&head->update_list, remap_list);
1169-
}
1170-
return r;
1177+
r = svm_range_split(prange, new_start, prange->last, &head);
1178+
1179+
if (r)
1180+
return r;
1181+
1182+
list_add(&head->list, insert_list);
1183+
1184+
if (huge_page_mapping && head->last + 1 > start_align &&
1185+
head->last + 1 < last_align_down && (!IS_ALIGNED(head->last, 512)))
1186+
list_add(&head->update_list, remap_list);
1187+
1188+
return 0;
11711189
}
11721190

11731191
static void

0 commit comments

Comments (0)