|
14 | 14 |
|
15 | 15 | #include "../lib/drm_random.h" |
16 | 16 |
|
| 17 | +static unsigned int random_seed; |
| 18 | + |
17 | 19 | static inline u64 get_size(int order, u64 chunk_size) |
18 | 20 | { |
19 | 21 | return (1 << order) * chunk_size; |
20 | 22 | } |
21 | 23 |
|
| 24 | +static void drm_test_buddy_alloc_range_bias(struct kunit *test) |
| 25 | +{ |
| 26 | + u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem; |
| 27 | + DRM_RND_STATE(prng, random_seed); |
| 28 | + unsigned int i, count, *order; |
| 29 | + struct drm_buddy mm; |
| 30 | + LIST_HEAD(allocated); |
| 31 | + |
| 32 | + bias_size = SZ_1M; |
| 33 | + ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size); |
| 34 | + ps = max(SZ_4K, ps); |
| 35 | + mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */ |
| 36 | + |
| 37 | + kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps); |
| 38 | + |
| 39 | + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), |
| 40 | + "buddy_init failed\n"); |
| 41 | + |
| 42 | + count = mm_size / bias_size; |
| 43 | + order = drm_random_order(count, &prng); |
| 44 | + KUNIT_EXPECT_TRUE(test, order); |
| 45 | + |
| 46 | + /* |
| 47 | + * Idea is to split the address space into uniform bias ranges, and then |
| 48 | + * in some random order allocate within each bias, using various |
| 49 | + * patterns within. This should detect if allocations leak out from a |
| 50 | + * given bias, for example. |
| 51 | + */ |
| 52 | + |
| 53 | + for (i = 0; i < count; i++) { |
| 54 | + LIST_HEAD(tmp); |
| 55 | + u32 size; |
| 56 | + |
| 57 | + bias_start = order[i] * bias_size; |
| 58 | + bias_end = bias_start + bias_size; |
| 59 | + bias_rem = bias_size; |
| 60 | + |
| 61 | + /* internal round_up too big */ |
| 62 | + KUNIT_ASSERT_TRUE_MSG(test, |
| 63 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 64 | + bias_end, bias_size + ps, bias_size, |
| 65 | + &allocated, |
| 66 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 67 | + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", |
| 68 | + bias_start, bias_end, bias_size, bias_size); |
| 69 | + |
| 70 | + /* size too big */ |
| 71 | + KUNIT_ASSERT_TRUE_MSG(test, |
| 72 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 73 | + bias_end, bias_size + ps, ps, |
| 74 | + &allocated, |
| 75 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 76 | + "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", |
| 77 | + bias_start, bias_end, bias_size + ps, ps); |
| 78 | + |
| 79 | + /* bias range too small for size */ |
| 80 | + KUNIT_ASSERT_TRUE_MSG(test, |
| 81 | + drm_buddy_alloc_blocks(&mm, bias_start + ps, |
| 82 | + bias_end, bias_size, ps, |
| 83 | + &allocated, |
| 84 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 85 | + "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", |
| 86 | + bias_start + ps, bias_end, bias_size, ps); |
| 87 | + |
| 88 | + /* bias misaligned */ |
| 89 | + KUNIT_ASSERT_TRUE_MSG(test, |
| 90 | + drm_buddy_alloc_blocks(&mm, bias_start + ps, |
| 91 | + bias_end - ps, |
| 92 | + bias_size >> 1, bias_size >> 1, |
| 93 | + &allocated, |
| 94 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 95 | + "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n", |
| 96 | + bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1); |
| 97 | + |
| 98 | + /* single big page */ |
| 99 | + KUNIT_ASSERT_FALSE_MSG(test, |
| 100 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 101 | + bias_end, bias_size, bias_size, |
| 102 | + &tmp, |
| 103 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 104 | + "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n", |
| 105 | + bias_start, bias_end, bias_size, bias_size); |
| 106 | + drm_buddy_free_list(&mm, &tmp); |
| 107 | + |
| 108 | + /* single page with internal round_up */ |
| 109 | + KUNIT_ASSERT_FALSE_MSG(test, |
| 110 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 111 | + bias_end, ps, bias_size, |
| 112 | + &tmp, |
| 113 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 114 | + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", |
| 115 | + bias_start, bias_end, ps, bias_size); |
| 116 | + drm_buddy_free_list(&mm, &tmp); |
| 117 | + |
| 118 | + /* random size within */ |
| 119 | + size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); |
| 120 | + if (size) |
| 121 | + KUNIT_ASSERT_FALSE_MSG(test, |
| 122 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 123 | + bias_end, size, ps, |
| 124 | + &tmp, |
| 125 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 126 | + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", |
| 127 | + bias_start, bias_end, size, ps); |
| 128 | + |
| 129 | + bias_rem -= size; |
| 130 | + /* too big for current avail */ |
| 131 | + KUNIT_ASSERT_TRUE_MSG(test, |
| 132 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 133 | + bias_end, bias_rem + ps, ps, |
| 134 | + &allocated, |
| 135 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 136 | + "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", |
| 137 | + bias_start, bias_end, bias_rem + ps, ps); |
| 138 | + |
| 139 | + if (bias_rem) { |
| 140 | + /* random fill of the remainder */ |
| 141 | + size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); |
| 142 | + size = max(size, ps); |
| 143 | + |
| 144 | + KUNIT_ASSERT_FALSE_MSG(test, |
| 145 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 146 | + bias_end, size, ps, |
| 147 | + &allocated, |
| 148 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 149 | + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", |
| 150 | + bias_start, bias_end, size, ps); |
| 151 | + /* |
| 152 | + * Intentionally allow some space to be left |
| 153 | + * unallocated, and ideally not always on the bias |
| 154 | + * boundaries. |
| 155 | + */ |
| 156 | + drm_buddy_free_list(&mm, &tmp); |
| 157 | + } else { |
| 158 | + list_splice_tail(&tmp, &allocated); |
| 159 | + } |
| 160 | + } |
| 161 | + |
| 162 | + kfree(order); |
| 163 | + drm_buddy_free_list(&mm, &allocated); |
| 164 | + drm_buddy_fini(&mm); |
| 165 | + |
| 166 | + /* |
| 167 | + * Something more free-form. Idea is to pick a random starting bias |
| 168 | + * range within the address space and then start filling it up. Also |
| 169 | + * randomly grow the bias range in both directions as we go along. This |
| 170 | + * should give us bias start/end which is not always uniform like above, |
| 171 | + * and in some cases will require the allocator to jump over already |
| 172 | + * allocated nodes in the middle of the address space. |
| 173 | + */ |
| 174 | + |
| 175 | + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), |
| 176 | + "buddy_init failed\n"); |
| 177 | + |
| 178 | + bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); |
| 179 | + bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps); |
| 180 | + bias_end = max(bias_end, bias_start + ps); |
| 181 | + bias_rem = bias_end - bias_start; |
| 182 | + |
| 183 | + do { |
| 184 | + u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); |
| 185 | + |
| 186 | + KUNIT_ASSERT_FALSE_MSG(test, |
| 187 | + drm_buddy_alloc_blocks(&mm, bias_start, |
| 188 | + bias_end, size, ps, |
| 189 | + &allocated, |
| 190 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 191 | + "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", |
| 192 | + bias_start, bias_end, size); |
| 193 | + bias_rem -= size; |
| 194 | + |
| 195 | + /* |
| 196 | + * Try to randomly grow the bias range in both directions, or |
| 197 | + * only one, or perhaps don't grow at all. |
| 198 | + */ |
| 199 | + do { |
| 200 | + u32 old_bias_start = bias_start; |
| 201 | + u32 old_bias_end = bias_end; |
| 202 | + |
| 203 | + if (bias_start) |
| 204 | + bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps); |
| 205 | + if (bias_end != mm_size) |
| 206 | + bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps); |
| 207 | + |
| 208 | + bias_rem += old_bias_start - bias_start; |
| 209 | + bias_rem += bias_end - old_bias_end; |
| 210 | + } while (!bias_rem && (bias_start || bias_end != mm_size)); |
| 211 | + } while (bias_rem); |
| 212 | + |
| 213 | + KUNIT_ASSERT_EQ(test, bias_start, 0); |
| 214 | + KUNIT_ASSERT_EQ(test, bias_end, mm_size); |
| 215 | + KUNIT_ASSERT_TRUE_MSG(test, |
| 216 | + drm_buddy_alloc_blocks(&mm, bias_start, bias_end, |
| 217 | + ps, ps, |
| 218 | + &allocated, |
| 219 | + DRM_BUDDY_RANGE_ALLOCATION), |
| 220 | + "buddy_alloc passed with bias(%x-%x), size=%u\n", |
| 221 | + bias_start, bias_end, ps); |
| 222 | + |
| 223 | + drm_buddy_free_list(&mm, &allocated); |
| 224 | + drm_buddy_fini(&mm); |
| 225 | +} |
| 226 | + |
22 | 227 | static void drm_test_buddy_alloc_contiguous(struct kunit *test) |
23 | 228 | { |
24 | 229 | u32 mm_size, ps = SZ_4K, i, n_pages, total; |
@@ -363,17 +568,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test) |
363 | 568 | drm_buddy_fini(&mm); |
364 | 569 | } |
365 | 570 |
|
| 571 | +static int drm_buddy_suite_init(struct kunit_suite *suite) |
| 572 | +{ |
| 573 | + while (!random_seed) |
| 574 | + random_seed = get_random_u32(); |
| 575 | + |
| 576 | + kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", |
| 577 | + random_seed); |
| 578 | + |
| 579 | + return 0; |
| 580 | +} |
| 581 | + |
/* Test cases for the drm_buddy suite; the list is NULL-terminated. */
static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};
374 | 591 |
|
/* KUnit suite definition; suite_init seeds the PRNG before any case runs. */
static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};
379 | 597 |
|
|
0 commit comments