|
27 | 27 | #include "xe_reg_whitelist.h" |
28 | 28 | #include "xe_rtp_types.h" |
29 | 29 |
|
30 | | -#define XE_REG_SR_GROW_STEP_DEFAULT 16 |
31 | | - |
32 | 30 | static void reg_sr_fini(struct drm_device *drm, void *arg) |
33 | 31 | { |
34 | 32 | struct xe_reg_sr *sr = arg; |
| 33 | + struct xe_reg_sr_entry *entry; |
| 34 | + unsigned long reg; |
| 35 | + |
| 36 | + xa_for_each(&sr->xa, reg, entry) |
| 37 | + kfree(entry); |
35 | 38 |
|
36 | 39 | xa_destroy(&sr->xa); |
37 | | - kfree(sr->pool.arr); |
38 | | - memset(&sr->pool, 0, sizeof(sr->pool)); |
39 | 40 | } |
40 | 41 |
|
41 | 42 | int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe) |
42 | 43 | { |
43 | 44 | xa_init(&sr->xa); |
44 | | - memset(&sr->pool, 0, sizeof(sr->pool)); |
45 | | - sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT; |
46 | 45 | sr->name = name; |
47 | 46 |
|
48 | 47 | return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr); |
49 | 48 | } |
50 | 49 | EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init); |
51 | 50 |
|
52 | | -static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr) |
53 | | -{ |
54 | | - if (sr->pool.used == sr->pool.allocated) { |
55 | | - struct xe_reg_sr_entry *arr; |
56 | | - |
57 | | - arr = krealloc_array(sr->pool.arr, |
58 | | - ALIGN(sr->pool.allocated + 1, sr->pool.grow_step), |
59 | | - sizeof(*arr), GFP_KERNEL); |
60 | | - if (!arr) |
61 | | - return NULL; |
62 | | - |
63 | | - sr->pool.arr = arr; |
64 | | - sr->pool.allocated += sr->pool.grow_step; |
65 | | - } |
66 | | - |
67 | | - return &sr->pool.arr[sr->pool.used++]; |
68 | | -} |
69 | | - |
70 | 51 | static bool compatible_entries(const struct xe_reg_sr_entry *e1, |
71 | 52 | const struct xe_reg_sr_entry *e2) |
72 | 53 | { |
@@ -112,7 +93,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr, |
112 | 93 | return 0; |
113 | 94 | } |
114 | 95 |
|
115 | | - pentry = alloc_entry(sr); |
| 96 | + pentry = kmalloc(sizeof(*pentry), GFP_KERNEL); |
116 | 97 | if (!pentry) { |
117 | 98 | ret = -ENOMEM; |
118 | 99 | goto fail; |
|