Skip to content

Commit f0ded97

Browse files
committed
Merge tag 'drm-rust-next-2025-11-18' of https://gitlab.freedesktop.org/drm/rust/kernel into drm-next
Cross-subsystem Changes: Rust - Make slice::as_flattened usable on all supported versions of rustc. - Add FromBytes::from_bytes_prefix() method. Core Changes: - Update Tyr in MAINTAINERS file. - Remove redundant device ptr from Rust GEM object. - Change how AlwaysRefCounted is implemented for GEM objects. - Add deferred vm_bo cleanup to GPUVM and use it in Panthor. Driver Changes: Nova Core - Introduction of bitfield! macro, with support for different storage sizes and custom visibility. - Introduction of safe converters between integer types for which the conversion is lossless. - GSP initialized up to fully booted state on Ampere. - Use more future-proof register for GPU identification. - Various simplifications and optimizations. Nova - Select NOVA_CORE. - Depend on CONFIG_64BIT. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Alice Ryhl <aliceryhl@google.com> Link: https://patch.msgid.link/aRxtJC0D1pQUepF4@google.com
2 parents f3a1d69 + 77b686f commit f0ded97

48 files changed

Lines changed: 5781 additions & 1131 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

Documentation/gpu/nova/core/todo.rst

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -44,25 +44,6 @@ automatically generates the corresponding mappings between a value and a number.
4444
| Complexity: Beginner
4545
| Link: https://docs.rs/num/latest/num/trait.FromPrimitive.html
4646
47-
Conversion from byte slices for types implementing FromBytes [TRSM]
48-
-------------------------------------------------------------------
49-
50-
We retrieve several structures from byte streams coming from the BIOS or loaded
51-
firmware. At the moment converting the byte slice into the proper type requires
52-
an inelegant `unsafe` operation; this will go away once `FromBytes` implements
53-
a proper `from_bytes` method.
54-
55-
| Complexity: Beginner
56-
57-
CoherentAllocation improvements [COHA]
58-
--------------------------------------
59-
60-
`CoherentAllocation` needs a safe way to write into the allocation, and to
61-
obtain slices within the allocation.
62-
63-
| Complexity: Beginner
64-
| Contact: Abdiel Janulgue
65-
6647
Generic register abstraction [REGA]
6748
-----------------------------------
6849

@@ -153,17 +134,6 @@ A `num` core kernel module is being designed to provide these operations.
153134
| Complexity: Intermediate
154135
| Contact: Alexandre Courbot
155136
156-
Delay / Sleep abstractions [DLAY]
157-
---------------------------------
158-
159-
Rust abstractions for the kernel's delay() and sleep() functions.
160-
161-
FUJITA Tomonori plans to work on abstractions for read_poll_timeout_atomic()
162-
(and friends) [1].
163-
164-
| Complexity: Beginner
165-
| Link: https://lore.kernel.org/netdev/20250228.080550.354359820929821928.fujita.tomonori@gmail.com/ [1]
166-
167137
IRQ abstractions
168138
----------------
169139

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8264,6 +8264,7 @@ S: Supported
82648264
W: https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html
82658265
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git
82668266
F: drivers/gpu/drm/nova/
8267+
F: drivers/gpu/drm/tyr/
82678268
F: drivers/gpu/nova-core/
82688269
F: rust/kernel/drm/
82698270

drivers/gpu/drm/drm_gpuvm.c

Lines changed: 190 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -877,6 +877,31 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
877877
cond_spin_unlock(lock, !!lock);
878878
}
879879

880+
/**
881+
* drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
882+
* @vm_bo: the &drm_gpuvm_bo
883+
*
884+
* When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
885+
* immediately removed from the evict and extobj lists. Therefore, anyone
886+
* iterating these lists should skip entries that are being destroyed.
887+
*
888+
* Checking the refcount without incrementing it is okay as long as the lock
889+
* protecting the evict/extobj list is held for as long as you are using the
890+
* vm_bo, because even if the refcount hits zero while you are using it, freeing
891+
* the vm_bo requires taking the list's lock.
892+
*
893+
* Zombie entries can be observed on the evict and extobj lists regardless of
894+
* whether DRM_GPUVM_RESV_PROTECTED is used, but they remain on the lists for a
895+
* longer time when the resv lock is used because we can't take the resv lock
896+
* during run_job() in immediate mode, meaning that they need to remain on the
897+
* lists until drm_gpuvm_bo_deferred_cleanup() is called.
898+
*/
899+
static bool
900+
drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
901+
{
902+
return !kref_read(&vm_bo->kref);
903+
}
904+
880905
/**
881906
* drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
882907
* @__vm_bo: the &drm_gpuvm_bo
@@ -1082,6 +1107,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
10821107
INIT_LIST_HEAD(&gpuvm->evict.list);
10831108
spin_lock_init(&gpuvm->evict.lock);
10841109

1110+
init_llist_head(&gpuvm->bo_defer);
1111+
10851112
kref_init(&gpuvm->kref);
10861113

10871114
gpuvm->name = name ? name : "unknown";
@@ -1123,6 +1150,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
11231150
"Extobj list should be empty.\n");
11241151
drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
11251152
"Evict list should be empty.\n");
1153+
drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
1154+
"VM BO cleanup list should be empty.\n");
11261155

11271156
drm_gem_object_put(gpuvm->r_obj);
11281157
}
@@ -1218,6 +1247,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
12181247

12191248
drm_gpuvm_resv_assert_held(gpuvm);
12201249
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
1250+
if (drm_gpuvm_bo_is_zombie(vm_bo))
1251+
continue;
1252+
12211253
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
12221254
if (ret)
12231255
break;
@@ -1461,6 +1493,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
14611493

14621494
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
14631495
list.entry.evict) {
1496+
if (drm_gpuvm_bo_is_zombie(vm_bo))
1497+
continue;
1498+
14641499
ret = ops->vm_bo_validate(vm_bo, exec);
14651500
if (ret)
14661501
break;
@@ -1561,6 +1596,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
15611596

15621597
INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
15631598
INIT_LIST_HEAD(&vm_bo->list.entry.evict);
1599+
init_llist_node(&vm_bo->list.entry.bo_defer);
15641600

15651601
return vm_bo;
15661602
}
@@ -1622,6 +1658,126 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
16221658
}
16231659
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
16241660

1661+
/*
1662+
* drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to
1663+
* deferred cleanup
1664+
*
1665+
* If deferred cleanup is used, then this must be called right after the vm_bo
1666+
* refcount drops to zero. Must be called with GEM mutex held. After releasing
1667+
* the GEM mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called.
1668+
*/
1669+
static void
1670+
drm_gpuvm_bo_into_zombie(struct kref *kref)
1671+
{
1672+
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1673+
kref);
1674+
1675+
if (!drm_gpuvm_resv_protected(vm_bo->vm)) {
1676+
drm_gpuvm_bo_list_del(vm_bo, extobj, true);
1677+
drm_gpuvm_bo_list_del(vm_bo, evict, true);
1678+
}
1679+
1680+
list_del(&vm_bo->list.entry.gem);
1681+
}
1682+
1683+
/*
1684+
* drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the
1685+
* bo_defer list
1686+
*
1687+
* Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held.
1688+
*
1689+
* It's important that the GEM stays alive for the duration in which we hold
1690+
* the mutex, but the instant we add the vm_bo to bo_defer, another thread
1691+
* might call drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to
1692+
* avoid kfreeing a mutex we are holding, the GEM mutex must be released
1693+
* *before* calling this function.
1694+
*/
1695+
static void
1696+
drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo)
1697+
{
1698+
llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer);
1699+
}
1700+
1701+
static void
1702+
drm_gpuvm_bo_defer_free(struct kref *kref)
1703+
{
1704+
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1705+
kref);
1706+
1707+
drm_gpuvm_bo_into_zombie(kref);
1708+
mutex_unlock(&vm_bo->obj->gpuva.lock);
1709+
drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
1710+
}
1711+
1712+
/**
1713+
* drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
1714+
* deferred cleanup
1715+
* @vm_bo: the &drm_gpuvm_bo to release the reference of
1716+
*
1717+
* This releases a reference to @vm_bo.
1718+
*
1719+
* This might take and release the GEM's GPUVA lock. You should call
1720+
* drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
1721+
*
1722+
* Returns: true if vm_bo is being destroyed, false otherwise.
1723+
*/
1724+
bool
1725+
drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
1726+
{
1727+
if (!vm_bo)
1728+
return false;
1729+
1730+
drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
1731+
1732+
return !!kref_put_mutex(&vm_bo->kref,
1733+
drm_gpuvm_bo_defer_free,
1734+
&vm_bo->obj->gpuva.lock);
1735+
}
1736+
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
1737+
1738+
/**
1739+
* drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred list
1740+
*
1741+
* @gpuvm: the VM to clean up
1742+
*
1743+
* Cleans up &drm_gpuvm_bo instances in the deferred cleanup list.
1744+
*/
1745+
void
1746+
drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
1747+
{
1748+
const struct drm_gpuvm_ops *ops = gpuvm->ops;
1749+
struct drm_gpuvm_bo *vm_bo;
1750+
struct drm_gem_object *obj;
1751+
struct llist_node *bo_defer;
1752+
1753+
bo_defer = llist_del_all(&gpuvm->bo_defer);
1754+
if (!bo_defer)
1755+
return;
1756+
1757+
if (drm_gpuvm_resv_protected(gpuvm)) {
1758+
dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
1759+
llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
1760+
drm_gpuvm_bo_list_del(vm_bo, extobj, false);
1761+
drm_gpuvm_bo_list_del(vm_bo, evict, false);
1762+
}
1763+
dma_resv_unlock(drm_gpuvm_resv(gpuvm));
1764+
}
1765+
1766+
while (bo_defer) {
1767+
vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
1768+
bo_defer = bo_defer->next;
1769+
obj = vm_bo->obj;
1770+
if (ops && ops->vm_bo_free)
1771+
ops->vm_bo_free(vm_bo);
1772+
else
1773+
kfree(vm_bo);
1774+
1775+
drm_gpuvm_put(gpuvm);
1776+
drm_gem_object_put(obj);
1777+
}
1778+
}
1779+
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
1780+
16251781
static struct drm_gpuvm_bo *
16261782
__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
16271783
struct drm_gem_object *obj)
@@ -1949,6 +2105,40 @@ drm_gpuva_unlink(struct drm_gpuva *va)
19492105
}
19502106
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
19512107

2108+
/**
2109+
* drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
2110+
* @va: the &drm_gpuva to unlink
2111+
*
2112+
* Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes
2113+
* the lock for the caller.
2114+
*/
2115+
void
2116+
drm_gpuva_unlink_defer(struct drm_gpuva *va)
2117+
{
2118+
struct drm_gem_object *obj = va->gem.obj;
2119+
struct drm_gpuvm_bo *vm_bo = va->vm_bo;
2120+
bool should_defer_bo;
2121+
2122+
if (unlikely(!obj))
2123+
return;
2124+
2125+
drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
2126+
2127+
mutex_lock(&obj->gpuva.lock);
2128+
list_del_init(&va->gem.entry);
2129+
2130+
/*
2131+
* This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
2132+
*/
2133+
should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie);
2134+
mutex_unlock(&obj->gpuva.lock);
2135+
if (should_defer_bo)
2136+
drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
2137+
2138+
va->vm_bo = NULL;
2139+
}
2140+
EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
2141+
19522142
/**
19532143
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
19542144
* @gpuvm: the &drm_gpuvm to search in

drivers/gpu/drm/nova/Kconfig

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
config DRM_NOVA
22
tristate "Nova DRM driver"
3+
depends on 64BIT
34
depends on DRM=y
45
depends on PCI
56
depends on RUST
67
select AUXILIARY_BUS
8+
select NOVA_CORE
79
default n
810
help
911
Choose this if you want to build the Nova DRM driver for Nvidia

0 commit comments

Comments
 (0)