136136 * requirement is inherited from the wait-before-signal behavior required by
137137 * the Vulkan timeline semaphore API.
138138 *
139+ * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
140+ * blocking: an eventfd will be signaled when the syncobj is. This is useful to
141+ * integrate the wait in an event loop.
 *
140144 * Import/export of syncobjs
141145 * -------------------------
185189
186190#include <linux/anon_inodes.h>
187191#include <linux/dma-fence-unwrap.h>
192+ #include <linux/eventfd.h>
188193#include <linux/file.h>
189194#include <linux/fs.h>
190195#include <linux/sched/signal.h>
@@ -212,6 +217,20 @@ struct syncobj_wait_entry {
212217static void syncobj_wait_syncobj_func (struct drm_syncobj * syncobj ,
213218 struct syncobj_wait_entry * wait );
214219
/*
 * Pending eventfd registration on a syncobj (&DRM_IOCTL_SYNCOBJ_EVENTFD).
 * Once the fence for @point is found (and, unless WAIT_AVAILABLE is set,
 * signaled), @ev_fd_ctx is signaled and the entry is freed.
 */
struct syncobj_eventfd_entry {
	struct list_head node;		/* link in drm_syncobj.ev_fd_list, under syncobj->lock */
	struct dma_fence *fence;	/* fence found for @point, NULL until then */
	struct dma_fence_cb fence_cb;	/* callback installed on @fence when waiting for signal */
	struct drm_syncobj *syncobj;	/* syncobj this entry was registered on */
	struct eventfd_ctx *ev_fd_ctx;	/* eventfd to notify; entry holds a reference */
	u64 point;			/* timeline point to wait for (0 for binary syncobjs) */
	u32 flags;			/* DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE or 0 */
};

static void
syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
			   struct syncobj_eventfd_entry *entry);
215234/**
216235 * drm_syncobj_find - lookup and reference a sync object.
217236 * @file_private: drm file private pointer
@@ -274,6 +293,28 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
274293 spin_unlock (& syncobj -> lock );
275294}
276295
296+ static void
297+ syncobj_eventfd_entry_free (struct syncobj_eventfd_entry * entry )
298+ {
299+ eventfd_ctx_put (entry -> ev_fd_ctx );
300+ dma_fence_put (entry -> fence );
301+ /* This happens either inside the syncobj lock, or after the node has
302+ * already been removed from the list.
303+ */
304+ list_del (& entry -> node );
305+ kfree (entry );
306+ }
307+
/*
 * Register an eventfd entry on a syncobj: queue it on ev_fd_list and
 * immediately evaluate it, all under the syncobj lock so the check cannot
 * race with drm_syncobj_add_point()/drm_syncobj_replace_fence().
 */
static void
drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
			struct syncobj_eventfd_entry *entry)
{
	spin_lock(&syncobj->lock);
	/* Queue before checking: syncobj_eventfd_entry_func() unlinks the
	 * entry again if the requested point is already satisfied.
	 */
	list_add_tail(&entry->node, &syncobj->ev_fd_list);
	syncobj_eventfd_entry_func(syncobj, entry);
	spin_unlock(&syncobj->lock);
}
317+
277318/**
278319 * drm_syncobj_add_point - add new timeline point to the syncobj
279320 * @syncobj: sync object to add timeline point do
@@ -288,7 +329,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
288329 struct dma_fence * fence ,
289330 uint64_t point )
290331{
291- struct syncobj_wait_entry * cur , * tmp ;
332+ struct syncobj_wait_entry * wait_cur , * wait_tmp ;
333+ struct syncobj_eventfd_entry * ev_fd_cur , * ev_fd_tmp ;
292334 struct dma_fence * prev ;
293335
294336 dma_fence_get (fence );
@@ -302,8 +344,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
302344 dma_fence_chain_init (chain , prev , fence , point );
303345 rcu_assign_pointer (syncobj -> fence , & chain -> base );
304346
305- list_for_each_entry_safe (cur , tmp , & syncobj -> cb_list , node )
306- syncobj_wait_syncobj_func (syncobj , cur );
347+ list_for_each_entry_safe (wait_cur , wait_tmp , & syncobj -> cb_list , node )
348+ syncobj_wait_syncobj_func (syncobj , wait_cur );
349+ list_for_each_entry_safe (ev_fd_cur , ev_fd_tmp , & syncobj -> ev_fd_list , node )
350+ syncobj_eventfd_entry_func (syncobj , ev_fd_cur );
307351 spin_unlock (& syncobj -> lock );
308352
309353 /* Walk the chain once to trigger garbage collection */
@@ -323,7 +367,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
323367 struct dma_fence * fence )
324368{
325369 struct dma_fence * old_fence ;
326- struct syncobj_wait_entry * cur , * tmp ;
370+ struct syncobj_wait_entry * wait_cur , * wait_tmp ;
371+ struct syncobj_eventfd_entry * ev_fd_cur , * ev_fd_tmp ;
327372
328373 if (fence )
329374 dma_fence_get (fence );
@@ -335,8 +380,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
335380 rcu_assign_pointer (syncobj -> fence , fence );
336381
337382 if (fence != old_fence ) {
338- list_for_each_entry_safe (cur , tmp , & syncobj -> cb_list , node )
339- syncobj_wait_syncobj_func (syncobj , cur );
383+ list_for_each_entry_safe (wait_cur , wait_tmp , & syncobj -> cb_list , node )
384+ syncobj_wait_syncobj_func (syncobj , wait_cur );
385+ list_for_each_entry_safe (ev_fd_cur , ev_fd_tmp , & syncobj -> ev_fd_list , node )
386+ syncobj_eventfd_entry_func (syncobj , ev_fd_cur );
340387 }
341388
342389 spin_unlock (& syncobj -> lock );
@@ -472,7 +519,13 @@ void drm_syncobj_free(struct kref *kref)
472519 struct drm_syncobj * syncobj = container_of (kref ,
473520 struct drm_syncobj ,
474521 refcount );
522+ struct syncobj_eventfd_entry * ev_fd_cur , * ev_fd_tmp ;
523+
475524 drm_syncobj_replace_fence (syncobj , NULL );
525+
526+ list_for_each_entry_safe (ev_fd_cur , ev_fd_tmp , & syncobj -> ev_fd_list , node )
527+ syncobj_eventfd_entry_free (ev_fd_cur );
528+
476529 kfree (syncobj );
477530}
478531EXPORT_SYMBOL (drm_syncobj_free );
@@ -501,6 +554,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
501554
502555 kref_init (& syncobj -> refcount );
503556 INIT_LIST_HEAD (& syncobj -> cb_list );
557+ INIT_LIST_HEAD (& syncobj -> ev_fd_list );
504558 spin_lock_init (& syncobj -> lock );
505559
506560 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED ) {
@@ -1304,6 +1358,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
13041358 return ret ;
13051359}
13061360
/*
 * dma_fence callback: the fence an eventfd entry was waiting on has
 * signaled. Notify the eventfd and dispose of the entry (the entry was
 * already removed from the syncobj list when the callback was installed).
 */
static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
					     struct dma_fence_cb *cb)
{
	struct syncobj_eventfd_entry *entry =
		container_of(cb, struct syncobj_eventfd_entry, fence_cb);

	eventfd_signal(entry->ev_fd_ctx, 1);
	syncobj_eventfd_entry_free(entry);
}
1370+
1371+ static void
1372+ syncobj_eventfd_entry_func (struct drm_syncobj * syncobj ,
1373+ struct syncobj_eventfd_entry * entry )
1374+ {
1375+ int ret ;
1376+ struct dma_fence * fence ;
1377+
1378+ /* This happens inside the syncobj lock */
1379+ fence = dma_fence_get (rcu_dereference_protected (syncobj -> fence , 1 ));
1380+ ret = dma_fence_chain_find_seqno (& fence , entry -> point );
1381+ if (ret != 0 || !fence ) {
1382+ dma_fence_put (fence );
1383+ return ;
1384+ }
1385+
1386+ list_del_init (& entry -> node );
1387+ entry -> fence = fence ;
1388+
1389+ if (entry -> flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE ) {
1390+ eventfd_signal (entry -> ev_fd_ctx , 1 );
1391+ syncobj_eventfd_entry_free (entry );
1392+ } else {
1393+ ret = dma_fence_add_callback (fence , & entry -> fence_cb ,
1394+ syncobj_eventfd_entry_fence_func );
1395+ if (ret == - ENOENT ) {
1396+ eventfd_signal (entry -> ev_fd_ctx , 1 );
1397+ syncobj_eventfd_entry_free (entry );
1398+ }
1399+ }
1400+ }
1401+
1402+ int
1403+ drm_syncobj_eventfd_ioctl (struct drm_device * dev , void * data ,
1404+ struct drm_file * file_private )
1405+ {
1406+ struct drm_syncobj_eventfd * args = data ;
1407+ struct drm_syncobj * syncobj ;
1408+ struct eventfd_ctx * ev_fd_ctx ;
1409+ struct syncobj_eventfd_entry * entry ;
1410+
1411+ if (!drm_core_check_feature (dev , DRIVER_SYNCOBJ_TIMELINE ))
1412+ return - EOPNOTSUPP ;
1413+
1414+ if (args -> flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE )
1415+ return - EINVAL ;
1416+
1417+ if (args -> pad )
1418+ return - EINVAL ;
1419+
1420+ syncobj = drm_syncobj_find (file_private , args -> handle );
1421+ if (!syncobj )
1422+ return - ENOENT ;
1423+
1424+ ev_fd_ctx = eventfd_ctx_fdget (args -> fd );
1425+ if (IS_ERR (ev_fd_ctx ))
1426+ return PTR_ERR (ev_fd_ctx );
1427+
1428+ entry = kzalloc (sizeof (* entry ), GFP_KERNEL );
1429+ if (!entry ) {
1430+ eventfd_ctx_put (ev_fd_ctx );
1431+ return - ENOMEM ;
1432+ }
1433+ entry -> syncobj = syncobj ;
1434+ entry -> ev_fd_ctx = ev_fd_ctx ;
1435+ entry -> point = args -> point ;
1436+ entry -> flags = args -> flags ;
1437+
1438+ drm_syncobj_add_eventfd (syncobj , entry );
1439+ drm_syncobj_put (syncobj );
1440+
1441+ return 0 ;
1442+ }
13071443
13081444int
13091445drm_syncobj_reset_ioctl (struct drm_device * dev , void * data ,