@@ -624,6 +624,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
624624 qaic_free_sgt (bo -> sgt );
625625 }
626626
627+ mutex_destroy (& bo -> lock );
627628 drm_gem_object_release (obj );
628629 kfree (bo );
629630}
@@ -641,6 +642,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
641642 bo -> sliced = false;
642643 reinit_completion (& bo -> xfer_done );
643644 } else {
645+ mutex_init (& bo -> lock );
644646 init_completion (& bo -> xfer_done );
645647 }
646648 complete_all (& bo -> xfer_done );
@@ -1002,10 +1004,13 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
10021004 }
10031005
10041006 bo = to_qaic_bo (obj );
1007+ ret = mutex_lock_interruptible (& bo -> lock );
1008+ if (ret )
1009+ goto put_bo ;
10051010
10061011 if (bo -> sliced ) {
10071012 ret = - EINVAL ;
1008- goto put_bo ;
1013+ goto unlock_bo ;
10091014 }
10101015
10111016 dbc = & qdev -> dbc [args -> hdr .dbc_id ];
@@ -1029,7 +1034,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
10291034 bo -> sliced = true;
10301035 list_add_tail (& bo -> bo_list , & bo -> dbc -> bo_lists );
10311036 srcu_read_unlock (& dbc -> ch_lock , rcu_id );
1032- drm_gem_object_put ( obj );
1037+ mutex_unlock ( & bo -> lock );
10331038 srcu_read_unlock (& qdev -> dev_lock , qdev_rcu_id );
10341039 srcu_read_unlock (& usr -> qddev_lock , usr_rcu_id );
10351040
@@ -1039,6 +1044,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
10391044 qaic_unprepare_bo (qdev , bo );
10401045unlock_ch_srcu :
10411046 srcu_read_unlock (& dbc -> ch_lock , rcu_id );
1047+ unlock_bo :
1048+ mutex_unlock (& bo -> lock );
10421049put_bo :
10431050 drm_gem_object_put (obj );
10441051free_slice_ent :
@@ -1193,15 +1200,18 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
11931200 }
11941201
11951202 bo = to_qaic_bo (obj );
1203+ ret = mutex_lock_interruptible (& bo -> lock );
1204+ if (ret )
1205+ goto failed_to_send_bo ;
11961206
11971207 if (!bo -> sliced ) {
11981208 ret = - EINVAL ;
1199- goto failed_to_send_bo ;
1209+ goto unlock_bo ;
12001210 }
12011211
12021212 if (is_partial && pexec [i ].resize > bo -> base .size ) {
12031213 ret = - EINVAL ;
1204- goto failed_to_send_bo ;
1214+ goto unlock_bo ;
12051215 }
12061216
12071217 spin_lock_irqsave (& dbc -> xfer_lock , flags );
@@ -1210,7 +1220,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
12101220 if (queued ) {
12111221 spin_unlock_irqrestore (& dbc -> xfer_lock , flags );
12121222 ret = - EINVAL ;
1213- goto failed_to_send_bo ;
1223+ goto unlock_bo ;
12141224 }
12151225
12161226 bo -> req_id = dbc -> next_req_id ++ ;
@@ -1241,17 +1251,20 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
12411251 if (ret ) {
12421252 bo -> queued = false;
12431253 spin_unlock_irqrestore (& dbc -> xfer_lock , flags );
1244- goto failed_to_send_bo ;
1254+ goto unlock_bo ;
12451255 }
12461256 }
12471257 reinit_completion (& bo -> xfer_done );
12481258 list_add_tail (& bo -> xfer_list , & dbc -> xfer_list );
12491259 spin_unlock_irqrestore (& dbc -> xfer_lock , flags );
12501260 dma_sync_sgtable_for_device (& qdev -> pdev -> dev , bo -> sgt , bo -> dir );
1261+ mutex_unlock (& bo -> lock );
12511262 }
12521263
12531264 return 0 ;
12541265
1266+ unlock_bo :
1267+ mutex_unlock (& bo -> lock );
12551268failed_to_send_bo :
12561269 if (likely (obj ))
12571270 drm_gem_object_put (obj );
@@ -1807,6 +1820,91 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
18071820 return ret ;
18081821}
18091822
/*
 * detach_slice_bo() - Undo the slicing of a BO and return it to its
 * freshly-created state.
 *
 * Locking/refcount contract (from the two call sites in this patch):
 * the caller must hold bo->lock — qaic_detach_slice_bo_ioctl() takes it
 * via mutex_lock_interruptible() and release_dbc() via mutex_lock() —
 * and must hold its own reference on the GEM object, because the final
 * put below drops the reference that qaic_attach_slice_bo_ioctl() left
 * held on the sliced BO (its success path no longer calls
 * drm_gem_object_put()).
 */
1823+ static void detach_slice_bo (struct qaic_device * qdev , struct qaic_bo * bo )
1824+ {
1825+ qaic_free_slices_bo (bo ); /* tear down the slice state built at attach time */
1826+ qaic_unprepare_bo (qdev , bo ); /* undo qaic_prepare_bo() (presumably device DMA mapping — confirm against helper) */
1827+ qaic_init_bo (bo , true); /* reinit=true path: clears bo->sliced, reinits xfer_done; does NOT re-init bo->lock */
1828+ list_del (& bo -> bo_list ); /* remove from the dbc's bo_lists added at attach */
1829+ drm_gem_object_put (& bo -> base ); /* drop the attach-time slicing reference */
1830+ }
1831+
/*
 * qaic_detach_slice_bo_ioctl() - DRM ioctl handler that reverses a prior
 * attach-slice on a BO (the inverse of qaic_attach_slice_bo_ioctl()).
 *
 * Return: 0 on success; -EINVAL on non-zero pad, un-sliced BO, or a DBC
 * owned by a different user; -ENODEV if the device is gone or in reset;
 * -ENOENT if the handle does not resolve; -EBUSY if the BO is currently
 * queued to hardware; or the error from an interrupted mutex wait.
 *
 * Lock order matches the attach path: usr qddev_lock srcu -> qdev
 * dev_lock srcu -> bo->lock mutex -> dbc ch_lock srcu -> dbc xfer_lock
 * spinlock.
 */
1832+ int qaic_detach_slice_bo_ioctl (struct drm_device * dev , void * data , struct drm_file * file_priv )
1833+ {
1834+ struct qaic_detach_slice * args = data ;
1835+ int rcu_id , usr_rcu_id , qdev_rcu_id ;
1836+ struct dma_bridge_chan * dbc ;
1837+ struct drm_gem_object * obj ;
1838+ struct qaic_device * qdev ;
1839+ struct qaic_user * usr ;
1840+ unsigned long flags ;
1841+ struct qaic_bo * bo ;
1842+ int ret ;
1843+
/* Reject garbage in the reserved field so it can be reused later. */
1844+ if (args -> pad != 0 )
1845+ return - EINVAL ;
1846+
1847+ usr = file_priv -> driver_priv ;
1848+ usr_rcu_id = srcu_read_lock (& usr -> qddev_lock );
1849+ if (!usr -> qddev ) { /* user no longer bound to a device */
1850+ ret = - ENODEV ;
1851+ goto unlock_usr_srcu ;
1852+ }
1853+
1854+ qdev = usr -> qddev -> qdev ;
1855+ qdev_rcu_id = srcu_read_lock (& qdev -> dev_lock );
1856+ if (qdev -> in_reset ) {
1857+ ret = - ENODEV ;
1858+ goto unlock_dev_srcu ;
1859+ }
1860+
/* Lookup takes its own reference on obj; dropped at put_bo. */
1861+ obj = drm_gem_object_lookup (file_priv , args -> handle );
1862+ if (!obj ) {
1863+ ret = - ENOENT ;
1864+ goto unlock_dev_srcu ;
1865+ }
1866+
1867+ bo = to_qaic_bo (obj );
/* Serialize against attach (qaic_attach_slice_bo_ioctl) and execute
 * (send_bo_list_to_device), which take bo->lock the same way. */
1868+ ret = mutex_lock_interruptible (& bo -> lock );
1869+ if (ret )
1870+ goto put_bo ;
1871+
1872+ if (!bo -> sliced ) { /* nothing to detach */
1873+ ret = - EINVAL ;
1874+ goto unlock_bo ;
1875+ }
1876+
1877+ dbc = bo -> dbc ;
1878+ rcu_id = srcu_read_lock (& dbc -> ch_lock );
1879+ if (dbc -> usr != usr ) { /* only the channel owner may detach */
1880+ ret = - EINVAL ;
1881+ goto unlock_ch_srcu ;
1882+ }
1883+
1884+ /* Check if BO is committed to H/W for DMA */
1885+ spin_lock_irqsave (& dbc -> xfer_lock , flags );
1886+ if (bo -> queued ) {
1887+ spin_unlock_irqrestore (& dbc -> xfer_lock , flags );
1888+ ret = - EBUSY ;
1889+ goto unlock_ch_srcu ;
1890+ }
1891+ spin_unlock_irqrestore (& dbc -> xfer_lock , flags );
1892+
/* Holding bo->lock as detach_slice_bo() requires; it also drops the
 * attach-time reference, distinct from the lookup reference below. */
1893+ detach_slice_bo (qdev , bo );
1894+
1895+ unlock_ch_srcu :
1896+ srcu_read_unlock (& dbc -> ch_lock , rcu_id );
1897+ unlock_bo :
1898+ mutex_unlock (& bo -> lock );
1899+ put_bo :
1900+ drm_gem_object_put (obj ); /* drop the drm_gem_object_lookup() reference */
1901+ unlock_dev_srcu :
1902+ srcu_read_unlock (& qdev -> dev_lock , qdev_rcu_id );
1903+ unlock_usr_srcu :
1904+ srcu_read_unlock (& usr -> qddev_lock , usr_rcu_id );
1905+ return ret ; /* 0 from mutex_lock_interruptible() on the success path */
1906+ }
1907+
18101908static void empty_xfer_list (struct qaic_device * qdev , struct dma_bridge_chan * dbc )
18111909{
18121910 unsigned long flags ;
@@ -1888,10 +1986,11 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
18881986 dbc -> usr = NULL ;
18891987
18901988 list_for_each_entry_safe (bo , bo_temp , & dbc -> bo_lists , bo_list ) {
1891- qaic_free_slices_bo (bo );
1892- qaic_unprepare_bo (qdev , bo );
1893- qaic_init_bo (bo , true);
1894- list_del (& bo -> bo_list );
1989+ drm_gem_object_get (& bo -> base );
1990+ mutex_lock (& bo -> lock );
1991+ detach_slice_bo (qdev , bo );
1992+ mutex_unlock (& bo -> lock );
1993+ drm_gem_object_put (& bo -> base );
18951994 }
18961995
18971996 dbc -> in_use = false;
0 commit comments