#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
@@ -292,50 +293,48 @@ static const struct dma_map_ops xen_grant_dma_ops = {
292293 .dma_supported = xen_grant_dma_supported ,
293294};
294295
295- static bool xen_is_dt_grant_dma_device (struct device * dev )
296+ static struct device_node * xen_dt_get_node (struct device * dev )
296297{
297- struct device_node * iommu_np ;
298- bool has_iommu ;
298+ if (dev_is_pci (dev )) {
299+ struct pci_dev * pdev = to_pci_dev (dev );
300+ struct pci_bus * bus = pdev -> bus ;
299301
300- iommu_np = of_parse_phandle (dev -> of_node , "iommus" , 0 );
301- has_iommu = iommu_np &&
302- of_device_is_compatible (iommu_np , "xen,grant-dma" );
303- of_node_put (iommu_np );
302+ /* Walk up to the root bus to look for PCI Host controller */
303+ while (!pci_is_root_bus (bus ))
304+ bus = bus -> parent ;
304305
305- return has_iommu ;
306- }
307-
308- bool xen_is_grant_dma_device (struct device * dev )
309- {
310- /* XXX Handle only DT devices for now */
311- if (dev -> of_node )
312- return xen_is_dt_grant_dma_device (dev );
313-
314- return false;
315- }
316-
317- bool xen_virtio_mem_acc (struct virtio_device * dev )
318- {
319- if (IS_ENABLED (CONFIG_XEN_VIRTIO_FORCE_GRANT ) || xen_pv_domain ())
320- return true;
306+ return of_node_get (bus -> bridge -> parent -> of_node );
307+ }
321308
322- return xen_is_grant_dma_device (dev -> dev . parent );
309+ return of_node_get (dev -> of_node );
323310}
324311
325312static int xen_dt_grant_init_backend_domid (struct device * dev ,
326- struct xen_grant_dma_data * data )
313+ struct device_node * np ,
314+ domid_t * backend_domid )
327315{
328- struct of_phandle_args iommu_spec ;
316+ struct of_phandle_args iommu_spec = { . args_count = 1 } ;
329317
330- if (of_parse_phandle_with_args (dev -> of_node , "iommus" , "#iommu-cells" ,
331- 0 , & iommu_spec )) {
332- dev_err (dev , "Cannot parse iommus property\n" );
333- return - ESRCH ;
318+ if (dev_is_pci (dev )) {
319+ struct pci_dev * pdev = to_pci_dev (dev );
320+ u32 rid = PCI_DEVID (pdev -> bus -> number , pdev -> devfn );
321+
322+ if (of_map_id (np , rid , "iommu-map" , "iommu-map-mask" , & iommu_spec .np ,
323+ iommu_spec .args )) {
324+ dev_dbg (dev , "Cannot translate ID\n" );
325+ return - ESRCH ;
326+ }
327+ } else {
328+ if (of_parse_phandle_with_args (np , "iommus" , "#iommu-cells" ,
329+ 0 , & iommu_spec )) {
330+ dev_dbg (dev , "Cannot parse iommus property\n" );
331+ return - ESRCH ;
332+ }
334333 }
335334
336335 if (!of_device_is_compatible (iommu_spec .np , "xen,grant-dma" ) ||
337336 iommu_spec .args_count != 1 ) {
338- dev_err (dev , "Incompatible IOMMU node\n" );
337+ dev_dbg (dev , "Incompatible IOMMU node\n" );
339338 of_node_put (iommu_spec .np );
340339 return - ESRCH ;
341340 }
@@ -346,12 +345,31 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
346345 * The endpoint ID here means the ID of the domain where the
347346 * corresponding backend is running
348347 */
349- data -> backend_domid = iommu_spec .args [0 ];
348+ * backend_domid = iommu_spec .args [0 ];
350349
351350 return 0 ;
352351}
353352
354- void xen_grant_setup_dma_ops (struct device * dev )
353+ static int xen_grant_init_backend_domid (struct device * dev ,
354+ domid_t * backend_domid )
355+ {
356+ struct device_node * np ;
357+ int ret = - ENODEV ;
358+
359+ np = xen_dt_get_node (dev );
360+ if (np ) {
361+ ret = xen_dt_grant_init_backend_domid (dev , np , backend_domid );
362+ of_node_put (np );
363+ } else if (IS_ENABLED (CONFIG_XEN_VIRTIO_FORCE_GRANT ) || xen_pv_domain ()) {
364+ dev_info (dev , "Using dom0 as backend\n" );
365+ * backend_domid = 0 ;
366+ ret = 0 ;
367+ }
368+
369+ return ret ;
370+ }
371+
372+ static void xen_grant_setup_dma_ops (struct device * dev , domid_t backend_domid )
355373{
356374 struct xen_grant_dma_data * data ;
357375
@@ -365,16 +383,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
365383 if (!data )
366384 goto err ;
367385
368- if (dev -> of_node ) {
369- if (xen_dt_grant_init_backend_domid (dev , data ))
370- goto err ;
371- } else if (IS_ENABLED (CONFIG_XEN_VIRTIO_FORCE_GRANT )) {
372- dev_info (dev , "Using dom0 as backend\n" );
373- data -> backend_domid = 0 ;
374- } else {
375- /* XXX ACPI device unsupported for now */
376- goto err ;
377- }
386+ data -> backend_domid = backend_domid ;
378387
379388 if (store_xen_grant_dma_data (dev , data )) {
380389 dev_err (dev , "Cannot store Xen grant DMA data\n" );
@@ -392,12 +401,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
392401
393402bool xen_virtio_restricted_mem_acc (struct virtio_device * dev )
394403{
395- bool ret = xen_virtio_mem_acc ( dev ) ;
404+ domid_t backend_domid ;
396405
397- if (ret )
398- xen_grant_setup_dma_ops (dev -> dev .parent );
406+ if (!xen_grant_init_backend_domid (dev -> dev .parent , & backend_domid )) {
407+ xen_grant_setup_dma_ops (dev -> dev .parent , backend_domid );
408+ return true;
409+ }
399410
400- return ret ;
411+ return false ;
401412}
402413
403414MODULE_DESCRIPTION ("Xen grant DMA-mapping layer" );
0 commit comments