@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
517517 * ccio_io_pdir_entry - Initialize an I/O Pdir.
518518 * @pdir_ptr: A pointer into I/O Pdir.
519519 * @sid: The Space Identifier.
520- * @vba: The virtual address.
520+ * @pba: The physical address.
521521 * @hints: The DMA Hint.
522522 *
523- * Given a virtual address (vba, arg2) and space id, (sid, arg1),
523+ * Given a physical address (pba, arg2) and space id, (sid, arg1),
524524 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
525525 * entry consists of 8 bytes as shown below (MSB == bit 0):
526526 *
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
543543 * index are bits 12:19 of the value returned by LCI.
544544 */
545545static void
546- ccio_io_pdir_entry (__le64 * pdir_ptr , space_t sid , unsigned long vba ,
546+ ccio_io_pdir_entry (__le64 * pdir_ptr , space_t sid , phys_addr_t pba ,
547547 unsigned long hints )
548548{
549549 register unsigned long pa ;
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
557557 ** "hints" parm includes the VALID bit!
558558 ** "dep" clobbers the physical address offset bits as well.
559559 */
560- pa = lpa ( vba ) ;
560+ pa = pba ;
561561 asm volatile ("depw %1,31,12,%0" : "+r" (pa ) : "r" (hints ));
562562 ((u32 * )pdir_ptr )[1 ] = (u32 ) pa ;
563563
@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
582582 ** Grab virtual index [0:11]
583583 ** Deposit virt_idx bits into I/O PDIR word
584584 */
585- asm volatile ("lci %%r0(%1), %0" : "=r" (ci ) : "r" (vba ));
585+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci ) : "r" (phys_to_virt ( pba ) ));
586586 asm volatile ("extru %1,19,12,%0" : "+r" (ci ) : "r" (ci ));
587587 asm volatile ("depw %1,15,12,%0" : "+r" (pa ) : "r" (ci ));
588588
@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
704704/**
705705 * ccio_map_single - Map an address range into the IOMMU.
706706 * @dev: The PCI device.
707- * @addr: The start address of the DMA region.
707+ * @addr: The physical address of the DMA region.
708708 * @size: The length of the DMA region.
709709 * @direction: The direction of the DMA transaction (to/from device).
710710 *
711711 * This function implements the pci_map_single function.
712712 */
713713static dma_addr_t
714- ccio_map_single (struct device * dev , void * addr , size_t size ,
714+ ccio_map_single (struct device * dev , phys_addr_t addr , size_t size ,
715715 enum dma_data_direction direction )
716716{
717717 int idx ;
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
730730 BUG_ON (size <= 0 );
731731
732732 /* save offset bits */
733- offset = (( unsigned long ) addr ) & ~ IOVP_MASK ;
733+ offset = offset_in_page ( addr );
734734
735735 /* round up to nearest IOVP_SIZE */
736736 size = ALIGN (size + offset , IOVP_SIZE );
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
746746
747747 pdir_start = & (ioc -> pdir_base [idx ]);
748748
749- DBG_RUN ("%s() %px -> %#lx size: %zu\n" ,
750- __func__ , addr , (long )(iovp | offset ), size );
749+ DBG_RUN ("%s() %pa -> %#lx size: %zu\n" ,
750+ __func__ , & addr , (long )(iovp | offset ), size );
751751
752752 /* If not cacheline aligned, force SAFE_DMA on the whole mess */
753- if ((size % L1_CACHE_BYTES ) || (( unsigned long ) addr % L1_CACHE_BYTES ))
753+ if ((size % L1_CACHE_BYTES ) || (addr % L1_CACHE_BYTES ))
754754 hint |= HINT_SAFE_DMA ;
755755
756756 while (size > 0 ) {
757- ccio_io_pdir_entry (pdir_start , KERNEL_SPACE , ( unsigned long ) addr , hint );
757+ ccio_io_pdir_entry (pdir_start , KERNEL_SPACE , addr , hint );
758758
759759 DBG_RUN (" pdir %p %08x%08x\n" ,
760760 pdir_start ,
@@ -773,25 +773,26 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
773773
774774
775775static dma_addr_t
776- ccio_map_page (struct device * dev , struct page * page , unsigned long offset ,
777- size_t size , enum dma_data_direction direction ,
778- unsigned long attrs )
776+ ccio_map_phys (struct device * dev , phys_addr_t phys , size_t size ,
777+ enum dma_data_direction direction , unsigned long attrs )
779778{
780- return ccio_map_single (dev , page_address (page ) + offset , size ,
781- direction );
779+ if (unlikely (attrs & DMA_ATTR_MMIO ))
780+ return DMA_MAPPING_ERROR ;
781+
782+ return ccio_map_single (dev , phys , size , direction );
782783}
783784
784785
785786/**
786- * ccio_unmap_page - Unmap an address range from the IOMMU.
787+ * ccio_unmap_phys - Unmap an address range from the IOMMU.
787788 * @dev: The PCI device.
788789 * @iova: The start address of the DMA region.
789790 * @size: The length of the DMA region.
790791 * @direction: The direction of the DMA transaction (to/from device).
791792 * @attrs: attributes
792793 */
793794static void
794- ccio_unmap_page (struct device * dev , dma_addr_t iova , size_t size ,
795+ ccio_unmap_phys (struct device * dev , dma_addr_t iova , size_t size ,
795796 enum dma_data_direction direction , unsigned long attrs )
796797{
797798 struct ioc * ioc ;
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
853854
854855 if (ret ) {
855856 memset (ret , 0 , size );
856- * dma_handle = ccio_map_single (dev , ret , size , DMA_BIDIRECTIONAL );
857+ * dma_handle = ccio_map_single (dev , virt_to_phys (ret ), size ,
858+ DMA_BIDIRECTIONAL );
857859 }
858860
859861 return ret ;
@@ -873,7 +875,7 @@ static void
873875ccio_free (struct device * dev , size_t size , void * cpu_addr ,
874876 dma_addr_t dma_handle , unsigned long attrs )
875877{
876- ccio_unmap_page (dev , dma_handle , size , 0 , 0 );
878+ ccio_unmap_phys (dev , dma_handle , size , 0 , 0 );
877879 free_pages ((unsigned long )cpu_addr , get_order (size ));
878880}
879881
@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
920922 /* Fast path single entry scatterlists. */
921923 if (nents == 1 ) {
922924 sg_dma_address (sglist ) = ccio_map_single (dev ,
923- sg_virt (sglist ), sglist -> length ,
925+ sg_phys (sglist ), sglist -> length ,
924926 direction );
925927 sg_dma_len (sglist ) = sglist -> length ;
926928 return 1 ;
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
10041006#ifdef CCIO_COLLECT_STATS
10051007 ioc -> usg_pages += sg_dma_len (sglist ) >> PAGE_SHIFT ;
10061008#endif
1007- ccio_unmap_page (dev , sg_dma_address (sglist ),
1009+ ccio_unmap_phys (dev , sg_dma_address (sglist ),
10081010 sg_dma_len (sglist ), direction , 0 );
10091011 ++ sglist ;
10101012 nents -- ;
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
10171019 .dma_supported = ccio_dma_supported ,
10181020 .alloc = ccio_alloc ,
10191021 .free = ccio_free ,
1020- .map_page = ccio_map_page ,
1021- .unmap_page = ccio_unmap_page ,
1022+ .map_phys = ccio_map_phys ,
1023+ .unmap_phys = ccio_unmap_phys ,
10221024 .map_sg = ccio_map_sg ,
10231025 .unmap_sg = ccio_unmap_sg ,
10241026 .get_sgtable = dma_common_get_sgtable ,
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
10721074 ioc -> msingle_calls , ioc -> msingle_pages ,
10731075 (int )((ioc -> msingle_pages * 1000 )/ioc -> msingle_calls ));
10741076
1075- /* KLUGE - unmap_sg calls unmap_page for each mapped page */
1077+ /* KLUGE - unmap_sg calls unmap_phys for each mapped page */
10761078 min = ioc -> usingle_calls - ioc -> usg_calls ;
10771079 max = ioc -> usingle_pages - ioc -> usg_pages ;
10781080 seq_printf (m , "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n" ,
0 commit comments