@@ -145,10 +145,9 @@ int arc_cache_mumbojumbo(int c, char *buf, int len)
145145 p_dc -> sz_k = 1 << (dbcr .sz - 1 );
146146
147147 n += scnprintf (buf + n , len - n ,
148- "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s \n" ,
148+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n" ,
149149 p_dc -> sz_k , assoc , p_dc -> line_len ,
150150 vipt ? "VIPT" : "PIPT" ,
151- p_dc -> colors > 1 ? " aliasing" : "" ,
152151 IS_USED_CFG (CONFIG_ARC_HAS_DCACHE ));
153152
154153slc_chk :
@@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
703702 * Exported APIs
704703 */
705704
706- /*
707- * Handle cache congruency of kernel and userspace mappings of page when kernel
708- * writes-to/reads-from
709- *
710- * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
711- * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
712- * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
713- * -In SMP, if hardware caches are coherent
714- *
715- * There's a corollary case, where kernel READs from a userspace mapped page.
716- * If the U-mapping is not congruent to K-mapping, former needs flushing.
717- */
718705void flush_dcache_folio (struct folio * folio )
719706{
720- struct address_space * mapping ;
721-
722- if (!cache_is_vipt_aliasing ()) {
723- clear_bit (PG_dc_clean , & folio -> flags );
724- return ;
725- }
726-
727- /* don't handle anon pages here */
728- mapping = folio_flush_mapping (folio );
729- if (!mapping )
730- return ;
731-
732- /*
733- * pagecache page, file not yet mapped to userspace
734- * Make a note that K-mapping is dirty
735- */
736- if (!mapping_mapped (mapping )) {
737- clear_bit (PG_dc_clean , & folio -> flags );
738- } else if (folio_mapped (folio )) {
739- /* kernel reading from page with U-mapping */
740- phys_addr_t paddr = (unsigned long )folio_address (folio );
741- unsigned long vaddr = folio_pos (folio );
742-
743- /*
744- * vaddr is not actually the virtual address, but is
745- * congruent to every user mapping.
746- */
747- if (addr_not_cache_congruent (paddr , vaddr ))
748- __flush_dcache_pages (paddr , vaddr ,
749- folio_nr_pages (folio ));
750- }
707+ clear_bit (PG_dc_clean , & folio -> flags );
708+ return ;
751709}
752710EXPORT_SYMBOL (flush_dcache_folio );
753711
@@ -921,91 +879,18 @@ noinline void flush_cache_all(void)
921879
922880}
923881
924- #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
925-
926- void flush_cache_mm (struct mm_struct * mm )
927- {
928- flush_cache_all ();
929- }
930-
931- void flush_cache_page (struct vm_area_struct * vma , unsigned long u_vaddr ,
932- unsigned long pfn )
933- {
934- phys_addr_t paddr = pfn << PAGE_SHIFT ;
935-
936- u_vaddr &= PAGE_MASK ;
937-
938- __flush_dcache_pages (paddr , u_vaddr , 1 );
939-
940- if (vma -> vm_flags & VM_EXEC )
941- __inv_icache_pages (paddr , u_vaddr , 1 );
942- }
943-
944- void flush_cache_range (struct vm_area_struct * vma , unsigned long start ,
945- unsigned long end )
946- {
947- flush_cache_all ();
948- }
949-
950- void flush_anon_page (struct vm_area_struct * vma , struct page * page ,
951- unsigned long u_vaddr )
952- {
953- /* TBD: do we really need to clear the kernel mapping */
954- __flush_dcache_pages ((phys_addr_t )page_address (page ), u_vaddr , 1 );
955- __flush_dcache_pages ((phys_addr_t )page_address (page ),
956- (phys_addr_t )page_address (page ), 1 );
957-
958- }
959-
960- #endif
961-
962882void copy_user_highpage (struct page * to , struct page * from ,
963883 unsigned long u_vaddr , struct vm_area_struct * vma )
964884{
965885 struct folio * src = page_folio (from );
966886 struct folio * dst = page_folio (to );
967887 void * kfrom = kmap_atomic (from );
968888 void * kto = kmap_atomic (to );
969- int clean_src_k_mappings = 0 ;
970-
971- /*
972- * If SRC page was already mapped in userspace AND it's U-mapping is
973- * not congruent with K-mapping, sync former to physical page so that
974- * K-mapping in memcpy below, sees the right data
975- *
976- * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
977- * equally valid for SRC page as well
978- *
979- * For !VIPT cache, all of this gets compiled out as
980- * addr_not_cache_congruent() is 0
981- */
982- if (page_mapcount (from ) && addr_not_cache_congruent (kfrom , u_vaddr )) {
983- __flush_dcache_pages ((unsigned long )kfrom , u_vaddr , 1 );
984- clean_src_k_mappings = 1 ;
985- }
986889
987890 copy_page (kto , kfrom );
988891
989- /*
990- * Mark DST page K-mapping as dirty for a later finalization by
991- * update_mmu_cache(). Although the finalization could have been done
992- * here as well (given that both vaddr/paddr are available).
993- * But update_mmu_cache() already has code to do that for other
994- * non copied user pages (e.g. read faults which wire in pagecache page
995- * directly).
996- */
997892 clear_bit (PG_dc_clean , & dst -> flags );
998-
999- /*
1000- * if SRC was already usermapped and non-congruent to kernel mapping
1001- * sync the kernel mapping back to physical page
1002- */
1003- if (clean_src_k_mappings ) {
1004- __flush_dcache_pages ((unsigned long )kfrom ,
1005- (unsigned long )kfrom , 1 );
1006- } else {
1007- clear_bit (PG_dc_clean , & src -> flags );
1008- }
893+ clear_bit (PG_dc_clean , & src -> flags );
1009894
1010895 kunmap_atomic (kto );
1011896 kunmap_atomic (kfrom );
@@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
11401025 dc -> line_len , L1_CACHE_BYTES );
11411026
11421027 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
1143- if (is_isa_arcompact ()) {
1144- int handled = IS_ENABLED (CONFIG_ARC_CACHE_VIPT_ALIASING );
1145-
1146- if (dc -> colors > 1 ) {
1147- if (!handled )
1148- panic ("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n" );
1149- if (CACHE_COLORS_NUM != dc -> colors )
1150- panic ("CACHE_COLORS_NUM not optimized for config\n" );
1151- } else if (handled && dc -> colors == 1 ) {
1152- panic ("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n" );
1153- }
1028+ if (is_isa_arcompact () && dc -> colors > 1 ) {
1029+ panic ("Aliasing VIPT cache not supported\n" );
11541030 }
11551031 }
11561032