|
30 | 30 | * -Changes related to MMU v2 (Rel 4.8) |
31 | 31 | * |
32 | 32 | * Vineetg: Aug 29th 2008 |
33 | | - * -In TLB Flush operations (Metal Fix MMU) there is a explict command to |
| 33 | + * -In TLB Flush operations (Metal Fix MMU) there is a explicit command to |
34 | 34 | * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd, |
35 | 35 | * it fails. Thus need to load it with ANY valid value before invoking |
36 | 36 | * TLBIVUTLB cmd |
37 | 37 | * |
38 | 38 | * Vineetg: Aug 21st 2008: |
39 | 39 | * -Reduced the duration of IRQ lockouts in TLB Flush routines |
40 | | - * -Multiple copies of TLB erase code seperated into a "single" function |
| 40 | + * -Multiple copies of TLB erase code separated into a "single" function |
41 | 41 | * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID |
42 | 42 | * in interrupt-safe region. |
43 | 43 | * |
|
66 | 66 | * |
67 | 67 | * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has |
68 | 68 | * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways. |
69 | | - * Given this, the thrasing problem should never happen because once the 3 |
| 69 | + * Given this, the thrashing problem should never happen because once the 3 |
70 | 70 | * J-TLB entries are created (even though 3rd will knock out one of the prev |
71 | 71 | * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy |
72 | 72 | * |
@@ -127,7 +127,7 @@ static void utlb_invalidate(void) |
127 | 127 | * There was however an obscure hardware bug, where uTLB flush would |
128 | 128 | * fail when a prior probe for J-TLB (both totally unrelated) would |
129 | 129 | * return lkup err - because the entry didn't exist in MMU. |
130 | | - * The Workround was to set Index reg with some valid value, prior to |
| 130 | + * The Workaround was to set Index reg with some valid value, prior to |
131 | 131 | * flush. This was fixed in MMU v3 |
132 | 132 | */ |
133 | 133 | unsigned int idx; |
@@ -272,7 +272,7 @@ noinline void local_flush_tlb_all(void) |
272 | 272 | } |
273 | 273 |
|
274 | 274 | /* |
275 | | - * Flush the entrie MM for userland. The fastest way is to move to Next ASID |
| 275 | + * Flush the entire MM for userland. The fastest way is to move to Next ASID |
276 | 276 | */ |
277 | 277 | noinline void local_flush_tlb_mm(struct mm_struct *mm) |
278 | 278 | { |
@@ -303,7 +303,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm) |
303 | 303 | * Difference between this and Kernel Range Flush is |
304 | 304 | * -Here the fastest way (if range is too large) is to move to next ASID |
305 | 305 | * without doing any explicit Shootdown |
306 | | - * -In case of kernel Flush, entry has to be shot down explictly |
| 306 | + * -In case of kernel Flush, entry has to be shot down explicitly |
307 | 307 | */ |
308 | 308 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
309 | 309 | unsigned long end) |
@@ -620,7 +620,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, |
620 | 620 | * Super Page size is configurable in hardware (4K to 16M), but fixed once |
621 | 621 | * RTL builds. |
622 | 622 | * |
623 | | - * The exact THP size a Linx configuration will support is a function of: |
| 623 | + * The exact THP size a Linux configuration will support is a function of: |
624 | 624 | * - MMU page size (typical 8K, RTL fixed) |
625 | 625 | * - software page walker address split between PGD:PTE:PFN (typical |
626 | 626 | * 11:8:13, but can be changed with 1 line) |
@@ -698,7 +698,7 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, |
698 | 698 |
|
699 | 699 | #endif |
700 | 700 |
|
701 | | -/* Read the Cache Build Confuration Registers, Decode them and save into |
| 701 | +/* Read the Cache Build Configuration Registers, Decode them and save into |
702 | 702 | * the cpuinfo structure for later use. |
703 | 703 | * No Validation is done here, simply read/convert the BCRs |
704 | 704 | */ |
@@ -803,13 +803,13 @@ void arc_mmu_init(void) |
803 | 803 | pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str))); |
804 | 804 |
|
805 | 805 | /* |
806 | | - * Can't be done in processor.h due to header include depenedencies |
| 806 | + * Can't be done in processor.h due to header include dependencies |
807 | 807 | */ |
808 | 808 | BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE)); |
809 | 809 |
|
810 | 810 | /* |
811 | 811 | * stack top size sanity check, |
812 | | - * Can't be done in processor.h due to header include depenedencies |
| 812 | + * Can't be done in processor.h due to header include dependencies |
813 | 813 | */ |
814 | 814 | BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE)); |
815 | 815 |
|
@@ -881,7 +881,7 @@ void arc_mmu_init(void) |
881 | 881 | * the duplicate one. |
882 | 882 | * -Knob to be verbose abt it.(TODO: hook them up to debugfs) |
883 | 883 | */ |
884 | | -volatile int dup_pd_silent; /* Be slient abt it or complain (default) */ |
| 884 | +volatile int dup_pd_silent; /* Be silent abt it or complain (default) */ |
885 | 885 |
|
886 | 886 | void do_tlb_overlap_fault(unsigned long cause, unsigned long address, |
887 | 887 | struct pt_regs *regs) |
@@ -948,7 +948,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address, |
948 | 948 |
|
949 | 949 | /*********************************************************************** |
950 | 950 | * Diagnostic Routines |
951 | | - * -Called from Low Level TLB Hanlders if things don;t look good |
| 951 | + * -Called from Low Level TLB Handlers if things don't look good |
952 | 952 | **********************************************************************/ |
953 | 953 |
|
954 | 954 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA |
|
0 commit comments