@@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 
 	/* Return if nothing to do */
-	beq a0, a1, return_from_memmove
-	beqz a2, return_from_memmove
+	beq a0, a1, .Lreturn_from_memmove
+	beqz a2, .Lreturn_from_memmove
 
 	/*
 	 * Register Uses
@@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
 	 * small enough not to bother.
 	 */
 	andi t0, a2, -(2 * SZREG)
-	beqz t0, byte_copy
+	beqz t0, .Lbyte_copy
 
 	/*
 	 * Now solve for t5 and t6.
@@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 	xor t0, a0, a1
 	andi t1, t0, (SZREG - 1)
-	beqz t1, coaligned_copy
+	beqz t1, .Lcoaligned_copy
 	/* Fall through to misaligned fixup copy */
 
-misaligned_fixup_copy:
-	bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+	bltu a1, a0, .Lmisaligned_fixup_copy_reverse
 
-misaligned_fixup_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward
 
 	andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
 	mv t3, t6 /* Fix the dest pointer in case the loop was broken */
 
 	add a1, t3, a5 /* Restore the src pointer */
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-misaligned_fixup_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse
 
 	andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
 	mv t4, t5 /* Fix the dest pointer in case the loop was broken */
 
 	add a4, t4, a5 /* Restore the src pointer */
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 /*
  * Simple copy loops for SZREG co-aligned memory locations.
  * These also make calls to do byte copies for any unaligned
  * data at their terminations.
  */
-coaligned_copy:
-	bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+	bltu a1, a0, .Lcoaligned_copy_reverse
 
-coaligned_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward
 
 1:
 	REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +235,10 @@ coaligned_copy_forward:
 	REG_S t1, (-1 * SZREG)(t3)
 	bne t3, t6, 1b
 
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-coaligned_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse
 
 1:
 	REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +247,7 @@ coaligned_copy_reverse:
 	REG_S t1, ( 0 * SZREG)(t4)
 	bne t4, t5, 1b
 
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 /*
  * These are basically sub-functions within the function. They
@@ -258,7 +258,7 @@ coaligned_copy_reverse:
  * up from where they were left and we avoid code duplication
  * without any overhead except the call in and return jumps.
  */
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
 	beq t3, t5, 2f
 1:
 	lb t1, 0(a1)
@@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
 2:
 	jalr zero, 0x0(t0) /* Return to multibyte copy loop */
 
-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
 	beq t4, t6, 2f
 1:
 	lb t1, -1(a4)
@@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
  * These will byte copy until they reach the end of data to copy.
  * At that point, they will call to return from memmove.
  */
-byte_copy:
-	bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+	bltu a1, a0, .Lbyte_copy_reverse
 
-byte_copy_forward:
+.Lbyte_copy_forward:
 	beq t3, t4, 2f
 1:
 	lb t1, 0(a1)
@@ -299,7 +299,7 @@ byte_copy_forward:
 2:
 	ret
 
-byte_copy_reverse:
+.Lbyte_copy_reverse:
 	beq t4, t3, 2f
 1:
 	lb t1, -1(a4)
@@ -309,7 +309,7 @@ byte_copy_reverse:
 	bne t4, t3, 1b
 2:
 
-return_from_memmove:
+.Lreturn_from_memmove:
 	ret
 
 SYM_FUNC_END(memmove)
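
A note on the convention the rename above relies on: with the GNU assembler targeting ELF, any symbol whose name begins with ".L" is treated as a local label and is never emitted into the object file's symbol table. Control flow is unchanged; the internal jump targets simply stop showing up as function-like symbols that kallsyms and stack unwinders can mistake for entry points inside memmove. A minimal standalone sketch of the effect (illustrative only; the label names below are made up and are not part of the patch):

	.text
	.globl entry
entry:				/* global symbol: listed by `nm` on the object */
	j .Lhidden		/* a ".L" label is a branch target like any other */
.Lhidden:			/* local label: resolved at assembly time and */
	ret			/* never written to the symbol table */

Assembling this and running `nm` on the object lists only `entry`; `.Lhidden` is gone, which is exactly why the copy-loop internals above no longer masquerade as standalone functions.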