/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>	/* ENTRY()/ENDPROC() symbol macros */
#include <asm/assembler.h>	/* ret macro, CALGN() cache-align helper */

	.text
	.align	5			@ 32-byte align the entry point

	.syntax unified			@ required for stmiage/strbne forms below
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD) && !defined(MEMSET_NO_THUMB_BUILD)
	.thumb
	.thumb_func
#endif
20ENTRY(memset)
Matthias Weisserd8834a12011-03-10 21:36:32 +000021 ands r3, r0, #3 @ 1 unaligned?
Stefan Agner75d7a0d2014-12-18 18:10:33 +010022 mov ip, r0 @ preserve r0 as return value
23 bne 6f @ 1
Matthias Weisserd8834a12011-03-10 21:36:32 +000024/*
Stefan Agner75d7a0d2014-12-18 18:10:33 +010025 * we know that the pointer in ip is aligned to a word boundary.
Matthias Weisserd8834a12011-03-10 21:36:32 +000026 */
Stefan Agner75d7a0d2014-12-18 18:10:33 +0100271: orr r1, r1, r1, lsl #8
Matthias Weisserd8834a12011-03-10 21:36:32 +000028 orr r1, r1, r1, lsl #16
29 mov r3, r1
30 cmp r2, #16
31 blt 4f
32
33#if ! CALGN(1)+0
34
35/*
Stefan Agner75d7a0d2014-12-18 18:10:33 +010036 * We need 2 extra registers for this loop - use r8 and the LR
Matthias Weisserd8834a12011-03-10 21:36:32 +000037 */
Stefan Agner75d7a0d2014-12-18 18:10:33 +010038 stmfd sp!, {r8, lr}
39 mov r8, r1
Matthias Weisserd8834a12011-03-10 21:36:32 +000040 mov lr, r1
41
422: subs r2, r2, #64
Stefan Agner75d7a0d2014-12-18 18:10:33 +010043 stmiage ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
44 stmiage ip!, {r1, r3, r8, lr}
45 stmiage ip!, {r1, r3, r8, lr}
46 stmiage ip!, {r1, r3, r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000047 bgt 2b
Stefan Agner75d7a0d2014-12-18 18:10:33 +010048 ldmfdeq sp!, {r8, pc} @ Now <64 bytes to go.
Matthias Weisserd8834a12011-03-10 21:36:32 +000049/*
50 * No need to correct the count; we're only testing bits from now on
51 */
52 tst r2, #32
Stefan Agner75d7a0d2014-12-18 18:10:33 +010053 stmiane ip!, {r1, r3, r8, lr}
54 stmiane ip!, {r1, r3, r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000055 tst r2, #16
Stefan Agner75d7a0d2014-12-18 18:10:33 +010056 stmiane ip!, {r1, r3, r8, lr}
57 ldmfd sp!, {r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000058
59#else
60
61/*
62 * This version aligns the destination pointer in order to write
63 * whole cache lines at once.
64 */
65
Stefan Agner75d7a0d2014-12-18 18:10:33 +010066 stmfd sp!, {r4-r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000067 mov r4, r1
68 mov r5, r1
69 mov r6, r1
70 mov r7, r1
Stefan Agner75d7a0d2014-12-18 18:10:33 +010071 mov r8, r1
Matthias Weisserd8834a12011-03-10 21:36:32 +000072 mov lr, r1
73
74 cmp r2, #96
Stefan Agner75d7a0d2014-12-18 18:10:33 +010075 tstgt ip, #31
Matthias Weisserd8834a12011-03-10 21:36:32 +000076 ble 3f
77
Stefan Agner75d7a0d2014-12-18 18:10:33 +010078 and r8, ip, #31
79 rsb r8, r8, #32
80 sub r2, r2, r8
81 movs r8, r8, lsl #(32 - 4)
82 stmiacs ip!, {r4, r5, r6, r7}
83 stmiami ip!, {r4, r5}
84 tst r8, #(1 << 30)
85 mov r8, r1
86 strne r1, [ip], #4
Matthias Weisserd8834a12011-03-10 21:36:32 +000087
883: subs r2, r2, #64
Stefan Agner75d7a0d2014-12-18 18:10:33 +010089 stmiage ip!, {r1, r3-r8, lr}
90 stmiage ip!, {r1, r3-r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000091 bgt 3b
Stefan Agner75d7a0d2014-12-18 18:10:33 +010092 ldmfdeq sp!, {r4-r8, pc}
Matthias Weisserd8834a12011-03-10 21:36:32 +000093
94 tst r2, #32
Stefan Agner75d7a0d2014-12-18 18:10:33 +010095 stmiane ip!, {r1, r3-r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000096 tst r2, #16
Stefan Agner75d7a0d2014-12-18 18:10:33 +010097 stmiane ip!, {r4-r7}
98 ldmfd sp!, {r4-r8, lr}
Matthias Weisserd8834a12011-03-10 21:36:32 +000099
100#endif
101
1024: tst r2, #8
Stefan Agner75d7a0d2014-12-18 18:10:33 +0100103 stmiane ip!, {r1, r3}
Matthias Weisserd8834a12011-03-10 21:36:32 +0000104 tst r2, #4
Stefan Agner75d7a0d2014-12-18 18:10:33 +0100105 strne r1, [ip], #4
Matthias Weisserd8834a12011-03-10 21:36:32 +0000106/*
107 * When we get here, we've got less than 4 bytes to zero. We
108 * may have an unaligned pointer as well.
109 */
1105: tst r2, #2
Stefan Agner75d7a0d2014-12-18 18:10:33 +0100111 strbne r1, [ip], #1
112 strbne r1, [ip], #1
Matthias Weisserd8834a12011-03-10 21:36:32 +0000113 tst r2, #1
Stefan Agner75d7a0d2014-12-18 18:10:33 +0100114 strbne r1, [ip], #1
115 ret lr
116
1176: subs r2, r2, #4 @ 1 do we have enough
118 blt 5b @ 1 bytes to align with?
119 cmp r3, #2 @ 1
120 strblt r1, [ip], #1 @ 1
121 strble r1, [ip], #1 @ 1
122 strb r1, [ip], #1 @ 1
123 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
124 b 1b
125ENDPROC(memset)