Linux Audio

Check our new training course

Loading...
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * arch/alpha/lib/ev6-memset.S
  4 *
  5 * This is an efficient (and relatively small) implementation of the C library
  6 * "memset()" function for the 21264 implementation of Alpha.
  7 *
  8 * 21264 version  contributed by Rick Gorton <rick.gorton@alpha-processor.com>
  9 *
 10 * Much of the information about 21264 scheduling/coding comes from:
 11 *	Compiler Writer's Guide for the Alpha 21264
 12 *	abbreviated as 'CWG' in other comments here
 13 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 14 * Scheduling notation:
 15 *	E	- either cluster
 16 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 17 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 18 * The algorithm for the leading and trailing quadwords remains the same,
 19 * however the loop has been unrolled to enable better memory throughput,
 20 * and the code has been replicated for each of the entry points: __memset
 21 * and __memset16 to permit better scheduling to eliminate the stalling
 22 * encountered during the mask replication.
 23 * A future enhancement might be to put in a byte store loop for really
 24 * small (say < 32 bytes) memset()s.  Whether or not that change would be
 25 * a win in the kernel would depend upon the contextual usage.
 26 * WARNING: Maintaining this is going to be more work than the above version,
 27 * as fixes will need to be made in multiple places.  The performance gain
 28 * is worth it.
 29 */
 30#include <asm/export.h>
 31	.set noat
 32	.set noreorder
 33.text
 34	.globl memset
 35	.globl __memset
 36	.globl ___memset
 37	.globl __memset16
 38	.globl __constant_c_memset
 39
	/*
	 * ___memset — generic byte memset, EV6-scheduled.
	 * In:   $16 = dst, $17 = fill value (only low 8 bits used; replicated
	 *       to all 8 bytes of $17 below), $18 = byte count.
	 * Out:  $0 = dst (set before any store, so it survives all paths).
	 * Uses: $1-$7 as scratch; returns via $26.
	 */
  40	.ent ___memset
  41.align 5
  42___memset:
  43	.frame $30,0,$26,0
  44	.prologue 0
  45
  46	/*
  47	 * Serious stalling happens.  The only way to mitigate this is to
  48	 * undertake a major re-write to interleave the constant materialization
  49	 * with other parts of the fall-through code.  This is important, even
  50	 * though it makes maintenance tougher.
  51	 * Do this later.
  52	 */
  53	and $17,255,$1		# E : 00000000000000ch
  54	insbl $17,1,$2		# U : 000000000000ch00
  55	bis $16,$16,$0		# E : return value
  56	ble $18,end_b		# U : zero length requested?
  57
  58	addq $18,$16,$6		# E : max address to write to
  59	bis	$1,$2,$17	# E : 000000000000chch
  60	insbl	$1,2,$3		# U : 0000000000ch0000
  61	insbl	$1,3,$4		# U : 00000000ch000000
  62
  63	or	$3,$4,$3	# E : 00000000chch0000
  64	inswl	$17,4,$5	# U : 0000chch00000000
  65	xor	$16,$6,$1	# E : will complete write be within one quadword?
  66	inswl	$17,6,$2	# U : chch000000000000
  67
  68	or	$17,$3,$17	# E : 00000000chchchch
  69	or	$2,$5,$2	# E : chchchch00000000
  70	bic	$1,7,$1		# E : fit within a single quadword?
  71	and	$16,7,$3	# E : Target addr misalignment
  72
  73	or	$17,$2,$17	# E : chchchchchchchch
  74	beq	$1,within_quad_b # U :
  75	nop			# E :
  76	beq	$3,aligned_b	# U : target is 0mod8
  77
  78	/*
  79	 * Target address is misaligned, and won't fit within a quadword
  80	 */
  81	ldq_u $4,0($16)		# L : Fetch first partial
  82	bis $16,$16,$5		# E : Save the address
  83	insql $17,$16,$2	# U : Insert new bytes
  84	subq $3,8,$3		# E : Invert (for addressing uses)
  85
  86	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
  87	mskql $4,$16,$4		# U : clear relevant parts of the quad
  88	subq $16,$3,$16		# E : $16 is new aligned destination
  89	bis $2,$4,$1		# E : Final bytes
  90
  91	nop
  92	stq_u $1,0($5)		# L : Store result
  93	nop
  94	nop
  95
  96.align 4
  97aligned_b:
  98	/*
  99	 * We are now guaranteed to be quad aligned, with at least
 100	 * one partial quad to write.
 101	 */
 102
 103	sra $18,3,$3		# U : Number of remaining quads to write
 104	and $18,7,$18		# E : Number of trailing bytes to write
 105	bis $16,$16,$5		# E : Save dest address
 106	beq $3,no_quad_b	# U : tail stuff only
 107
 108	/*
 109	 * it's worth the effort to unroll this and use wh64 if possible
 110	 * Lifted a bunch of code from clear_user.S
 111	 * At this point, entry values are:
 112	 * $16	Current destination address
 113	 * $5	A copy of $16
 114	 * $6	The max quadword address to write to
 115	 * $18	Number trailer bytes
 116	 * $3	Number quads to write
 117	 */
 118
 119	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
 120	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
 121	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
 122	blt	$4, loop_b	# U :
 123
 124	/*
 125	 * We know we've got at least 16 quads, minimum of one trip
 126	 * through unrolled loop.  Do a quad at a time to get us 0mod64
 127	 * aligned.
 128	 */
 129
 130	nop			# E :
 131	nop			# E :
 132	nop			# E :
 133	beq	$1, $bigalign_b	# U :
 134
 135$alignmod64_b:
 136	stq	$17, 0($5)	# L :
 137	subq	$3, 1, $3	# E : For consistency later
 138	addq	$1, 8, $1	# E : Increment towards zero for alignment
 139	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)
 140
 141	nop
 142	nop
 143	addq	$5, 8, $5	# E : Inc address
 144	blt	$1, $alignmod64_b # U :
 145
 146$bigalign_b:
 147	/*
 148	 * $3 - number quads left to go
 149	 * $5 - target address (aligned 0mod64)
 150	 * $17 - mask of stuff to store
 151	 * Scratch registers available: $7, $2, $4, $1
 152	 * we know that we'll be taking a minimum of one trip through
 153 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
 154	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
 155	 * The wh64 is issued on for the starting destination address for trip +2
 156	 * through the loop, and if there are less than two trips left, the target
 157	 * address will be for the current trip.
 158	 */
 159
 160$do_wh64_b:
 161	wh64	($4)		# L1 : memory subsystem write hint
 162	subq	$3, 24, $2	# E : For determining future wh64 addresses
 163	stq	$17, 0($5)	# L :
 164	nop			# E :
 165
 166	addq	$5, 128, $4	# E : speculative target of next wh64
 167	stq	$17, 8($5)	# L :
 168	stq	$17, 16($5)	# L :
 169	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)
 170
 171	stq	$17, 24($5)	# L :
 172	stq	$17, 32($5)	# L :
 173	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
 174	nop
 175
 176	stq	$17, 40($5)	# L :
 177	stq	$17, 48($5)	# L :
 178	subq	$3, 16, $2	# E : Repeat the loop at least once more?
 179	nop
 180
 181	stq	$17, 56($5)	# L :
 182	addq	$5, 64, $5	# E :
 183	subq	$3, 8, $3	# E :
 184	bge	$2, $do_wh64_b	# U :
 185
 186	nop
 187	nop
 188	nop
 189	beq	$3, no_quad_b	# U : Might have finished already
 190
 191.align 4
 192	/*
 193	 * Simple loop for trailing quadwords, or for small amounts
 194	 * of data (where we can't use an unrolled loop and wh64)
 195	 */
 196loop_b:
 197	stq $17,0($5)		# L :
 198	subq $3,1,$3		# E : Decrement number quads left
 199	addq $5,8,$5		# E : Inc address
 200	bne $3,loop_b		# U : more?
 201
 202no_quad_b:
 203	/*
 204	 * Write 0..7 trailing bytes.
 205	 */
 206	nop			# E :
 207	beq $18,end_b		# U : All done?
 208	ldq $7,0($5)		# L :
 209	mskqh $7,$6,$2		# U : Mask final quad
 210
 211	insqh $17,$6,$4		# U : New bits
 212	bis $2,$4,$1		# E : Put it all together
 213	stq $1,0($5)		# L : And back to memory
 214	ret $31,($26),1		# L0 :
 215
 216within_quad_b:
 217	ldq_u $1,0($16)		# L :
 218	insql $17,$16,$2	# U : New bits
 219	mskql $1,$16,$4		# U : Clear old
 220	bis $2,$4,$2		# E : New result
 221
 222	mskql $2,$6,$4		# U :
 223	mskqh $1,$6,$2		# U :
 224	bis $2,$4,$1		# E :
 225	stq_u $1,0($16)		# L :
 226
 227end_b:
 228	nop
 229	nop
 230	nop
 231	ret $31,($26),1		# L0 :
 232	.end ___memset
 233	EXPORT_SYMBOL(___memset)
234
235	/*
236	 * This is the original body of code, prior to replication and
237	 * rescheduling.  Leave it here, as there may be calls to this
238	 * entry point.
239	 */
 240.align 4
	/*
	 * __constant_c_memset — memset with a pre-replicated fill pattern.
	 * In:   $16 = dst, $17 = full 8-byte fill pattern (caller must have
	 *       replicated it already; no byte replication is done here),
	 *       $18 = byte count.
	 * Out:  $0 = dst.  Uses $1-$7 as scratch; returns via $26.
	 */
 241	.ent __constant_c_memset
 242__constant_c_memset:
 243	.frame $30,0,$26,0
 244	.prologue 0
 245
 246	addq $18,$16,$6		# E : max address to write to
 247	bis $16,$16,$0		# E : return value
 248	xor $16,$6,$1		# E : will complete write be within one quadword?
 249	ble $18,end		# U : zero length requested?
 250
 251	bic $1,7,$1		# E : fit within a single quadword
 252	beq $1,within_one_quad	# U :
 253	and $16,7,$3		# E : Target addr misalignment
 254	beq $3,aligned		# U : target is 0mod8
 255
 256	/*
 257	 * Target address is misaligned, and won't fit within a quadword
 258	 */
 259	ldq_u $4,0($16)		# L : Fetch first partial
 260	bis $16,$16,$5		# E : Save the address
 261	insql $17,$16,$2	# U : Insert new bytes
 262	subq $3,8,$3		# E : Invert (for addressing uses)
 263
 264	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
 265	mskql $4,$16,$4		# U : clear relevant parts of the quad
 266	subq $16,$3,$16		# E : $16 is new aligned destination
 267	bis $2,$4,$1		# E : Final bytes
 268
 269	nop
 270	stq_u $1,0($5)		# L : Store result
 271	nop
 272	nop
 273
 274.align 4
 275aligned:
 276	/*
 277	 * We are now guaranteed to be quad aligned, with at least
 278	 * one partial quad to write.
 279	 */
 280
 281	sra $18,3,$3		# U : Number of remaining quads to write
 282	and $18,7,$18		# E : Number of trailing bytes to write
 283	bis $16,$16,$5		# E : Save dest address
 284	beq $3,no_quad		# U : tail stuff only
 285
 286	/*
 287	 * it's worth the effort to unroll this and use wh64 if possible
 288	 * Lifted a bunch of code from clear_user.S
 289	 * At this point, entry values are:
 290	 * $16	Current destination address
 291	 * $5	A copy of $16
 292	 * $6	The max quadword address to write to
 293	 * $18	Number trailer bytes
 294	 * $3	Number quads to write
 295	 */
 296
 297	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
 298	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
 299	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
 300	blt	$4, loop	# U :
 301
 302	/*
 303	 * We know we've got at least 16 quads, minimum of one trip
 304	 * through unrolled loop.  Do a quad at a time to get us 0mod64
 305	 * aligned.
 306	 */
 307
 308	nop			# E :
 309	nop			# E :
 310	nop			# E :
 311	beq	$1, $bigalign	# U :
 312
 313$alignmod64:
 314	stq	$17, 0($5)	# L :
 315	subq	$3, 1, $3	# E : For consistency later
 316	addq	$1, 8, $1	# E : Increment towards zero for alignment
 317	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)
 318
 319	nop
 320	nop
 321	addq	$5, 8, $5	# E : Inc address
 322	blt	$1, $alignmod64	# U :
 323
 324$bigalign:
 325	/*
 326	 * $3 - number quads left to go
 327	 * $5 - target address (aligned 0mod64)
 328	 * $17 - mask of stuff to store
 329	 * Scratch registers available: $7, $2, $4, $1
 330	 * we know that we'll be taking a minimum of one trip through
 331 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
 332	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
 333	 * The wh64 is issued on for the starting destination address for trip +2
 334	 * through the loop, and if there are less than two trips left, the target
 335	 * address will be for the current trip.
 336	 */
 337
 338$do_wh64:
 339	wh64	($4)		# L1 : memory subsystem write hint
 340	subq	$3, 24, $2	# E : For determining future wh64 addresses
 341	stq	$17, 0($5)	# L :
 342	nop			# E :
 343
 344	addq	$5, 128, $4	# E : speculative target of next wh64
 345	stq	$17, 8($5)	# L :
 346	stq	$17, 16($5)	# L :
 347	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)
 348
 349	stq	$17, 24($5)	# L :
 350	stq	$17, 32($5)	# L :
 351	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
 352	nop
 353
 354	stq	$17, 40($5)	# L :
 355	stq	$17, 48($5)	# L :
 356	subq	$3, 16, $2	# E : Repeat the loop at least once more?
 357	nop
 358
 359	stq	$17, 56($5)	# L :
 360	addq	$5, 64, $5	# E :
 361	subq	$3, 8, $3	# E :
 362	bge	$2, $do_wh64	# U :
 363
 364	nop
 365	nop
 366	nop
 367	beq	$3, no_quad	# U : Might have finished already
 368
 369.align 4
 370	/*
 371	 * Simple loop for trailing quadwords, or for small amounts
 372	 * of data (where we can't use an unrolled loop and wh64)
 373	 */
 374loop:
 375	stq $17,0($5)		# L :
 376	subq $3,1,$3		# E : Decrement number quads left
 377	addq $5,8,$5		# E : Inc address
 378	bne $3,loop		# U : more?
 379
 380no_quad:
 381	/*
 382	 * Write 0..7 trailing bytes.
 383	 */
 384	nop			# E :
 385	beq $18,end		# U : All done?
 386	ldq $7,0($5)		# L :
 387	mskqh $7,$6,$2		# U : Mask final quad
 388
 389	insqh $17,$6,$4		# U : New bits
 390	bis $2,$4,$1		# E : Put it all together
 391	stq $1,0($5)		# L : And back to memory
 392	ret $31,($26),1		# L0 :
 393
 394within_one_quad:
 395	ldq_u $1,0($16)		# L :
 396	insql $17,$16,$2	# U : New bits
 397	mskql $1,$16,$4		# U : Clear old
 398	bis $2,$4,$2		# E : New result
 399
 400	mskql $2,$6,$4		# U :
 401	mskqh $1,$6,$2		# U :
 402	bis $2,$4,$1		# E :
 403	stq_u $1,0($16)		# L :
 404
 405end:
 406	nop
 407	nop
 408	nop
 409	ret $31,($26),1		# L0 :
 410	.end __constant_c_memset
 411	EXPORT_SYMBOL(__constant_c_memset)
412
413	/*
414	 * This is a replicant of the __constant_c_memset code, rescheduled
415	 * to mask stalls.  Note that entry point names also had to change
416	 */
	/*
	 * __memset16 — fill memory with a 16-bit value, EV6-scheduled.
	 * In:   $16 = dst, $17 = 16-bit value (replicated into all four
	 *       halfwords of $17 by the inswl/or sequence below),
	 *       $18 = byte count.
	 * Out:  $0 = dst.  Uses $1-$7 as scratch; returns via $26.
	 * NOTE(review): the code only handles quadword (mis)alignment; the
	 * halfword pattern lands on even offsets only if dst is 2-byte
	 * aligned — presumably guaranteed by callers, confirm at call sites.
	 */
 417	.align 5
 418	.ent __memset16
 419
 420__memset16:
 421	.frame $30,0,$26,0
 422	.prologue 0
 423
 424	inswl $17,0,$5		# U : 000000000000c1c2
 425	inswl $17,2,$2		# U : 00000000c1c20000
 426	bis $16,$16,$0		# E : return value
 427	addq	$18,$16,$6	# E : max address to write to
 428
 429	ble $18, end_w		# U : zero length requested?
 430	inswl	$17,4,$3	# U : 0000c1c200000000
 431	inswl	$17,6,$4	# U : c1c2000000000000
 432	xor	$16,$6,$1	# E : will complete write be within one quadword?
 433
 434	or	$2,$5,$2	# E : 00000000c1c2c1c2
 435	or	$3,$4,$17	# E : c1c2c1c200000000
 436	bic	$1,7,$1		# E : fit within a single quadword
 437	and	$16,7,$3	# E : Target addr misalignment
 438
 439	or	$17,$2,$17	# E : c1c2c1c2c1c2c1c2
 440	beq $1,within_quad_w	# U :
 441	nop
 442	beq $3,aligned_w	# U : target is 0mod8
 443
 444	/*
 445	 * Target address is misaligned, and won't fit within a quadword
 446	 */
 447	ldq_u $4,0($16)		# L : Fetch first partial
 448	bis $16,$16,$5		# E : Save the address
 449	insql $17,$16,$2	# U : Insert new bytes
 450	subq $3,8,$3		# E : Invert (for addressing uses)
 451
 452	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
 453	mskql $4,$16,$4		# U : clear relevant parts of the quad
 454	subq $16,$3,$16		# E : $16 is new aligned destination
 455	bis $2,$4,$1		# E : Final bytes
 456
 457	nop
 458	stq_u $1,0($5)		# L : Store result
 459	nop
 460	nop
 461
 462.align 4
 463aligned_w:
 464	/*
 465	 * We are now guaranteed to be quad aligned, with at least
 466	 * one partial quad to write.
 467	 */
 468
 469	sra $18,3,$3		# U : Number of remaining quads to write
 470	and $18,7,$18		# E : Number of trailing bytes to write
 471	bis $16,$16,$5		# E : Save dest address
 472	beq $3,no_quad_w	# U : tail stuff only
 473
 474	/*
 475	 * it's worth the effort to unroll this and use wh64 if possible
 476	 * Lifted a bunch of code from clear_user.S
 477	 * At this point, entry values are:
 478	 * $16	Current destination address
 479	 * $5	A copy of $16
 480	 * $6	The max quadword address to write to
 481	 * $18	Number trailer bytes
 482	 * $3	Number quads to write
 483	 */
 484
 485	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
 486	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
 487	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
 488	blt	$4, loop_w	# U :
 489
 490	/*
 491	 * We know we've got at least 16 quads, minimum of one trip
 492	 * through unrolled loop.  Do a quad at a time to get us 0mod64
 493	 * aligned.
 494	 */
 495
 496	nop			# E :
 497	nop			# E :
 498	nop			# E :
 499	beq	$1, $bigalign_w	# U :
 500
 501$alignmod64_w:
 502	stq	$17, 0($5)	# L :
 503	subq	$3, 1, $3	# E : For consistency later
 504	addq	$1, 8, $1	# E : Increment towards zero for alignment
 505	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)
 506
 507	nop
 508	nop
 509	addq	$5, 8, $5	# E : Inc address
 510	blt	$1, $alignmod64_w	# U :
 511
 512$bigalign_w:
 513	/*
 514	 * $3 - number quads left to go
 515	 * $5 - target address (aligned 0mod64)
 516	 * $17 - mask of stuff to store
 517	 * Scratch registers available: $7, $2, $4, $1
 518	 * we know that we'll be taking a minimum of one trip through
 519 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
 520	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
 521	 * The wh64 is issued on for the starting destination address for trip +2
 522	 * through the loop, and if there are less than two trips left, the target
 523	 * address will be for the current trip.
 524	 */
 525
 526$do_wh64_w:
 527	wh64	($4)		# L1 : memory subsystem write hint
 528	subq	$3, 24, $2	# E : For determining future wh64 addresses
 529	stq	$17, 0($5)	# L :
 530	nop			# E :
 531
 532	addq	$5, 128, $4	# E : speculative target of next wh64
 533	stq	$17, 8($5)	# L :
 534	stq	$17, 16($5)	# L :
 535	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)
 536
 537	stq	$17, 24($5)	# L :
 538	stq	$17, 32($5)	# L :
 539	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
 540	nop
 541
 542	stq	$17, 40($5)	# L :
 543	stq	$17, 48($5)	# L :
 544	subq	$3, 16, $2	# E : Repeat the loop at least once more?
 545	nop
 546
 547	stq	$17, 56($5)	# L :
 548	addq	$5, 64, $5	# E :
 549	subq	$3, 8, $3	# E :
 550	bge	$2, $do_wh64_w	# U :
 551
 552	nop
 553	nop
 554	nop
 555	beq	$3, no_quad_w	# U : Might have finished already
 556
 557.align 4
 558	/*
 559	 * Simple loop for trailing quadwords, or for small amounts
 560	 * of data (where we can't use an unrolled loop and wh64)
 561	 */
 562loop_w:
 563	stq $17,0($5)		# L :
 564	subq $3,1,$3		# E : Decrement number quads left
 565	addq $5,8,$5		# E : Inc address
 566	bne $3,loop_w		# U : more?
 567
 568no_quad_w:
 569	/*
 570	 * Write 0..7 trailing bytes.
 571	 */
 572	nop			# E :
 573	beq $18,end_w		# U : All done?
 574	ldq $7,0($5)		# L :
 575	mskqh $7,$6,$2		# U : Mask final quad
 576
 577	insqh $17,$6,$4		# U : New bits
 578	bis $2,$4,$1		# E : Put it all together
 579	stq $1,0($5)		# L : And back to memory
 580	ret $31,($26),1		# L0 :
 581
 582within_quad_w:
 583	ldq_u $1,0($16)		# L :
 584	insql $17,$16,$2	# U : New bits
 585	mskql $1,$16,$4		# U : Clear old
 586	bis $2,$4,$2		# E : New result
 587
 588	mskql $2,$6,$4		# U :
 589	mskqh $1,$6,$2		# U :
 590	bis $2,$4,$1		# E :
 591	stq_u $1,0($16)		# L :
 592
 593end_w:
 594	nop
 595	nop
 596	nop
 597	ret $31,($26),1		# L0 :
 598
 599	.end __memset16
 600	EXPORT_SYMBOL(__memset16)
601
	/* memset and __memset are plain symbol aliases of the EV6-tuned
	 * ___memset entry point above; both are exported for modules. */
 602memset = ___memset
 603__memset = ___memset
 604	EXPORT_SYMBOL(memset)
 605	EXPORT_SYMBOL(__memset)
v3.15
 
  1/*
  2 * arch/alpha/lib/ev6-memset.S
  3 *
  4 * This is an efficient (and relatively small) implementation of the C library
  5 * "memset()" function for the 21264 implementation of Alpha.
  6 *
  7 * 21264 version  contributed by Rick Gorton <rick.gorton@alpha-processor.com>
  8 *
  9 * Much of the information about 21264 scheduling/coding comes from:
 10 *	Compiler Writer's Guide for the Alpha 21264
 11 *	abbreviated as 'CWG' in other comments here
 12 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 13 * Scheduling notation:
 14 *	E	- either cluster
 15 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 16 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 17 * The algorithm for the leading and trailing quadwords remains the same,
 18 * however the loop has been unrolled to enable better memory throughput,
 19 * and the code has been replicated for each of the entry points: __memset
 20 * and __memsetw to permit better scheduling to eliminate the stalling
 21 * encountered during the mask replication.
 22 * A future enhancement might be to put in a byte store loop for really
 23 * small (say < 32 bytes) memset()s.  Whether or not that change would be
 24 * a win in the kernel would depend upon the contextual usage.
 25 * WARNING: Maintaining this is going to be more work than the above version,
 26 * as fixes will need to be made in multiple places.  The performance gain
 27 * is worth it.
 28 */
 29
 30	.set noat
 31	.set noreorder
 32.text
 33	.globl memset
 34	.globl __memset
 35	.globl ___memset
 36	.globl __memsetw
 37	.globl __constant_c_memset
 38
	/*
	 * ___memset — generic byte memset, EV6-scheduled (v3.15 copy).
	 * In:   $16 = dst, $17 = fill value (only low 8 bits used; replicated
	 *       to all 8 bytes of $17 below), $18 = byte count.
	 * Out:  $0 = dst (set before any store, so it survives all paths).
	 * Uses: $1-$7 as scratch; returns via $26.
	 */
 39	.ent ___memset
 40.align 5
 41___memset:
 42	.frame $30,0,$26,0
 43	.prologue 0
 44
 45	/*
 46	 * Serious stalling happens.  The only way to mitigate this is to
 47	 * undertake a major re-write to interleave the constant materialization
 48	 * with other parts of the fall-through code.  This is important, even
 49	 * though it makes maintenance tougher.
 50	 * Do this later.
 51	 */
 52	and $17,255,$1		# E : 00000000000000ch
 53	insbl $17,1,$2		# U : 000000000000ch00
 54	bis $16,$16,$0		# E : return value
 55	ble $18,end_b		# U : zero length requested?
 56
 57	addq $18,$16,$6		# E : max address to write to
 58	bis	$1,$2,$17	# E : 000000000000chch
 59	insbl	$1,2,$3		# U : 0000000000ch0000
 60	insbl	$1,3,$4		# U : 00000000ch000000
 61
 62	or	$3,$4,$3	# E : 00000000chch0000
 63	inswl	$17,4,$5	# U : 0000chch00000000
 64	xor	$16,$6,$1	# E : will complete write be within one quadword?
 65	inswl	$17,6,$2	# U : chch000000000000
 66
 67	or	$17,$3,$17	# E : 00000000chchchch
 68	or	$2,$5,$2	# E : chchchch00000000
 69	bic	$1,7,$1		# E : fit within a single quadword?
 70	and	$16,7,$3	# E : Target addr misalignment
 71
 72	or	$17,$2,$17	# E : chchchchchchchch
 73	beq	$1,within_quad_b # U :
 74	nop			# E :
 75	beq	$3,aligned_b	# U : target is 0mod8
 76
 77	/*
 78	 * Target address is misaligned, and won't fit within a quadword
 79	 */
 80	ldq_u $4,0($16)		# L : Fetch first partial
 81	bis $16,$16,$5		# E : Save the address
 82	insql $17,$16,$2	# U : Insert new bytes
 83	subq $3,8,$3		# E : Invert (for addressing uses)
 84
 85	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
 86	mskql $4,$16,$4		# U : clear relevant parts of the quad
 87	subq $16,$3,$16		# E : $16 is new aligned destination
 88	bis $2,$4,$1		# E : Final bytes
 89
 90	nop
 91	stq_u $1,0($5)		# L : Store result
 92	nop
 93	nop
 94
 95.align 4
 96aligned_b:
 97	/*
 98	 * We are now guaranteed to be quad aligned, with at least
 99	 * one partial quad to write.
100	 */
101
102	sra $18,3,$3		# U : Number of remaining quads to write
103	and $18,7,$18		# E : Number of trailing bytes to write
104	bis $16,$16,$5		# E : Save dest address
105	beq $3,no_quad_b	# U : tail stuff only
106
107	/*
108	 * it's worth the effort to unroll this and use wh64 if possible
109	 * Lifted a bunch of code from clear_user.S
110	 * At this point, entry values are:
111	 * $16	Current destination address
112	 * $5	A copy of $16
113	 * $6	The max quadword address to write to
114	 * $18	Number trailer bytes
115	 * $3	Number quads to write
116	 */
117
118	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
119	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
120	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
121	blt	$4, loop_b	# U :
122
123	/*
124	 * We know we've got at least 16 quads, minimum of one trip
125	 * through unrolled loop.  Do a quad at a time to get us 0mod64
126	 * aligned.
127	 */
128
129	nop			# E :
130	nop			# E :
131	nop			# E :
132	beq	$1, $bigalign_b	# U :
133
134$alignmod64_b:
135	stq	$17, 0($5)	# L :
136	subq	$3, 1, $3	# E : For consistency later
137	addq	$1, 8, $1	# E : Increment towards zero for alignment
138	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)
139
140	nop
141	nop
142	addq	$5, 8, $5	# E : Inc address
143	blt	$1, $alignmod64_b # U :
144
145$bigalign_b:
146	/*
147	 * $3 - number quads left to go
148	 * $5 - target address (aligned 0mod64)
149	 * $17 - mask of stuff to store
150	 * Scratch registers available: $7, $2, $4, $1
151	 * we know that we'll be taking a minimum of one trip through
152 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
153	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
154	 * The wh64 is issued on for the starting destination address for trip +2
155	 * through the loop, and if there are less than two trips left, the target
156	 * address will be for the current trip.
157	 */
158
159$do_wh64_b:
160	wh64	($4)		# L1 : memory subsystem write hint
161	subq	$3, 24, $2	# E : For determining future wh64 addresses
162	stq	$17, 0($5)	# L :
163	nop			# E :
164
165	addq	$5, 128, $4	# E : speculative target of next wh64
166	stq	$17, 8($5)	# L :
167	stq	$17, 16($5)	# L :
168	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)
169
170	stq	$17, 24($5)	# L :
171	stq	$17, 32($5)	# L :
172	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
173	nop
174
175	stq	$17, 40($5)	# L :
176	stq	$17, 48($5)	# L :
177	subq	$3, 16, $2	# E : Repeat the loop at least once more?
178	nop
179
180	stq	$17, 56($5)	# L :
181	addq	$5, 64, $5	# E :
182	subq	$3, 8, $3	# E :
183	bge	$2, $do_wh64_b	# U :
184
185	nop
186	nop
187	nop
188	beq	$3, no_quad_b	# U : Might have finished already
189
190.align 4
191	/*
192	 * Simple loop for trailing quadwords, or for small amounts
193	 * of data (where we can't use an unrolled loop and wh64)
194	 */
195loop_b:
196	stq $17,0($5)		# L :
197	subq $3,1,$3		# E : Decrement number quads left
198	addq $5,8,$5		# E : Inc address
199	bne $3,loop_b		# U : more?
200
201no_quad_b:
202	/*
203	 * Write 0..7 trailing bytes.
204	 */
205	nop			# E :
206	beq $18,end_b		# U : All done?
207	ldq $7,0($5)		# L :
208	mskqh $7,$6,$2		# U : Mask final quad
209
210	insqh $17,$6,$4		# U : New bits
211	bis $2,$4,$1		# E : Put it all together
212	stq $1,0($5)		# L : And back to memory
213	ret $31,($26),1		# L0 :
214
215within_quad_b:
216	ldq_u $1,0($16)		# L :
217	insql $17,$16,$2	# U : New bits
218	mskql $1,$16,$4		# U : Clear old
219	bis $2,$4,$2		# E : New result
220
221	mskql $2,$6,$4		# U :
222	mskqh $1,$6,$2		# U :
223	bis $2,$4,$1		# E :
224	stq_u $1,0($16)		# L :
225
226end_b:
227	nop
228	nop
229	nop
230	ret $31,($26),1		# L0 :
231	.end ___memset
 
232
233	/*
234	 * This is the original body of code, prior to replication and
235	 * rescheduling.  Leave it here, as there may be calls to this
236	 * entry point.
237	 */
238.align 4
	/*
	 * __constant_c_memset — memset with a pre-replicated fill pattern
	 * (v3.15 copy).
	 * In:   $16 = dst, $17 = full 8-byte fill pattern (caller must have
	 *       replicated it already; no byte replication is done here),
	 *       $18 = byte count.
	 * Out:  $0 = dst.  Uses $1-$7 as scratch; returns via $26.
	 */
239	.ent __constant_c_memset
240__constant_c_memset:
241	.frame $30,0,$26,0
242	.prologue 0
243
244	addq $18,$16,$6		# E : max address to write to
245	bis $16,$16,$0		# E : return value
246	xor $16,$6,$1		# E : will complete write be within one quadword?
247	ble $18,end		# U : zero length requested?
248
249	bic $1,7,$1		# E : fit within a single quadword
250	beq $1,within_one_quad	# U :
251	and $16,7,$3		# E : Target addr misalignment
252	beq $3,aligned		# U : target is 0mod8
253
254	/*
255	 * Target address is misaligned, and won't fit within a quadword
256	 */
257	ldq_u $4,0($16)		# L : Fetch first partial
258	bis $16,$16,$5		# E : Save the address
259	insql $17,$16,$2	# U : Insert new bytes
260	subq $3,8,$3		# E : Invert (for addressing uses)
261
262	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
263	mskql $4,$16,$4		# U : clear relevant parts of the quad
264	subq $16,$3,$16		# E : $16 is new aligned destination
265	bis $2,$4,$1		# E : Final bytes
266
267	nop
268	stq_u $1,0($5)		# L : Store result
269	nop
270	nop
271
272.align 4
273aligned:
274	/*
275	 * We are now guaranteed to be quad aligned, with at least
276	 * one partial quad to write.
277	 */
278
279	sra $18,3,$3		# U : Number of remaining quads to write
280	and $18,7,$18		# E : Number of trailing bytes to write
281	bis $16,$16,$5		# E : Save dest address
282	beq $3,no_quad		# U : tail stuff only
283
284	/*
285	 * it's worth the effort to unroll this and use wh64 if possible
286	 * Lifted a bunch of code from clear_user.S
287	 * At this point, entry values are:
288	 * $16	Current destination address
289	 * $5	A copy of $16
290	 * $6	The max quadword address to write to
291	 * $18	Number trailer bytes
292	 * $3	Number quads to write
293	 */
294
295	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
296	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
297	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
298	blt	$4, loop	# U :
299
300	/*
301	 * We know we've got at least 16 quads, minimum of one trip
302	 * through unrolled loop.  Do a quad at a time to get us 0mod64
303	 * aligned.
304	 */
305
306	nop			# E :
307	nop			# E :
308	nop			# E :
309	beq	$1, $bigalign	# U :
310
311$alignmod64:
312	stq	$17, 0($5)	# L :
313	subq	$3, 1, $3	# E : For consistency later
314	addq	$1, 8, $1	# E : Increment towards zero for alignment
315	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)
316
317	nop
318	nop
319	addq	$5, 8, $5	# E : Inc address
320	blt	$1, $alignmod64	# U :
321
322$bigalign:
323	/*
324	 * $3 - number quads left to go
325	 * $5 - target address (aligned 0mod64)
326	 * $17 - mask of stuff to store
327	 * Scratch registers available: $7, $2, $4, $1
328	 * we know that we'll be taking a minimum of one trip through
329 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
330	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
331	 * The wh64 is issued on for the starting destination address for trip +2
332	 * through the loop, and if there are less than two trips left, the target
333	 * address will be for the current trip.
334	 */
335
336$do_wh64:
337	wh64	($4)		# L1 : memory subsystem write hint
338	subq	$3, 24, $2	# E : For determining future wh64 addresses
339	stq	$17, 0($5)	# L :
340	nop			# E :
341
342	addq	$5, 128, $4	# E : speculative target of next wh64
343	stq	$17, 8($5)	# L :
344	stq	$17, 16($5)	# L :
345	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)
346
347	stq	$17, 24($5)	# L :
348	stq	$17, 32($5)	# L :
349	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
350	nop
351
352	stq	$17, 40($5)	# L :
353	stq	$17, 48($5)	# L :
354	subq	$3, 16, $2	# E : Repeat the loop at least once more?
355	nop
356
357	stq	$17, 56($5)	# L :
358	addq	$5, 64, $5	# E :
359	subq	$3, 8, $3	# E :
360	bge	$2, $do_wh64	# U :
361
362	nop
363	nop
364	nop
365	beq	$3, no_quad	# U : Might have finished already
366
367.align 4
368	/*
369	 * Simple loop for trailing quadwords, or for small amounts
370	 * of data (where we can't use an unrolled loop and wh64)
371	 */
372loop:
373	stq $17,0($5)		# L :
374	subq $3,1,$3		# E : Decrement number quads left
375	addq $5,8,$5		# E : Inc address
376	bne $3,loop		# U : more?
377
378no_quad:
379	/*
380	 * Write 0..7 trailing bytes.
381	 */
382	nop			# E :
383	beq $18,end		# U : All done?
384	ldq $7,0($5)		# L :
385	mskqh $7,$6,$2		# U : Mask final quad
386
387	insqh $17,$6,$4		# U : New bits
388	bis $2,$4,$1		# E : Put it all together
389	stq $1,0($5)		# L : And back to memory
390	ret $31,($26),1		# L0 :
391
392within_one_quad:
393	ldq_u $1,0($16)		# L :
394	insql $17,$16,$2	# U : New bits
395	mskql $1,$16,$4		# U : Clear old
396	bis $2,$4,$2		# E : New result
397
398	mskql $2,$6,$4		# U :
399	mskqh $1,$6,$2		# U :
400	bis $2,$4,$1		# E :
401	stq_u $1,0($16)		# L :
402
403end:
404	nop
405	nop
406	nop
407	ret $31,($26),1		# L0 :
408	.end __constant_c_memset
 
409
410	/*
411	 * This is a replicant of the __constant_c_memset code, rescheduled
412	 * to mask stalls.  Note that entry point names also had to change
413	 */
	.align 5
	.ent __memsetw

__memsetw:
	.frame $30,0,$26,0
	.prologue 0

	/*
	 * void *__memsetw(void *dest, unsigned short c, size_t n)
	 *
	 * In:      $16 = dest, $17 = 16-bit fill value, $18 = byte count
	 * Out:     $0  = dest, returned unchanged
	 * Scratch: $1-$7; leaf routine, no stack frame
	 *
	 * NOTE(review): the fill value is replicated at even byte offsets,
	 * so a 2-byte-aligned dest is presumably assumed -- confirm callers.
	 */

	inswl $17,0,$5		# U : 000000000000c1c2
	inswl $17,2,$2		# U : 00000000c1c20000
	bis $16,$16,$0		# E : return value
	addq	$18,$16,$6	# E : max address to write to

	ble $18, end_w		# U : zero length requested?
	inswl	$17,4,$3	# U : 0000c1c200000000
	inswl	$17,6,$4	# U : c1c2000000000000
	xor	$16,$6,$1	# E : will complete write be within one quadword?

	or	$2,$5,$2	# E : 00000000c1c2c1c2
	or	$3,$4,$17	# E : c1c2c1c200000000
	bic	$1,7,$1		# E : zero iff start and end share one quadword
	and	$16,7,$3	# E : Target addr misalignment

	or	$17,$2,$17	# E : c1c2c1c2c1c2c1c2 (full quadword of pattern)
	beq $1,within_quad_w	# U :
	nop
	beq $3,aligned_w	# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword:
	 * merge the pattern into the first partial quad (keeping the old
	 * bytes below the start offset), then fall through quad-aligned.
	 */
	ldq_u $4,0($16)		# L : Fetch first partial
	bis $16,$16,$5		# E : Save the address
	insql $17,$16,$2	# U : Insert new bytes
	subq $3,8,$3		# E : Invert (for addressing uses)

	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
	mskql $4,$16,$4		# U : clear relevant parts of the quad
	subq $16,$3,$16		# E : $16 is new aligned destination
	bis $2,$4,$1		# E : Final bytes

	nop
	stq_u $1,0($5)		# L : Store result
	nop
	nop

.align 4
aligned_w:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */

	sra $18,3,$3		# U : Number of remaining quads to write
	and $18,7,$18		# E : Number of trailing bytes to write
	bis $16,$16,$5		# E : Save dest address
	beq $3,no_quad_w	# U : tail stuff only

	/*
	 * it's worth the effort to unroll this and use wh64 if possible
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 * $16	Current destination address
	 * $5	A copy of $16
	 * $6	The max quadword address to write to
	 * $18	Number trailer bytes
	 * $3	Number quads to write
	 */

	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop_w	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */

	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign_w	# U :

$alignmod64_w:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64_w	# U :

$bigalign_w:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * We know that we'll be taking a minimum of one trip through.
	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future.
	 * The wh64 is issued for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */

$do_wh64_w:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64_w	# U :

	nop
	nop
	nop
	beq	$3, no_quad_w	# U : Might have finished already

.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64)
	 */
loop_w:
	stq $17,0($5)		# L :
	subq $3,1,$3		# E : Decrement number quads left
	addq $5,8,$5		# E : Inc address
	bne $3,loop_w		# U : more?

no_quad_w:
	/*
	 * Write 0..7 trailing bytes: read-modify-write the final quad,
	 * replacing only the bytes below the end offset ($6 & 7).
	 */
	nop			# E :
	beq $18,end_w		# U : All done?
	ldq $7,0($5)		# L :
	mskqh $7,$6,$2		# U : Mask final quad

	insqh $17,$6,$4		# U : New bits
	bis $2,$4,$1		# E : Put it all together
	stq $1,0($5)		# L : And back to memory
	ret $31,($26),1		# L0 :

within_quad_w:
	/*
	 * Entire fill fits inside one quadword: merge pattern bytes
	 * between the start and end offsets into the existing quad.
	 */
	ldq_u $1,0($16)		# L :
	insql $17,$16,$2	# U : New bits
	mskql $1,$16,$4		# U : Clear old
	bis $2,$4,$2		# E : New result

	mskql $2,$6,$4		# U : keep new bytes below the end offset
	mskqh $1,$6,$2		# U : keep old bytes at/above the end offset
	bis $2,$4,$1		# E :
	stq_u $1,0($16)		# L :

end_w:
	nop
	nop
	nop
	ret $31,($26),1		# L0 :

	.end __memsetw
 

/*
 * Standard entry-point aliases: ___memset (defined above) is the generic
 * byte-fill routine; these equates give it the names callers link against.
 */
memset = ___memset
__memset = ___memset