/* Assembler macros for C-SKY.
   Copyright (C) 2018-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdeps/generic/sysdep.h>
#include <features.h>

#ifdef __ASSEMBLER__

# define ASM_SIZE_DIRECTIVE(name) .size name,.-name

/* Define an entry point visible from C.  */
# define ENTRY(name)		\
	.globl name;		\
	.type name,@function;	\
	.align 4;		\
	name##:;		\
	cfi_startproc;		\
	CALL_MCOUNT

# undef  END
# define END(name)		\
	cfi_endproc;		\
	ASM_SIZE_DIRECTIVE(name)
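
/* A minimal usage sketch (not part of this header), for a hypothetical
   assembly function `foo':

	ENTRY (foo)
		movi	a0, 0
		rts
	END (foo)

   ENTRY emits the .globl/.type directives, the alignment, the label and
   the CFI prologue; END closes the CFI region and lets
   ASM_SIZE_DIRECTIVE record the symbol size.  */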

/* If compiled for profiling, call `mcount' at the start of each function.  */
# ifdef PROF
#  ifdef __PIC__
#   define CALL_MCOUNT				\
	subi	sp, 4;				\
	stw	lr, (sp, 0);			\
	grs	t0, .Lgetpc;			\
.Lgetpc:					\
	lrw	gb, .Lgetpc@GOTPC;		\
	addu	gb, t0;				\
	lrw	t1, _mcount@PLT;		\
	ldr.w	t0, (gb, t1 << 0);		\
	jmp	t0;
#  else
#   define CALL_MCOUNT				\
	subi	sp, 4;				\
	stw	lr, (sp, 0);			\
	jbsr	_mcount;
#  endif
# else
#  define CALL_MCOUNT	/* Do nothing.  */
# endif
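
/* For reference: when PROF is defined, ENTRY places CALL_MCOUNT right
   after the function label, so the sequence above runs on every entry.
   The PIC variant saves lr, forms the GOT base in gb from the address
   of .Lgetpc plus the .Lgetpc@GOTPC constant, and loads the address of
   _mcount from its GOT slot before transferring to it; the non-PIC
   variant saves lr and calls _mcount with jbsr.  Without PROF the
   macro expands to nothing.  */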

# if defined (__CK860__)
/* Instruction fetch is faster when the label is aligned to 16 bytes.
   Fill the padding with nop instructions to avoid an extra jump.  */
#  define LABLE_ALIGN	\
	.balignw 16, 0x6c03

#  define PRE_BNEZAD(R)

#  define BNEZAD(R, L)	\
	bnezad	R, L
# else
#  define LABLE_ALIGN	\
	.balignw 8, 0x6c03

#  define PRE_BNEZAD(R)	\
	subi	R, 1

#  define BNEZAD(R, L)	\
	bnez	R, L
# endif
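
/* A sketch of how these macros are meant to be combined in a counted
   loop (the register and label names are illustrative):

	LABLE_ALIGN
.Lloop:
	... loop body ...
	PRE_BNEZAD (t0)
	BNEZAD (t0, .Lloop)

   On CK860 this becomes a single decrement-and-branch (bnezad)
   instruction; on other cores it expands to a subi/bnez pair.  */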

#endif