1
1
; Verify that the HSA code-object-v2 metadata note emitted by llc reports the
; expected code properties (kernarg sizes, register counts, spill counts) for
; three AMDGPU generations: gfx700 (CI), gfx803 (VI), gfx900 (GFX9).
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX700 --check-prefix=NOTES %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX803 --check-prefix=NOTES %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX900 --check-prefix=NOTES %s

; Global (addrspace 1) scratch location: @num_spilled_vgprs loads/stores it
; repeatedly to create many simultaneously-live values.
@var = addrspace(1) global float 0.0
5
7
; CHECK: ---
; CHECK:  Version: [ 1, 0 ]

; CHECK:  Kernels:

; CHECK:    - Name:            test
; CHECK:      SymbolName:      'test@kd'
; CHECK:      CodeProps:
; CHECK:        KernargSegmentSize:   24
; CHECK:        GroupSegmentFixedSize: 0
; CHECK:        PrivateSegmentFixedSize: 0
; CHECK:        KernargSegmentAlign:  8
; CHECK:        WavefrontSize:        64
; CHECK:        NumSGPRs:             6
; GFX700:       NumVGPRs:             4
; GFX803:       NumVGPRs:             6
; GFX900:       NumVGPRs:             6
; CHECK:        MaxFlatWorkGroupSize: 256
24
define amdgpu_kernel void @test (
25
25
half addrspace (1 )* %r ,
26
26
half addrspace (1 )* %a ,
@@ -32,3 +32,111 @@ entry:
32
32
store half %r.val , half addrspace (1 )* %r
33
33
ret void
34
34
}
35
+
36
; CHECK:    - Name:       num_spilled_sgprs
; CHECK:      SymbolName: 'num_spilled_sgprs@kd'
; CHECK:      CodeProps:
; CHECK:        NumSpilledSGPRs: 41
; Kernel taking 16 global pointers plus 16 i32 scalars. Attribute group #0
; (see the attributes at the bottom of the file) caps the SGPR budget via
; "amdgpu-num-sgpr"="14"; keeping all incoming scalar arguments live at once
; therefore forces SGPR spills, and the metadata note must report them as
; NumSpilledSGPRs. NOTE(review): the checked spill count depends on exact
; statement order / register allocation — do not reorder these stores.
define amdgpu_kernel void @num_spilled_sgprs(
    i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %out2,
    i32 addrspace(1)* %out3, i32 addrspace(1)* %out4, i32 addrspace(1)* %out5,
    i32 addrspace(1)* %out6, i32 addrspace(1)* %out7, i32 addrspace(1)* %out8,
    i32 addrspace(1)* %out9, i32 addrspace(1)* %outa, i32 addrspace(1)* %outb,
    i32 addrspace(1)* %outc, i32 addrspace(1)* %outd, i32 addrspace(1)* %oute,
    i32 addrspace(1)* %outf, i32 %in0, i32 %in1, i32 %in2, i32 %in3, i32 %in4,
    i32 %in5, i32 %in6, i32 %in7, i32 %in8, i32 %in9, i32 %ina, i32 %inb,
    i32 %inc, i32 %ind, i32 %ine, i32 %inf) #0 {
entry:
  ; Store every scalar argument so all 16 values are simultaneously live.
  store i32 %in0, i32 addrspace(1)* %out0
  store i32 %in1, i32 addrspace(1)* %out1
  store i32 %in2, i32 addrspace(1)* %out2
  store i32 %in3, i32 addrspace(1)* %out3
  store i32 %in4, i32 addrspace(1)* %out4
  store i32 %in5, i32 addrspace(1)* %out5
  store i32 %in6, i32 addrspace(1)* %out6
  store i32 %in7, i32 addrspace(1)* %out7
  store i32 %in8, i32 addrspace(1)* %out8
  store i32 %in9, i32 addrspace(1)* %out9
  store i32 %ina, i32 addrspace(1)* %outa
  store i32 %inb, i32 addrspace(1)* %outb
  store i32 %inc, i32 addrspace(1)* %outc
  store i32 %ind, i32 addrspace(1)* %outd
  store i32 %ine, i32 addrspace(1)* %oute
  store i32 %inf, i32 addrspace(1)* %outf
  ret void
}
68
+
69
; CHECK:    - Name:       num_spilled_vgprs
; CHECK:      SymbolName: 'num_spilled_vgprs@kd'
; CHECK:      CodeProps:
; CHECK:        NumSpilledVGPRs: 14
; Kernel that performs 31 volatile loads from @var (volatile prevents CSE, so
; all 31 floats stay live across the block) and then stores them all back.
; Attribute group #1 caps the VGPR budget via "amdgpu-num-vgpr"="20", forcing
; VGPR spills that the metadata note must report as NumSpilledVGPRs.
; NOTE(review): the checked spill count depends on exact statement order /
; register allocation — do not reorder these loads and stores.
define amdgpu_kernel void @num_spilled_vgprs() #1 {
  %val0 = load volatile float, float addrspace(1)* @var
  %val1 = load volatile float, float addrspace(1)* @var
  %val2 = load volatile float, float addrspace(1)* @var
  %val3 = load volatile float, float addrspace(1)* @var
  %val4 = load volatile float, float addrspace(1)* @var
  %val5 = load volatile float, float addrspace(1)* @var
  %val6 = load volatile float, float addrspace(1)* @var
  %val7 = load volatile float, float addrspace(1)* @var
  %val8 = load volatile float, float addrspace(1)* @var
  %val9 = load volatile float, float addrspace(1)* @var
  %val10 = load volatile float, float addrspace(1)* @var
  %val11 = load volatile float, float addrspace(1)* @var
  %val12 = load volatile float, float addrspace(1)* @var
  %val13 = load volatile float, float addrspace(1)* @var
  %val14 = load volatile float, float addrspace(1)* @var
  %val15 = load volatile float, float addrspace(1)* @var
  %val16 = load volatile float, float addrspace(1)* @var
  %val17 = load volatile float, float addrspace(1)* @var
  %val18 = load volatile float, float addrspace(1)* @var
  %val19 = load volatile float, float addrspace(1)* @var
  %val20 = load volatile float, float addrspace(1)* @var
  %val21 = load volatile float, float addrspace(1)* @var
  %val22 = load volatile float, float addrspace(1)* @var
  %val23 = load volatile float, float addrspace(1)* @var
  %val24 = load volatile float, float addrspace(1)* @var
  %val25 = load volatile float, float addrspace(1)* @var
  %val26 = load volatile float, float addrspace(1)* @var
  %val27 = load volatile float, float addrspace(1)* @var
  %val28 = load volatile float, float addrspace(1)* @var
  %val29 = load volatile float, float addrspace(1)* @var
  %val30 = load volatile float, float addrspace(1)* @var

  ; Store all 31 values back so every one is live from its load to here.
  store volatile float %val0, float addrspace(1)* @var
  store volatile float %val1, float addrspace(1)* @var
  store volatile float %val2, float addrspace(1)* @var
  store volatile float %val3, float addrspace(1)* @var
  store volatile float %val4, float addrspace(1)* @var
  store volatile float %val5, float addrspace(1)* @var
  store volatile float %val6, float addrspace(1)* @var
  store volatile float %val7, float addrspace(1)* @var
  store volatile float %val8, float addrspace(1)* @var
  store volatile float %val9, float addrspace(1)* @var
  store volatile float %val10, float addrspace(1)* @var
  store volatile float %val11, float addrspace(1)* @var
  store volatile float %val12, float addrspace(1)* @var
  store volatile float %val13, float addrspace(1)* @var
  store volatile float %val14, float addrspace(1)* @var
  store volatile float %val15, float addrspace(1)* @var
  store volatile float %val16, float addrspace(1)* @var
  store volatile float %val17, float addrspace(1)* @var
  store volatile float %val18, float addrspace(1)* @var
  store volatile float %val19, float addrspace(1)* @var
  store volatile float %val20, float addrspace(1)* @var
  store volatile float %val21, float addrspace(1)* @var
  store volatile float %val22, float addrspace(1)* @var
  store volatile float %val23, float addrspace(1)* @var
  store volatile float %val24, float addrspace(1)* @var
  store volatile float %val25, float addrspace(1)* @var
  store volatile float %val26, float addrspace(1)* @var
  store volatile float %val27, float addrspace(1)* @var
  store volatile float %val28, float addrspace(1)* @var
  store volatile float %val29, float addrspace(1)* @var
  store volatile float %val30, float addrspace(1)* @var

  ret void
}
140
+
141
; Artificially small register budgets: #0 caps SGPRs at 14 and #1 caps VGPRs
; at 20, so the kernels above are forced to spill and the spill counts appear
; in the emitted HSA metadata.
attributes #0 = { "amdgpu-num-sgpr"="14" }
attributes #1 = { "amdgpu-num-vgpr"="20" }
0 commit comments