; Test vector loads.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

; Test v16i8 loads.
; Load a full 128-bit v16i8 vector; should be a single VL from the pointer arg.
define <16 x i8> @f1(<16 x i8> *%ptr) {
; CHECK-LABEL: f1:
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <16 x i8>, <16 x i8> *%ptr
  ret <16 x i8> %ret
}

; Test v8i16 loads.
; Load a full 128-bit v8i16 vector; should be a single VL from the pointer arg.
define <8 x i16> @f2(<8 x i16> *%ptr) {
; CHECK-LABEL: f2:
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <8 x i16>, <8 x i16> *%ptr
  ret <8 x i16> %ret
}

; Test v4i32 loads.
; Load a full 128-bit v4i32 vector; should be a single VL from the pointer arg.
define <4 x i32> @f3(<4 x i32> *%ptr) {
; CHECK-LABEL: f3:
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <4 x i32>, <4 x i32> *%ptr
  ret <4 x i32> %ret
}

; Test v2i64 loads.
; Load a full 128-bit v2i64 vector; should be a single VL from the pointer arg.
define <2 x i64> @f4(<2 x i64> *%ptr) {
; CHECK-LABEL: f4:
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <2 x i64>, <2 x i64> *%ptr
  ret <2 x i64> %ret
}

; Test v4f32 loads.
; Load a full 128-bit v4f32 vector; should be a single VL from the pointer arg.
define <4 x float> @f5(<4 x float> *%ptr) {
; CHECK-LABEL: f5:
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <4 x float>, <4 x float> *%ptr
  ret <4 x float> %ret
}

; Test v2f64 loads.
; Load a full 128-bit v2f64 vector; should be a single VL from the pointer arg.
define <2 x double> @f6(<2 x double> *%ptr) {
; CHECK-LABEL: f6:
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <2 x double>, <2 x double> *%ptr
  ret <2 x double> %ret
}

; Test the highest aligned in-range offset.
; Offset 255 * 16 = 4080 is the largest aligned displacement that still fits
; VL's unsigned 12-bit displacement field, so no separate addressing is needed.
define <16 x i8> @f7(<16 x i8> *%base) {
; CHECK-LABEL: f7:
; CHECK: vl %v24, 4080(%r2)
; CHECK: br %r14
  %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 255
  %ret = load <16 x i8>, <16 x i8> *%ptr
  ret <16 x i8> %ret
}

; Test the highest unaligned in-range offset.
; Offset 4095 is the top of the unsigned 12-bit displacement range; the
; unaligned (align 1) access should still fold into the VL displacement.
define <16 x i8> @f8(i8 *%base) {
; CHECK-LABEL: f8:
; CHECK: vl %v24, 4095(%r2)
; CHECK: br %r14
  %addr = getelementptr i8, i8 *%base, i64 4095
  %ptr = bitcast i8 *%addr to <16 x i8> *
  %ret = load <16 x i8>, <16 x i8> *%ptr, align 1
  ret <16 x i8> %ret
}

; Test the next offset up, which requires separate address logic.
; Offset 256 * 16 = 4096 no longer fits the 12-bit displacement, so the base
; must first be adjusted with an AGHI before the VL.
define <16 x i8> @f9(<16 x i8> *%base) {
; CHECK-LABEL: f9:
; CHECK: aghi %r2, 4096
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 256
  %ret = load <16 x i8>, <16 x i8> *%ptr
  ret <16 x i8> %ret
}

; Test negative offsets, which also require separate address logic.
; VL's displacement is unsigned, so a negative offset (-16) cannot be folded
; and needs an explicit AGHI on the base register.
define <16 x i8> @f10(<16 x i8> *%base) {
; CHECK-LABEL: f10:
; CHECK: aghi %r2, -16
; CHECK: vl %v24, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 -1
  %ret = load <16 x i8>, <16 x i8> *%ptr
  ret <16 x i8> %ret
}

; Check that indexes are allowed.
; A variable index should fold into VL's index register rather than being
; added to the base with a separate instruction.
define <16 x i8> @f11(i8 *%base, i64 %index) {
; CHECK-LABEL: f11:
; CHECK: vl %v24, 0(%r3,%r2)
; CHECK: br %r14
  %addr = getelementptr i8, i8 *%base, i64 %index
  %ptr = bitcast i8 *%addr to <16 x i8> *
  %ret = load <16 x i8>, <16 x i8> *%ptr, align 1
  ret <16 x i8> %ret
}

; Test v2i8 loads.
; A 16-bit <2 x i8> load should use a halfword replicating load (VLREPH).
define <2 x i8> @f12(<2 x i8> *%ptr) {
; CHECK-LABEL: f12:
; CHECK: vlreph %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <2 x i8>, <2 x i8> *%ptr
  ret <2 x i8> %ret
}

; Test v4i8 loads.
; A 32-bit <4 x i8> load should use a word replicating load (VLREPF).
define <4 x i8> @f13(<4 x i8> *%ptr) {
; CHECK-LABEL: f13:
; CHECK: vlrepf %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <4 x i8>, <4 x i8> *%ptr
  ret <4 x i8> %ret
}

; Test v8i8 loads.
; A 64-bit <8 x i8> load should use a doubleword replicating load (VLREPG).
define <8 x i8> @f14(<8 x i8> *%ptr) {
; CHECK-LABEL: f14:
; CHECK: vlrepg %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <8 x i8>, <8 x i8> *%ptr
  ret <8 x i8> %ret
}

; Test v2i16 loads.
; A 32-bit <2 x i16> load should use a word replicating load (VLREPF).
define <2 x i16> @f15(<2 x i16> *%ptr) {
; CHECK-LABEL: f15:
; CHECK: vlrepf %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <2 x i16>, <2 x i16> *%ptr
  ret <2 x i16> %ret
}

; Test v4i16 loads.
; A 64-bit <4 x i16> load should use a doubleword replicating load (VLREPG).
define <4 x i16> @f16(<4 x i16> *%ptr) {
; CHECK-LABEL: f16:
; CHECK: vlrepg %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <4 x i16>, <4 x i16> *%ptr
  ret <4 x i16> %ret
}

; Test v2i32 loads.
; A 64-bit <2 x i32> load should use a doubleword replicating load (VLREPG).
define <2 x i32> @f17(<2 x i32> *%ptr) {
; CHECK-LABEL: f17:
; CHECK: vlrepg %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <2 x i32>, <2 x i32> *%ptr
  ret <2 x i32> %ret
}

; Test v2f32 loads.
; A 64-bit <2 x float> load should use a doubleword replicating load (VLREPG).
define <2 x float> @f18(<2 x float> *%ptr) {
; CHECK-LABEL: f18:
; CHECK: vlrepg %v24, 0(%r2)
; CHECK: br %r14
  %ret = load <2 x float>, <2 x float> *%ptr
  ret <2 x float> %ret
}