ebevhan created this revision.
ebevhan added reviewers: rjmccall, leonardchan, bjope.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

This patch adds codegen to Clang for fixed-point shift operations.

A shift takes its fixed-point semantics from the LHS alone: the RHS is
an ordinary integer shift amount, so it is cast to the width of the LHS
rather than converted to a common fixed-point type. Saturating left
shifts lower to the llvm.sshl.sat/llvm.ushl.sat intrinsics, and padded
unsigned types additionally clamp negative results to zero.
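
For context, a minimal sketch of the intended semantics (values are
illustrative, assuming the common configuration where short _Accum is a
16-bit container with 7 fractional bits; SACCUM_MAX is from <stdfix.h>):

  short _Accum sa = 2.5hk;        // container = 320   (2.5 * 2^7)
  short _Accum b = sa << 2;       // plain shl:  10.0hk (container 1280)
  short _Accum c = sa >> 1;       // ashr:       1.25hk (container 160)
  _Sat short _Accum d =
      (_Sat short _Accum)sa << 8; // llvm.sshl.sat: clamps to SACCUM_MAX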


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D83294

Files:
  clang/lib/CodeGen/CGExprScalar.cpp
  clang/test/Frontend/fixed_point_compound.c
  clang/test/Frontend/fixed_point_shift.c

Index: clang/test/Frontend/fixed_point_shift.c
===================================================================
--- clang/test/Frontend/fixed_point_shift.c
+++ clang/test/Frontend/fixed_point_shift.c
@@ -35,3 +35,362 @@
 _Sat unsigned _Accum sua_const2 = (_Sat unsigned _Accum)128.0uk << 10;
 // SIGNED-DAG:   @sua_const2 = {{.*}}global i32 -1
 // UNSIGNED-DAG: @sua_const2 = {{.*}}global i32 2147483647
+
+// CHECK-LABEL: @SignedLeftShift(
+void SignedLeftShift() {
+  short _Accum sa;
+  _Accum a;
+  long _Accum la;
+
+  short _Fract sf;
+  _Fract f;
+  long _Fract lf;
+
+  int i;
+  unsigned u;
+
+  // CHECK:         [[TMP0:%.*]] = load i16, i16* %sa, align 2
+  // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+  // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]]
+  // CHECK-NEXT:    store i16 [[TMP3]], i16* %sa, align 2
+  sa = sa << i;
+
+  // CHECK:         [[TMP4:%.*]] = load i32, i32* %a, align 4
+  // CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP4]], [[TMP5]]
+  // CHECK-NEXT:    store i32 [[TMP6]], i32* %a, align 4
+  a = a << i;
+
+  // CHECK:         [[TMP7:%.*]] = load i64, i64* %la, align 8
+  // CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+  // CHECK-NEXT:    [[TMP10:%.*]] = shl i64 [[TMP7]], [[TMP9]]
+  // CHECK-NEXT:    store i64 [[TMP10]], i64* %la, align 8
+  la = la << i;
+
+  // CHECK:         [[TMP11:%.*]] = load i8, i8* %sf, align 1
+  // CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP13:%.*]] = trunc i32 [[TMP12]] to i8
+  // CHECK-NEXT:    [[TMP14:%.*]] = shl i8 [[TMP11]], [[TMP13]]
+  // CHECK-NEXT:    store i8 [[TMP14]], i8* %sf, align 1
+  sf = sf << i;
+
+  // CHECK:         [[TMP15:%.*]] = load i16, i16* %f, align 2
+  // CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
+  // CHECK-NEXT:    [[TMP18:%.*]] = shl i16 [[TMP15]], [[TMP17]]
+  // CHECK-NEXT:    store i16 [[TMP18]], i16* %f, align 2
+  f = f << i;
+
+  // CHECK:         [[TMP19:%.*]] = load i32, i32* %lf, align 4
+  // CHECK-NEXT:    [[TMP20:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP19]], [[TMP20]]
+  // CHECK-NEXT:    store i32 [[TMP21]], i32* %lf, align 4
+  lf = lf << i;
+
+  // CHECK:         [[TMP22:%.*]] = load i32, i32* %a, align 4
+  // CHECK-NEXT:    [[TMP23:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP24:%.*]] = shl i32 [[TMP22]], [[TMP23]]
+  // CHECK-NEXT:    store i32 [[TMP24]], i32* %a, align 4
+  a = a << u;
+
+  // CHECK:         [[TMP25:%.*]] = load i16, i16* %f, align 2
+  // CHECK-NEXT:    [[TMP26:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
+  // CHECK-NEXT:    [[TMP28:%.*]] = shl i16 [[TMP25]], [[TMP27]]
+  // CHECK-NEXT:    store i16 [[TMP28]], i16* %f, align 2
+  f = f << u;
+}
+
+// CHECK-LABEL: @UnsignedLeftShift(
+void UnsignedLeftShift() {
+  unsigned short _Accum usa;
+  unsigned _Accum ua;
+  unsigned long _Accum ula;
+
+  unsigned short _Fract usf;
+  unsigned _Fract uf;
+  unsigned long _Fract ulf;
+
+  int i;
+  unsigned u;
+
+  // CHECK:         [[TMP0:%.*]] = load i16, i16* %usa, align 2
+  // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+  // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]]
+  // CHECK-NEXT:    store i16 [[TMP3]], i16* %usa, align 2
+  usa = usa << i;
+
+  // CHECK:         [[TMP4:%.*]] = load i32, i32* %ua, align 4
+  // CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP4]], [[TMP5]]
+  // CHECK-NEXT:    store i32 [[TMP6]], i32* %ua, align 4
+  ua = ua << i;
+
+  // CHECK:         [[TMP7:%.*]] = load i64, i64* %ula, align 8
+  // CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+  // CHECK-NEXT:    [[TMP10:%.*]] = shl i64 [[TMP7]], [[TMP9]]
+  // CHECK-NEXT:    store i64 [[TMP10]], i64* %ula, align 8
+  ula = ula << i;
+
+  // CHECK:         [[TMP11:%.*]] = load i8, i8* %usf, align 1
+  // CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP13:%.*]] = trunc i32 [[TMP12]] to i8
+  // CHECK-NEXT:    [[TMP14:%.*]] = shl i8 [[TMP11]], [[TMP13]]
+  // CHECK-NEXT:    store i8 [[TMP14]], i8* %usf, align 1
+  usf = usf << i;
+
+  // CHECK:         [[TMP15:%.*]] = load i16, i16* %uf, align 2
+  // CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
+  // CHECK-NEXT:    [[TMP18:%.*]] = shl i16 [[TMP15]], [[TMP17]]
+  // CHECK-NEXT:    store i16 [[TMP18]], i16* %uf, align 2
+  uf = uf << i;
+
+  // CHECK:         [[TMP19:%.*]] = load i32, i32* %ulf, align 4
+  // CHECK-NEXT:    [[TMP20:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP19]], [[TMP20]]
+  // CHECK-NEXT:    store i32 [[TMP21]], i32* %ulf, align 4
+  ulf = ulf << i;
+
+  // CHECK:         [[TMP22:%.*]] = load i32, i32* %ua, align 4
+  // CHECK-NEXT:    [[TMP23:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP24:%.*]] = shl i32 [[TMP22]], [[TMP23]]
+  // CHECK-NEXT:    store i32 [[TMP24]], i32* %ua, align 4
+  ua = ua << u;
+
+  // CHECK:         [[TMP25:%.*]] = load i16, i16* %uf, align 2
+  // CHECK-NEXT:    [[TMP26:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
+  // CHECK-NEXT:    [[TMP28:%.*]] = shl i16 [[TMP25]], [[TMP27]]
+  // CHECK-NEXT:    store i16 [[TMP28]], i16* %uf, align 2
+  uf = uf << u;
+}
+
+// CHECK-LABEL: @SignedRightShift(
+void SignedRightShift() {
+  short _Accum sa;
+  _Accum a;
+  long _Accum la;
+
+  short _Fract sf;
+  _Fract f;
+  long _Fract lf;
+
+  int i;
+  unsigned u;
+
+  // CHECK:         [[TMP0:%.*]] = load i16, i16* %sa, align 2
+  // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+  // CHECK-NEXT:    [[TMP3:%.*]] = ashr i16 [[TMP0]], [[TMP2]]
+  // CHECK-NEXT:    store i16 [[TMP3]], i16* %sa, align 2
+  sa = sa >> i;
+
+  // CHECK:         [[TMP4:%.*]] = load i32, i32* %a, align 4
+  // CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP6:%.*]] = ashr i32 [[TMP4]], [[TMP5]]
+  // CHECK-NEXT:    store i32 [[TMP6]], i32* %a, align 4
+  a = a >> i;
+
+  // CHECK:         [[TMP7:%.*]] = load i64, i64* %la, align 8
+  // CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+  // CHECK-NEXT:    [[TMP10:%.*]] = ashr i64 [[TMP7]], [[TMP9]]
+  // CHECK-NEXT:    store i64 [[TMP10]], i64* %la, align 8
+  la = la >> i;
+
+  // CHECK:         [[TMP11:%.*]] = load i8, i8* %sf, align 1
+  // CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP13:%.*]] = trunc i32 [[TMP12]] to i8
+  // CHECK-NEXT:    [[TMP14:%.*]] = ashr i8 [[TMP11]], [[TMP13]]
+  // CHECK-NEXT:    store i8 [[TMP14]], i8* %sf, align 1
+  sf = sf >> i;
+
+  // CHECK:         [[TMP15:%.*]] = load i16, i16* %f, align 2
+  // CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
+  // CHECK-NEXT:    [[TMP18:%.*]] = ashr i16 [[TMP15]], [[TMP17]]
+  // CHECK-NEXT:    store i16 [[TMP18]], i16* %f, align 2
+  f = f >> i;
+
+  // CHECK:         [[TMP19:%.*]] = load i32, i32* %lf, align 4
+  // CHECK-NEXT:    [[TMP20:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP21:%.*]] = ashr i32 [[TMP19]], [[TMP20]]
+  // CHECK-NEXT:    store i32 [[TMP21]], i32* %lf, align 4
+  lf = lf >> i;
+
+  // CHECK:         [[TMP22:%.*]] = load i32, i32* %a, align 4
+  // CHECK-NEXT:    [[TMP23:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP24:%.*]] = ashr i32 [[TMP22]], [[TMP23]]
+  // CHECK-NEXT:    store i32 [[TMP24]], i32* %a, align 4
+  a = a >> u;
+
+  // CHECK:         [[TMP25:%.*]] = load i16, i16* %f, align 2
+  // CHECK-NEXT:    [[TMP26:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
+  // CHECK-NEXT:    [[TMP28:%.*]] = ashr i16 [[TMP25]], [[TMP27]]
+  // CHECK-NEXT:    store i16 [[TMP28]], i16* %f, align 2
+  f = f >> u;
+}
+
+// CHECK-LABEL: @UnsignedRightShift(
+void UnsignedRightShift() {
+  unsigned short _Accum usa;
+  unsigned _Accum ua;
+  unsigned long _Accum ula;
+
+  unsigned short _Fract usf;
+  unsigned _Fract uf;
+  unsigned long _Fract ulf;
+
+  int i;
+  unsigned u;
+
+  // CHECK:         [[TMP0:%.*]] = load i16, i16* %usa, align 2
+  // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+  // SIGNED-NEXT:   [[TMP3:%.*]] = lshr i16 [[TMP0]], [[TMP2]]
+  // UNSIGNED-NEXT: [[TMP3:%.*]] = ashr i16 [[TMP0]], [[TMP2]]
+  // CHECK-NEXT:    store i16 [[TMP3]], i16* %usa, align 2
+  usa = usa >> i;
+
+  // CHECK:         [[TMP4:%.*]] = load i32, i32* %ua, align 4
+  // CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* %i, align 4
+  // SIGNED-NEXT:   [[TMP6:%.*]] = lshr i32 [[TMP4]], [[TMP5]]
+  // UNSIGNED-NEXT: [[TMP6:%.*]] = ashr i32 [[TMP4]], [[TMP5]]
+  // CHECK-NEXT:    store i32 [[TMP6]], i32* %ua, align 4
+  ua = ua >> i;
+
+  // CHECK:         [[TMP7:%.*]] = load i64, i64* %ula, align 8
+  // CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+  // SIGNED-NEXT:   [[TMP10:%.*]] = lshr i64 [[TMP7]], [[TMP9]]
+  // UNSIGNED-NEXT: [[TMP10:%.*]] = ashr i64 [[TMP7]], [[TMP9]]
+  // CHECK-NEXT:    store i64 [[TMP10]], i64* %ula, align 8
+  ula = ula >> i;
+
+  // CHECK:         [[TMP11:%.*]] = load i8, i8* %usf, align 1
+  // CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP13:%.*]] = trunc i32 [[TMP12]] to i8
+  // SIGNED-NEXT:   [[TMP14:%.*]] = lshr i8 [[TMP11]], [[TMP13]]
+  // UNSIGNED-NEXT: [[TMP14:%.*]] = ashr i8 [[TMP11]], [[TMP13]]
+  // CHECK-NEXT:    store i8 [[TMP14]], i8* %usf, align 1
+  usf = usf >> i;
+
+  // CHECK:         [[TMP15:%.*]] = load i16, i16* %uf, align 2
+  // CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
+  // SIGNED-NEXT:   [[TMP18:%.*]] = lshr i16 [[TMP15]], [[TMP17]]
+  // UNSIGNED-NEXT: [[TMP18:%.*]] = ashr i16 [[TMP15]], [[TMP17]]
+  // CHECK-NEXT:    store i16 [[TMP18]], i16* %uf, align 2
+  uf = uf >> i;
+
+  // CHECK:         [[TMP19:%.*]] = load i32, i32* %ulf, align 4
+  // CHECK-NEXT:    [[TMP20:%.*]] = load i32, i32* %i, align 4
+  // SIGNED-NEXT:   [[TMP21:%.*]] = lshr i32 [[TMP19]], [[TMP20]]
+  // UNSIGNED-NEXT: [[TMP21:%.*]] = ashr i32 [[TMP19]], [[TMP20]]
+  // CHECK-NEXT:    store i32 [[TMP21]], i32* %ulf, align 4
+  ulf = ulf >> i;
+
+  // CHECK:         [[TMP22:%.*]] = load i32, i32* %ua, align 4
+  // CHECK-NEXT:    [[TMP23:%.*]] = load i32, i32* %u, align 4
+  // SIGNED-NEXT:   [[TMP24:%.*]] = lshr i32 [[TMP22]], [[TMP23]]
+  // UNSIGNED-NEXT: [[TMP24:%.*]] = ashr i32 [[TMP22]], [[TMP23]]
+  // CHECK-NEXT:    store i32 [[TMP24]], i32* %ua, align 4
+  ua = ua >> u;
+
+  // CHECK:         [[TMP25:%.*]] = load i16, i16* %uf, align 2
+  // CHECK-NEXT:    [[TMP26:%.*]] = load i32, i32* %u, align 4
+  // CHECK-NEXT:    [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
+  // SIGNED-NEXT:   [[TMP28:%.*]] = lshr i16 [[TMP25]], [[TMP27]]
+  // UNSIGNED-NEXT: [[TMP28:%.*]] = ashr i16 [[TMP25]], [[TMP27]]
+  // CHECK-NEXT:    store i16 [[TMP28]], i16* %uf, align 2
+  uf = uf >> u;
+}
+
+// CHECK-LABEL: @SaturatedLeftShift(
+void SaturatedLeftShift() {
+  _Sat short _Accum ssa;
+  _Sat _Accum sa;
+
+  _Sat short _Fract ssf;
+  _Sat _Fract sf;
+
+  _Sat unsigned short _Accum susa;
+  _Sat unsigned _Accum sua;
+
+  _Sat unsigned short _Fract susf;
+  _Sat unsigned _Fract suf;
+
+  int i;
+
+  // CHECK:         [[TMP0:%.*]] = load i16, i16* %ssa, align 2
+  // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+  // CHECK-NEXT:    [[TMP3:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP0]], i16 [[TMP2]])
+  // CHECK-NEXT:    store i16 [[TMP3]], i16* %ssa, align 2
+  ssa = ssa << i;
+
+  // CHECK:         [[TMP4:%.*]] = load i32, i32* %sa, align 4
+  // CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.sshl.sat.i32(i32 [[TMP4]], i32 [[TMP5]])
+  // CHECK-NEXT:    store i32 [[TMP6]], i32* %sa, align 4
+  sa = sa << i;
+
+  // CHECK:         [[TMP7:%.*]] = load i8, i8* %ssf, align 1
+  // CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP9:%.*]] = trunc i32 [[TMP8]] to i8
+  // CHECK-NEXT:    [[TMP10:%.*]] = call i8 @llvm.sshl.sat.i8(i8 [[TMP7]], i8 [[TMP9]])
+  // CHECK-NEXT:    store i8 [[TMP10]], i8* %ssf, align 1
+  ssf = ssf << i;
+
+  // CHECK:         [[TMP11:%.*]] = load i16, i16* %sf, align 2
+  // CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP13:%.*]] = trunc i32 [[TMP12]] to i16
+  // CHECK-NEXT:    [[TMP14:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP11]], i16 [[TMP13]])
+  // CHECK-NEXT:    store i16 [[TMP14]], i16* %sf, align 2
+  sf = sf << i;
+
+  // CHECK:         [[TMP15:%.*]] = load i16, i16* %susa, align 2
+  // CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
+  // SIGNED-NEXT:   [[RES:%.*]] = call i16 @llvm.ushl.sat.i16(i16 [[TMP15]], i16 [[TMP17]])
+  // UNSIGNED-NEXT: [[TMP18:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP15]], i16 [[TMP17]])
+  // UNSIGNED-NEXT: [[TMP19:%.*]] = icmp slt i16 [[TMP18]], 0
+  // UNSIGNED-NEXT: [[RES:%.*]] = select i1 [[TMP19]], i16 0, i16 [[TMP18]]
+  // CHECK-NEXT:    store i16 [[RES]], i16* %susa, align 2
+  susa = susa << i;
+
+  // CHECK:         [[TMP19:%.*]] = load i32, i32* %sua, align 4
+  // CHECK-NEXT:    [[TMP20:%.*]] = load i32, i32* %i, align 4
+  // SIGNED-NEXT:   [[RES:%.*]] = call i32 @llvm.ushl.sat.i32(i32 [[TMP19]], i32 [[TMP20]])
+  // UNSIGNED-NEXT: [[TMP22:%.*]] = call i32 @llvm.sshl.sat.i32(i32 [[TMP19]], i32 [[TMP20]])
+  // UNSIGNED-NEXT: [[TMP23:%.*]] = icmp slt i32 [[TMP22]], 0
+  // UNSIGNED-NEXT: [[RES:%.*]] = select i1 [[TMP23]], i32 0, i32 [[TMP22]]
+  // CHECK-NEXT:    store i32 [[RES]], i32* %sua, align 4
+  sua = sua << i;
+
+  // CHECK:         [[TMP22:%.*]] = load i8, i8* %susf, align 1
+  // CHECK-NEXT:    [[TMP23:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP24:%.*]] = trunc i32 [[TMP23]] to i8
+  // SIGNED-NEXT:   [[RES:%.*]] = call i8 @llvm.ushl.sat.i8(i8 [[TMP22]], i8 [[TMP24]])
+  // UNSIGNED-NEXT: [[TMP27:%.*]] = call i8 @llvm.sshl.sat.i8(i8 [[TMP22]], i8 [[TMP24]])
+  // UNSIGNED-NEXT: [[TMP28:%.*]] = icmp slt i8 [[TMP27]], 0
+  // UNSIGNED-NEXT: [[RES:%.*]] = select i1 [[TMP28]], i8 0, i8 [[TMP27]]
+  // CHECK-NEXT:    store i8 [[RES]], i8* %susf, align 1
+  susf = susf << i;
+
+  // CHECK:         [[TMP26:%.*]] = load i16, i16* %suf, align 2
+  // CHECK-NEXT:    [[TMP27:%.*]] = load i32, i32* %i, align 4
+  // CHECK-NEXT:    [[TMP28:%.*]] = trunc i32 [[TMP27]] to i16
+  // SIGNED-NEXT:   [[RES:%.*]] = call i16 @llvm.ushl.sat.i16(i16 [[TMP26]], i16 [[TMP28]])
+  // UNSIGNED-NEXT: [[TMP32:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP26]], i16 [[TMP28]])
+  // UNSIGNED-NEXT: [[TMP33:%.*]] = icmp slt i16 [[TMP32]], 0
+  // UNSIGNED-NEXT: [[RES:%.*]] = select i1 [[TMP33]], i16 0, i16 [[TMP32]]
+  // CHECK-NEXT:    store i16 [[RES]], i16* %suf, align 2
+  suf = suf << i;
+}
Index: clang/test/Frontend/fixed_point_compound.c
===================================================================
--- clang/test/Frontend/fixed_point_compound.c
+++ clang/test/Frontend/fixed_point_compound.c
@@ -372,3 +372,29 @@
   c /= sa;
 }
 
+void Shift() {
+  // CHECK:         [[TMP0:%.*]] = load i32, i32* @i, align 4
+  // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @a, align 4
+  // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[TMP0]]
+  // CHECK-NEXT:    store i32 [[TMP2]], i32* @a, align 4
+  a <<= i;
+
+  // CHECK:         [[TMP3:%.*]] = load i32, i32* @i, align 4
+  // CHECK-NEXT:    [[TMP4:%.*]] = load i16, i16* @suf, align 2
+  // CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP3]] to i16
+  // SIGNED-NEXT:   [[TMP6:%.*]] = call i16 @llvm.ushl.sat.i16(i16 [[TMP4]], i16 [[TMP5]])
+  // SIGNED-NEXT:   store i16 [[TMP6]], i16* @suf, align 2
+  // UNSIGNED-NEXT: [[TMP6:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP4]], i16 [[TMP5]])
+  // UNSIGNED-NEXT: [[TMP7:%.*]] = icmp slt i16 [[TMP6]], 0
+  // UNSIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP7]], i16 0, i16 [[TMP6]]
+  // UNSIGNED-NEXT: store i16 [[SATMIN]], i16* @suf, align 2
+  suf <<= i;
+
+  // CHECK:         [[TMP7:%.*]] = load i32, i32* @i, align 4
+  // CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* @ula, align 8
+  // CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
+  // SIGNED-NEXT:   [[TMP10:%.*]] = lshr i64 [[TMP8]], [[TMP9]]
+  // UNSIGNED-NEXT: [[TMP10:%.*]] = ashr i64 [[TMP8]], [[TMP9]]
+  // CHECK-NEXT:    store i64 [[TMP10]], i64* @ula, align 8
+  ula >>= i;
+}
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -3592,16 +3592,26 @@
   Value *LHS = op.LHS;
   Value *RHS = op.RHS;
 
+  // Shifts are special: the common semantics are not affected by the RHS,
+  // which is just a shift amount. We don't want to widen the LHS merely to
+  // accommodate the integral part of the RHS.
+  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
+                 BinaryOperator::isShiftAssignOp(op.Opcode);
   auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
   auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
   auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
-  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema, true);
+  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(
+      IsShift ? LHSFixedSema : RHSFixedSema, true);
 
   // Convert the operands to the full precision type.
   Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
                                             op.E->getExprLoc());
-  Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
-                                            op.E->getExprLoc());
+  Value *FullRHS;
+  if (IsShift)
+    FullRHS = Builder.CreateIntCast(RHS, FullLHS->getType(), false);
+  else
+    FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
+                                       op.E->getExprLoc());
 
   // Perform the actual operation.
   Value *Result;
@@ -3656,6 +3666,26 @@
         {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
     break;    
   }
+  case BO_ShlAssign:
+  case BO_Shl: {
+    if (CommonFixedSema.isSaturated()) {
+      llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
+                                    ? llvm::Intrinsic::sshl_sat
+                                    : llvm::Intrinsic::ushl_sat;
+      Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
+    } else {
+      Result = Builder.CreateShl(FullLHS, FullRHS);
+    }
+    break;
+  }
+  case BO_ShrAssign:
+  case BO_Shr: {
+    if (CommonFixedSema.isSigned())
+      Result = Builder.CreateAShr(FullLHS, FullRHS);
+    else
+      Result = Builder.CreateLShr(FullLHS, FullRHS);
+    break;
+  }
   case BO_LT:
     return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
                                       : Builder.CreateICmpULT(FullLHS, FullRHS);
@@ -3675,13 +3705,9 @@
     return Builder.CreateICmpEQ(FullLHS, FullRHS);
   case BO_NE:
     return Builder.CreateICmpNE(FullLHS, FullRHS);
-  case BO_Shl:
-  case BO_Shr:
   case BO_Cmp:
   case BO_LAnd:
   case BO_LOr:
-  case BO_ShlAssign:
-  case BO_ShrAssign:
     llvm_unreachable("Found unimplemented fixed point binary operation");
   case BO_PtrMemD:
   case BO_PtrMemI:
@@ -3827,6 +3853,11 @@
 }
 
 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+
+  // TODO: This misses out on the sanitizer check below.
+  if (Ops.isFixedPointOp())
+    return EmitFixedPointBinOp(Ops);
+
   // LLVM requires the LHS and RHS to be the same type: promote or truncate the
   // RHS to the same size as the LHS.
   Value *RHS = Ops.RHS;
@@ -3894,6 +3925,11 @@
 }
 
 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+
+  // TODO: This misses out on the sanitizer check below.
+  if (Ops.isFixedPointOp())
+    return EmitFixedPointBinOp(Ops);
+
   // LLVM requires the LHS and RHS to be the same type: promote or truncate the
   // RHS to the same size as the LHS.
   Value *RHS = Ops.RHS;
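
A note on the padded-unsigned ("UNSIGNED" RUN lines) saturating left
shift checked above: with a padding bit, the largest representable
value is the signed container maximum, so the patch saturates as signed
at the container width and then clamps negative results to zero. A
hedged C model of that sshl.sat + icmp slt + select sequence (the
helper names are hypothetical, and the shift-amount masking is a
simplification of the intrinsic's out-of-range rules):

  #include <stdint.h>

  /* Approximates @llvm.sshl.sat.i16: shift left, saturating to the
     signed 16-bit range. */
  static int16_t sshl_sat_i16(int16_t x, int16_t amt) {
    /* Multiply instead of shifting so negative x stays well-defined C. */
    int64_t wide = (int64_t)x * ((int64_t)1 << (amt & 15));
    if (wide > INT16_MAX) return INT16_MAX; /* saturate high */
    if (wide < INT16_MIN) return INT16_MIN; /* saturate low */
    return (int16_t)wide;
  }

  /* Models the IR emitted for `_Sat unsigned short _Accum << int` when
     the type has a padding bit: signed saturation, then a clamp of
     negative results to zero (the icmp slt + select pair). */
  static int16_t sat_padded_unsigned_shl(int16_t x, int16_t amt) {
    int16_t r = sshl_sat_i16(x, amt);
    return r < 0 ? 0 : r;
  }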