From b727a8260235157a0ebc0cd374cc5c67cd2db7eb Mon Sep 17 00:00:00 2001
From: Michael Gottesman <mgottesman@apple.com>
Date: Sat, 5 Jan 2013 14:19:12 -0800
Subject: [PATCH] Multiprecision subtraction builtins.

We lower each of these builtins into two chained usub.with.overflow
intrinsics, mirroring the existing __builtin_addc* lowering.
---
 include/clang/Basic/Builtins.def       |    4 ++
 lib/CodeGen/CGBuiltin.cpp              |   24 ++++++++++-
 test/CodeGen/builtins-multipercision.c |   73 ++++++++++++++++++++++++++++++++
 3 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/include/clang/Basic/Builtins.def b/include/clang/Basic/Builtins.def
index 6d8afef..63fca6f 100644
--- a/include/clang/Basic/Builtins.def
+++ b/include/clang/Basic/Builtins.def
@@ -930,6 +930,10 @@ BUILTIN(__builtin_addcs, "UsUsCUsCUsCUs*", "n")
 BUILTIN(__builtin_addc, "UiUiCUiCUiCUi*", "n")
 BUILTIN(__builtin_addcl, "ULiULiCULiCULiCULi*", "n")
 BUILTIN(__builtin_addcll, "ULLiULLiCULLiCULLiCULLi*", "n")
+BUILTIN(__builtin_subcs, "UsUsCUsCUsCUs*", "n")
+BUILTIN(__builtin_subc, "UiUiCUiCUiCUi*", "n")
+BUILTIN(__builtin_subcl, "ULiULiCULiCULiCULi*", "n")
+BUILTIN(__builtin_subcll, "ULLiULLiCULLiCULLiCULLi*", "n")
 
 #undef BUILTIN
 #undef LIBBUILTIN
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 9badeaf..2e33122 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -1349,7 +1349,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   case Builtin::BI__builtin_addcs:
   case Builtin::BI__builtin_addc:
   case Builtin::BI__builtin_addcl:
-  case Builtin::BI__builtin_addcll: {
+  case Builtin::BI__builtin_addcll:
+  case Builtin::BI__builtin_subcs:
+  case Builtin::BI__builtin_subc:
+  case Builtin::BI__builtin_subcl:
+  case Builtin::BI__builtin_subcll: {
 
     // We translate all of these builtins from expressions of the form:
     //   int x = ..., y = ..., carryin = ..., carryout, result;
@@ -1375,7 +1379,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     std::pair<llvm::Value*, unsigned> CarryOutPtr =
       EmitPointerWithAlignment(E->getArg(3));
 
-    const llvm::Intrinsic::ID IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
+    llvm::Intrinsic::ID IntrinsicId;
+    switch (BuiltinID) {
+    default: llvm_unreachable("Unknown multiprecision builtin id.");
+    case Builtin::BI__builtin_addcs:
+    case Builtin::BI__builtin_addc:
+    case Builtin::BI__builtin_addcl:
+    case Builtin::BI__builtin_addcll:
+      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+      break;
+    case Builtin::BI__builtin_subcs:
+    case Builtin::BI__builtin_subc:
+    case Builtin::BI__builtin_subcl:
+    case Builtin::BI__builtin_subcll:
+      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
+      break;
+    }
 
     // Construct our resulting LLVM IR expression.
     llvm::Value *Carry1;
diff --git a/test/CodeGen/builtins-multipercision.c b/test/CodeGen/builtins-multipercision.c
index d49d0fd..162a7ae 100644
--- a/test/CodeGen/builtins-multipercision.c
+++ b/test/CodeGen/builtins-multipercision.c
@@ -72,3 +72,76 @@ unsigned long long test_addcll(unsigned long long x, unsigned long long y,
 
   return carryout;
 }
+
+unsigned short test_subcs(unsigned short x, unsigned short y,
+                          unsigned short carryin, unsigned short *z) {
+  // CHECK: @test_subcs
+  // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %x, i16 %y)
+  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %{{.+}}, i16 %carryin)
+  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
+  // CHECK: %{{.+}} = zext i1 %{{.+}} to i16
+  // CHECK: store i16 %{{.+}}, i16* %z, align 2
+
+  unsigned short carryout;
+  *z = __builtin_subcs(x, y, carryin, &carryout);
+
+  return carryout;
+}
+
+unsigned test_subc(unsigned x, unsigned y, unsigned carryin, unsigned *z) {
+  // CHECK: @test_subc
+  // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
+  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %carryin)
+  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
+  // CHECK: %{{.+}} = zext i1 %{{.+}} to i32
+  // CHECK: store i32 %{{.+}}, i32* %z, align 4
+  unsigned carryout;
+  *z = __builtin_subc(x, y, carryin, &carryout);
+
+  return carryout;
+}
+
+unsigned long test_subcl(unsigned long x, unsigned long y,
+                         unsigned long carryin, unsigned long *z) {
+  // CHECK: @test_subcl
+  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %x, i64 %y)
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %carryin)
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
+  // CHECK: %{{.+}} = zext i1 %{{.+}} to i64
+  // CHECK: store i64 %{{.+}}, i64* %z, align 8
+  unsigned long carryout;
+  *z = __builtin_subcl(x, y, carryin, &carryout);
+
+  return carryout;
+}
+
+unsigned long long test_subcll(unsigned long long x, unsigned long long y,
+                               unsigned long long carryin,
+                               unsigned long long *z) {
+  // CHECK: @test_subcll
+  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %x, i64 %y)
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %carryin)
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
+  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
+  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
+  // CHECK: %{{.+}} = zext i1 %{{.+}} to i64
+  // CHECK: store i64 %{{.+}}, i64* %z, align 8
+  unsigned long long carryout;
+  *z = __builtin_subcll(x, y, carryin, &carryout);
+
+  return carryout;
+}
-- 
1.7.9.6 (Apple Git-31.1)

