jfb updated this revision to Diff 279896.
jfb added a comment.

Follow John's suggestions


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D79279/new/

https://reviews.llvm.org/D79279

Files:
  clang/include/clang/Basic/Builtins.def
  clang/include/clang/Basic/DiagnosticSemaKinds.td
  clang/include/clang/Sema/Sema.h
  clang/lib/CodeGen/CGBuilder.h
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/lib/CodeGen/CGExpr.cpp
  clang/lib/Sema/SemaChecking.cpp
  clang/test/CodeGen/builtin-overloaded-memfns.c
  clang/test/CodeGenObjC/builtin-memfns.m
  clang/test/Sema/builtin-overloaded-memfns.cpp
  clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl
  clang/test/SemaOpenCL/to_addr_builtin.cl

Index: clang/test/SemaOpenCL/to_addr_builtin.cl
===================================================================
--- clang/test/SemaOpenCL/to_addr_builtin.cl
+++ clang/test/SemaOpenCL/to_addr_builtin.cl
@@ -15,7 +15,7 @@
   // expected-error@-2{{implicit declaration of function 'to_global' is invalid in OpenCL}}
   // expected-warning@-3{{incompatible integer to pointer conversion assigning to '__global int *__private' from 'int'}}
 #else
-  // expected-error@-5{{invalid number of arguments to function: 'to_global'}}
+  // expected-error@-5{{too many arguments to function call, expected 1, have 2}}
 #endif
 
   int x;
Index: clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl
===================================================================
--- clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl
+++ clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl
@@ -10,7 +10,7 @@
   read_pipe(p, &tmp);
   read_pipe(p, ptr);
   read_pipe(tmp, p);    // expected-error {{first argument to 'read_pipe' must be a pipe type}}
-  read_pipe(p);   // expected-error {{invalid number of arguments to function: 'read_pipe'}}
+  read_pipe(p);         // expected-error {{invalid number of arguments to function: 'read_pipe'}}
   read_pipe(p, rid, tmp, ptr);
   read_pipe(p, tmp, tmp, ptr);   // expected-error {{invalid argument type to function 'read_pipe' (expecting 'reserve_id_t' having '__private int')}}
   read_pipe(p, rid, rid, ptr);   // expected-error {{invalid argument type to function 'read_pipe' (expecting 'unsigned int' having '__private reserve_id_t')}}
@@ -39,7 +39,7 @@
   write_pipe(p, &tmp);
   write_pipe(p, ptr);
   write_pipe(tmp, p);    // expected-error {{first argument to 'write_pipe' must be a pipe type}}
-  write_pipe(p);   // expected-error {{invalid number of arguments to function: 'write_pipe'}}
+  write_pipe(p);         // expected-error {{invalid number of arguments to function: 'write_pipe'}}
   write_pipe(p, rid, tmp, ptr);
   write_pipe(p, tmp, tmp, ptr);   // expected-error {{invalid argument type to function 'write_pipe' (expecting 'reserve_id_t' having '__private int')}}
   write_pipe(p, rid, rid, ptr);   // expected-error {{invalid argument type to function 'write_pipe' (expecting 'unsigned int' having '__private reserve_id_t')}}
Index: clang/test/Sema/builtin-overloaded-memfns.cpp
===================================================================
--- /dev/null
+++ clang/test/Sema/builtin-overloaded-memfns.cpp
@@ -0,0 +1,222 @@
+// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=arm64-unknown-unknown -fms-extensions -DCPY=1
+// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=arm64-unknown-unknown -fms-extensions -DCPY=0
+
+// Test memcpy and memmove with the same code, since they're basically the same constraints.
+#if CPY
+#define MEM(...) __builtin_memcpy_overloaded(__VA_ARGS__)
+#else
+#define MEM(...) __builtin_memmove_overloaded(__VA_ARGS__)
+#endif
+
+#define NULL (void *)0
+#define nullptr __nullptr
+using size_t = __SIZE_TYPE__;
+using sizeless_t = __SVInt8_t;
+using float4 = float __attribute__((ext_vector_type(4)));
+struct Intish {
+  int i;
+};
+struct NotLockFree {
+  char buf[512];
+};
+struct TrivialCpy {
+  char buf[8];
+  TrivialCpy();
+  TrivialCpy(const TrivialCpy &) = default;
+};
+struct NotTrivialCpy {
+  char buf[8];
+  NotTrivialCpy();
+  NotTrivialCpy(const NotTrivialCpy &);
+};
+
+void arg_count() {
+  MEM();                                   // expected-error {{too few arguments to function call, expected 3, have 0}}
+  MEM(0);                                  // expected-error {{too few arguments to function call, expected 3, have 1}}
+  MEM(0, 0);                               // expected-error {{too few arguments to function call, expected 3, have 2}}
+  MEM(0, 0, 0, 0);                         // expected-error {{too many arguments to function call, expected 3, have 4}}
+  __builtin_memset_overloaded();           // expected-error {{too few arguments to function call, expected 3, have 0}}
+  __builtin_memset_overloaded(0);          // expected-error {{too few arguments to function call, expected 3, have 1}}
+  __builtin_memset_overloaded(0, 0);       // expected-error {{too few arguments to function call, expected 3, have 2}}
+  __builtin_memset_overloaded(0, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}}
+}
+
+void null(char *dst, const char *src, size_t size) {
+  MEM(0, src, 0);                              // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(0, src, size);                           // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(dst, 0, 0);                              // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(dst, 0, size);                           // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  __builtin_memset_overloaded(0, 0, 0);        // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  __builtin_memset_overloaded(0, 0, size);     // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(dst, 0, 42);                             // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(dst, 0, 42);                             // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(dst, NULL, 42);                          // expected-warning {{null passed to a callee that requires a non-null argument}}
+  MEM(dst, nullptr, 42);                       // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'nullptr_t'}}
+  MEM(0, src, 42);                             // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(NULL, src, 42);                          // expected-warning {{null passed to a callee that requires a non-null argument}}
+  MEM(nullptr, src, 42);                       // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'nullptr_t'}}
+  __builtin_memset_overloaded(0, 0, 42);       // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  __builtin_memset_overloaded(NULL, 0, 42);    // expected-warning {{null passed to a callee that requires a non-null argument}}
+  __builtin_memset_overloaded(nullptr, 0, 42); // expected-error{{cannot initialize a parameter of type 'void *' with an rvalue of type 'nullptr_t'}}
+}
+
+void good_arg_types(char *dst, const char *src, size_t size) {
+  MEM(dst, src, 0);
+  MEM(dst, dst, ~(size_t)0);
+  MEM(dst, src, 42);
+  MEM(dst, src, size);
+  MEM(dst, (char *)src, size);
+  MEM(dst, (const void *)src, size);
+  MEM((void *)dst, src, size);
+  MEM(dst, (volatile const char *)src, size);
+  MEM((volatile char *)dst, src, size);
+  MEM(dst, (__unaligned const char *)src, size);
+  MEM((__unaligned char *)dst, src, size);
+  MEM(dst, (const char *__restrict)src, size);
+  MEM((char *__restrict)dst, src, size);
+  MEM(dst, (_Atomic const char *)src, size);
+  MEM((_Atomic char *)dst, src, size);
+  MEM((int *)dst, (_Atomic const Intish *)src, size);
+  MEM((_Atomic Intish *)dst, (const int *)src, size);
+  MEM((void *)dst, (_Atomic const int *)src, size);
+  MEM((_Atomic int *)dst, (const void *)src, size);
+  MEM(dst, (const __attribute__((address_space(32))) char *)src, size);
+  MEM((__attribute__((address_space(32))) char *)dst, src, size);
+  MEM((__attribute__((address_space(32))) char *)dst, (const __attribute__((address_space(64))) char *)src, size);
+  MEM(dst, (__attribute__((address_space(32))) __unaligned const volatile void *__restrict)src, size);
+  MEM((__attribute__((address_space(32))) __unaligned volatile void *__restrict)dst, src, size);
+
+  __builtin_memset_overloaded(dst, 0, 0);
+  __builtin_memset_overloaded(dst, 0, ~(size_t)0);
+  __builtin_memset_overloaded(dst, 0, 42);
+  __builtin_memset_overloaded(dst, 0, size);
+  __builtin_memset_overloaded((void *)dst, 0, size);
+  __builtin_memset_overloaded((volatile char *)dst, 0, size);
+  __builtin_memset_overloaded((__unaligned char *)dst, 0, size);
+  __builtin_memset_overloaded((_Atomic char *)dst, 0, size);
+  __builtin_memset_overloaded((int *)dst, 0, size);
+  __builtin_memset_overloaded((_Atomic Intish *)dst, 0, size);
+  __builtin_memset_overloaded((__attribute__((address_space(32))) char *)dst, 0, size);
+  __builtin_memset_overloaded((__attribute__((address_space(32))) __unaligned volatile void *)dst, 0, size);
+}
+
+void bad_arg_types(char *dst, const char *src, size_t size) {
+  MEM(dst, 42, size);                                                                        // expected-error {{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(42, src, size);                                                                        // expected-error {{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  MEM(dst, src, dst);                                                                        // expected-error {{cannot initialize a parameter of type 'unsigned long' with an lvalue of type 'char *'}}
+  MEM((const char *)dst, src, size);                                                         // expected-error {{argument must be non-const, got 'const char'}}
+  MEM((__attribute__((address_space(32))) __unaligned const volatile char *)dst, src, size); // expected-error {{argument must be non-const, got 'const volatile __unaligned __attribute__((address_space(32))) char'}}
+  MEM(dst, (volatile _Atomic const char *)src, size);                                        // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('char' and 'const volatile _Atomic(char)' cannot have both _Atomic and volatile)}}
+  MEM((volatile _Atomic char *)dst, src, size);                                              // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' and 'const char' cannot have both _Atomic and volatile)}}
+  MEM((volatile _Atomic char *)dst, (_Atomic const char *)src, size);                        // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' and 'const _Atomic(char)' cannot have both _Atomic and volatile)}}
+  MEM((_Atomic char *)dst, (volatile _Atomic const char *)src, size);                        // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('_Atomic(char)' and 'const volatile _Atomic(char)' cannot have both _Atomic and volatile)}}
+  MEM(dst, (_Atomic const int *)src, size);                                                  // expected-error{{_Atomic sizes must match, 'char' is 1 bytes and 'const _Atomic(int)' is 4 bytes}}
+  MEM((_Atomic int *)dst, src, size);                                                        // expected-error{{_Atomic sizes must match, '_Atomic(int)' is 4 bytes and 'const char' is 1 bytes}}
+  MEM((_Atomic NotLockFree *)dst, (_Atomic const NotLockFree *)src, size);                   // expected-error{{_Atomic type must always be lock-free, '_Atomic(NotLockFree)' isn't}}
+
+  __builtin_memset_overloaded(42, 0, size);                                                                        // expected-error {{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}}
+  __builtin_memset_overloaded((const char *)dst, 0, size);                                                         // expected-error {{argument must be non-const, got 'const char'}}
+  __builtin_memset_overloaded((__attribute__((address_space(32))) __unaligned const volatile char *)dst, 0, size); // expected-error {{argument must be non-const, got 'const volatile __unaligned __attribute__((address_space(32))) char'}}
+  __builtin_memset_overloaded((volatile _Atomic char *)dst, 0, size);                                              // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' cannot have both _Atomic and volatile)}}
+  __builtin_memset_overloaded((_Atomic NotLockFree *)dst, 0, size);                                                // expected-error{{_Atomic type must always be lock-free, '_Atomic(NotLockFree)' isn't}}
+}
+
+void array_arg_types() {
+  extern char adst[512];
+  extern volatile char avdst[512];
+  extern const char asrc[512];
+  extern const volatile char avsrc[512];
+
+  MEM(adst, asrc, sizeof(adst));
+  MEM(avdst, avsrc, sizeof(avdst));
+  MEM(asrc, asrc, sizeof(adst));     // expected-error {{argument must be non-const, got 'const char'}}
+  MEM(adst, asrc, sizeof(adst) + 1); // TODO diagnose size overflow?
+  __builtin_memset_overloaded(adst, 0, sizeof(adst));
+  __builtin_memset_overloaded(avdst, 0, sizeof(avdst));
+  __builtin_memset_overloaded(asrc, 0, sizeof(asrc));     // expected-error {{argument must be non-const, got 'const char'}}
+  __builtin_memset_overloaded(adst, 0, sizeof(adst) + 1); // TODO diagnose size overflow?
+}
+
+void atomic_array_arg_types() {
+  extern _Atomic char aadst[512];
+  extern volatile _Atomic char aavdst[512];
+  extern const _Atomic char aasrc[512];
+  extern const _Atomic volatile char aavsrc[512];
+
+  MEM(aadst, aasrc, sizeof(aadst));
+  MEM(aavdst, aasrc, sizeof(aadst)); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' and 'const _Atomic(char)' cannot have both _Atomic and volatile)}}
+  MEM(aadst, aavsrc, sizeof(aadst)); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('_Atomic(char)' and 'const volatile _Atomic(char)' cannot have both _Atomic and volatile)}}
+  __builtin_memset_overloaded(aadst, 0, sizeof(aadst));
+  __builtin_memset_overloaded(aavdst, 0, sizeof(aavdst)); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' cannot have both _Atomic and volatile)}}
+}
+
+void trivial_arg_types() {
+  TrivialCpy trivialDst;
+  const TrivialCpy trivialSrc;
+  MEM(&trivialDst, &trivialSrc, sizeof(TrivialCpy));
+  MEM((__attribute__((address_space(32))) __unaligned volatile TrivialCpy * __restrict) & trivialDst, (__attribute__((address_space(64))) __unaligned const volatile TrivialCpy *__restrict) & trivialSrc, sizeof(TrivialCpy));
+  __builtin_memset_overloaded(&trivialDst, 0, sizeof(trivialDst));
+  __builtin_memset_overloaded((__attribute__((address_space(32))) __unaligned volatile TrivialCpy * __restrict) & trivialDst, 0, sizeof(trivialDst));
+
+  TrivialCpy trivialDstArr[2];
+  const TrivialCpy trivialSrcArr[2];
+  MEM(trivialDstArr, trivialSrcArr, sizeof(TrivialCpy) * 2);
+  __builtin_memset_overloaded(trivialDstArr, 0, sizeof(TrivialCpy) * 2);
+}
+
+void nontrivial_arg_types() {
+  NotTrivialCpy notTrivialDst;
+  const NotTrivialCpy notTrivialSrc;
+  MEM(&notTrivialDst, &notTrivialSrc, sizeof(NotTrivialCpy));            // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}}
+  __builtin_memset_overloaded(&notTrivialDst, 0, sizeof(NotTrivialCpy)); // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}}
+
+  NotTrivialCpy notTrivialDstArr[2];
+  const NotTrivialCpy notTrivialSrcArr[2];
+  MEM(notTrivialDstArr, notTrivialSrcArr, sizeof(NotTrivialCpy) * 2);          // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}}
+  __builtin_memset_overloaded(notTrivialDstArr, 0, sizeof(NotTrivialCpy) * 2); // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}}
+}
+
+class Incomplete; // expected-note 3 {{forward declaration of 'Incomplete'}}
+void incomplete_arg_types(char *dst, const char *src, size_t size) {
+  MEM((Incomplete *)dst, src, size);                       // expected-error{{address argument must be a pointer to a trivially-copyable type ('Incomplete' invalid)}}
+  MEM(dst, (const Incomplete *)src, size);                 // expected-error{{address argument must be a pointer to a trivially-copyable type ('const Incomplete' invalid)}}
+  __builtin_memset_overloaded((Incomplete *)dst, 0, size); // expected-error{{address argument must be a pointer to a trivially-copyable type ('Incomplete' invalid)}}
+
+  MEM((_Atomic Incomplete *)dst, src, size);                       // expected-error {{_Atomic cannot be applied to incomplete type 'Incomplete'}}
+  MEM(dst, (_Atomic const Incomplete *)src, size);                 // expected-error {{_Atomic cannot be applied to incomplete type 'Incomplete'}}
+  __builtin_memset_overloaded((_Atomic Incomplete *)dst, 0, size); // expected-error {{_Atomic cannot be applied to incomplete type 'Incomplete'}}
+}
+
+void sizeless_arg_types(char *dst, const char *src, size_t size) {
+  MEM((sizeless_t *)dst, src, size);
+  MEM(dst, (const sizeless_t *)src, size);
+  __builtin_memset_overloaded((sizeless_t *)dst, 0, size);
+
+  MEM((_Atomic sizeless_t *)dst, src, size);                       // expected-error {{_Atomic cannot be applied to sizeless type 'sizeless_t' (aka '__SVInt8_t')}}
+  MEM(dst, (_Atomic const sizeless_t *)src, size);                 // expected-error {{_Atomic cannot be applied to sizeless type 'sizeless_t' (aka '__SVInt8_t')}}
+  __builtin_memset_overloaded((_Atomic sizeless_t *)dst, 0, size); // expected-error {{_Atomic cannot be applied to sizeless type 'sizeless_t' (aka '__SVInt8_t')}}
+}
+
+void vector_arg_types(char *dst, const char *src, size_t size) {
+  MEM((float4 *)dst, src, size);
+  MEM(dst, (const float4 *)src, size);
+  __builtin_memset_overloaded((float4 *)dst, 0, size);
+
+  MEM((_Atomic float4 *)dst, (const float4 *)src, size);
+  MEM((float4 *)dst, (_Atomic const float4 *)src, size);
+  __builtin_memset_overloaded((_Atomic float4 *)dst, 0, size);
+}
+
+void extint_arg_types(char *dst, const char *src, size_t size) {
+  MEM((_ExtInt(2) *)dst, src, size);
+  MEM(dst, (const _ExtInt(2) *)src, size);
+  __builtin_memset_overloaded((_ExtInt(2) *)dst, 0, size);
+
+  MEM((_Atomic _ExtInt(8) *)dst, (const _ExtInt(8) *)src, size);
+  MEM((_ExtInt(8) *)dst, (_Atomic const _ExtInt(8) *)src, size);
+  __builtin_memset_overloaded((_Atomic _ExtInt(8) *)dst, 0, size);
+
+  MEM((_Atomic _ExtInt(9) *)dst, (const _ExtInt(9) *)src, size);   // expected-error {{_Atomic cannot be applied to integer type '_ExtInt(9)' with a non power of 2 precision}}
+  MEM((_ExtInt(9) *)dst, (_Atomic const _ExtInt(9) *)src, size);   // expected-error {{_Atomic cannot be applied to integer type '_ExtInt(9)' with a non power of 2 precision}}
+  __builtin_memset_overloaded((_Atomic _ExtInt(9) *)dst, 0, size); // expected-error {{_Atomic cannot be applied to integer type '_ExtInt(9)' with a non power of 2 precision}}
+}
Index: clang/test/CodeGenObjC/builtin-memfns.m
===================================================================
--- clang/test/CodeGenObjC/builtin-memfns.m
+++ clang/test/CodeGenObjC/builtin-memfns.m
@@ -1,10 +1,38 @@
 // RUN: %clang_cc1 -triple x86_64-apple-macosx10.8.0 -emit-llvm -o - %s | FileCheck %s
 
-void *memcpy(void *restrict s1, const void *restrict s2, unsigned long n);
+typedef __SIZE_TYPE__ size_t;
+
+void *memcpy(void *restrict s1, const void *restrict s2, size_t n);
+void *memmove(void *s1, const void *s2, size_t n);
+void *memset(void *s1, int v, size_t n);
 
 // PR13697
-void test1(int *a, id b) {
-  // CHECK: @test1
+void cpy1(int *a, id b) {
+  // CHECK-LABEL: @cpy1(
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false)
+  memcpy(a, b, 8);
+}
+
+void cpy2(id a, int *b) {
+  // CHECK-LABEL: @cpy2(
   // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false)
   memcpy(a, b, 8);
 }
+
+void move1(int *a, id b) {
+  // CHECK-LABEL: @move1(
+  // CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false)
+  memmove(a, b, 8);
+}
+
+void move2(id a, int *b) {
+  // CHECK-LABEL: @move2(
+  // CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false)
+  memmove(a, b, 8);
+}
+
+void set(id a) {
+  // CHECK-LABEL: @set(
+  // CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 42, i64 8, i1 false)
+  memset(a, 42, 8);
+}
Index: clang/test/CodeGen/builtin-overloaded-memfns.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtin-overloaded-memfns.c
@@ -0,0 +1,319 @@
+// RUN: %clang_cc1 -triple arm64-unknown-unknown -fms-extensions -emit-llvm < %s| FileCheck %s
+
+typedef __SIZE_TYPE__ size_t;
+
+// CHECK-LABEL: volatile_dst_cpy_void(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_cpy_void(volatile void *dst, const void *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_move_void(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_move_void(volatile void *dst, const void *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_set_void(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_set_void(volatile void *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: volatile_src_cpy_void(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_src_cpy_void(void *dst, volatile const void *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_src_move_void(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_src_move_void(void *dst, volatile const void *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dstsrc_cpy_void(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dstsrc_cpy_void(volatile void *dst, volatile const void *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dstsrc_move_void(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dstsrc_move_void(volatile void *dst, volatile const void *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_cpy_char(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_cpy_char(volatile char *dst, const char *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_move_char(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_move_char(volatile char *dst, const char *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_set_char(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_set_char(volatile char *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: volatile_dst_cpy_int(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_cpy_int(volatile int *dst, const int *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_move_int(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_move_int(volatile int *dst, const int *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: volatile_dst_set_int(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void volatile_dst_set_int(volatile int *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: unaligned_dst_cpy_int(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void unaligned_dst_cpy_int(__unaligned int *dst, const int *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: unaligned_dst_move_int(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void unaligned_dst_move_int(__unaligned int *dst, const int *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: unaligned_dst_set_int(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 false)
+void unaligned_dst_set_int(__unaligned int *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: unaligned_src_cpy_int(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void unaligned_src_cpy_int(int *dst, __unaligned const int *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: unaligned_src_move_int(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void unaligned_src_move_int(int *dst, __unaligned const int *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: addrspace_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.p32i8.p32i8.i64(i8 addrspace(32)* align 1 %{{[0-9]*}}, i8 addrspace(32)* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void addrspace_srcdst_cpy_char(__attribute__((address_space(32))) char *dst, __attribute__((address_space(32))) const char *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: addrspace_srcdst_move_char(
+// CHECK: call void @llvm.memmove.p32i8.p32i8.i64(i8 addrspace(32)* align 1 %{{[0-9]*}}, i8 addrspace(32)* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void addrspace_srcdst_move_char(__attribute__((address_space(32))) char *dst, __attribute__((address_space(32))) const char *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: addrspace_dst_set_char(
+// CHECK: call void @llvm.memset.p32i8.i64(i8 addrspace(32)* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 false)
+void addrspace_dst_set_char(__attribute__((address_space(32))) char *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: restrict_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void restrict_srcdst_cpy_char(char *__restrict dst, const char *__restrict src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: restrict_srcdst_move_char(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void restrict_srcdst_move_char(char *__restrict dst, const char *__restrict src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: restrict_dst_set_char(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 false)
+void restrict_dst_set_char(char *__restrict dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: atomic_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 1)
+void atomic_srcdst_cpy_char(_Atomic char *dst, _Atomic const char *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_srcdst_move_char(
+// CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 1)
+void atomic_srcdst_move_char(_Atomic char *dst, _Atomic const char *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_dst_set_char(
+// CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i32 1)
+void atomic_dst_set_char(_Atomic char *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: atomic_srcdst_cpy_int(
+// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 4)
+void atomic_srcdst_cpy_int(_Atomic int *dst, _Atomic const int *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_srcdst_move_int(
+// CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 4)
+void atomic_srcdst_move_int(_Atomic int *dst, _Atomic const int *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_dst_set_int(
+// CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i32 4)
+void atomic_dst_set_int(_Atomic int *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: atomic_srcdst_cpy_longlong(
+// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 8 %{{[0-9]*}}, i8* align 8 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 8)
+void atomic_srcdst_cpy_longlong(_Atomic long long *dst, _Atomic const long long *src, size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_srcdst_move_longlong(
+// CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 8 %{{[0-9]*}}, i8* align 8 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 8)
+void atomic_srcdst_move_longlong(_Atomic long long *dst, _Atomic const long long *src, size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_dst_set_longlong(
+// CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 8 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i32 8)
+void atomic_dst_set_longlong(_Atomic long long *dst, size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+// CHECK-LABEL: atomic_static_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 1)
+void atomic_static_srcdst_cpy_char(_Atomic char dst[static 2], _Atomic const char src[2], size_t size) { __builtin_memcpy_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_static_srcdst_move_char(
+// CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 1)
+void atomic_static_srcdst_move_char(_Atomic char dst[static 2], _Atomic const char src[2], size_t size) { __builtin_memmove_overloaded(dst, src, size); }
+
+// CHECK-LABEL: atomic_static_dst_set_char(
+// CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i32 1)
+void atomic_static_dst_set_char(_Atomic char dst[static 2], size_t size) { __builtin_memset_overloaded(dst, 0, size); }
+
+extern _Atomic char dst_atomic[2];
+extern _Atomic const char src_atomic[2];
+
+// CHECK-LABEL: atomic_array_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i32 1)
+void atomic_array_srcdst_cpy_char(size_t size) { __builtin_memcpy_overloaded(dst_atomic, src_atomic, size); }
+
+// CHECK-LABEL: atomic_array_srcdst_move_char(
+// CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i32 1)
+void atomic_array_srcdst_move_char(size_t size) { __builtin_memmove_overloaded(dst_atomic, src_atomic, size); }
+
+// CHECK-LABEL: atomic_array_dst_set_char(
+// CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8 0, i64 %{{[0-9]*}}, i32 1)
+void atomic_array_dst_set_char(size_t size) { __builtin_memset_overloaded(dst_atomic, 0, size); }
+
+// CHECK-LABEL: atomic_local_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 4, i32 4)
+void atomic_local_srcdst_cpy_char(size_t size) {
+  _Atomic int dst;
+  _Atomic const int src;
+  __builtin_memcpy_overloaded(&dst, &src, sizeof(dst));
+}
+
+// CHECK-LABEL: atomic_local_srcdst_move_char(
+// CHECK: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 4, i32 4)
+void atomic_local_srcdst_move_char(size_t size) {
+  _Atomic int dst;
+  _Atomic const int src;
+  __builtin_memmove_overloaded(&dst, &src, sizeof(dst));
+}
+
+// CHECK-LABEL: atomic_local_dst_set_char(
+// CHECK: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8 0, i64 4, i32 4)
+void atomic_local_dst_set_char(size_t size) {
+  _Atomic int dst;
+  __builtin_memset_overloaded(&dst, 0, sizeof(dst));
+}
+
+// CHECK-LABEL: vla_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9a-z]*}}, i8* align 1 %{{[0-9a-z]*}}, i64 %{{[0-9]*}}, i1 true)
+void vla_srcdst_cpy_char(size_t size) {
+  volatile char dst[size];
+  const volatile char src[size];
+  __builtin_memcpy_overloaded(dst, src, size);
+}
+
+// CHECK-LABEL: vla_srcdst_move_char(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9a-z]*}}, i8* align 1 %{{[0-9a-z]*}}, i64 %{{[0-9]*}}, i1 true)
+void vla_srcdst_move_char(size_t size) {
+  volatile char dst[size];
+  const volatile char src[size];
+  __builtin_memmove_overloaded(dst, src, size);
+}
+
+// CHECK-LABEL: vla_dst_set_char(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %{{[0-9a-z]*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void vla_dst_set_char(size_t size) {
+  volatile char dst[size];
+  __builtin_memset_overloaded(dst, 0, size);
+}
+
+// CHECK-LABEL: static_srcdst_cpy_char(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void static_srcdst_cpy_char(char dst[static 42], const char src[static 42], size_t size) {
+  __builtin_memcpy_overloaded(dst, src, size);
+}
+
+// CHECK-LABEL: static_srcdst_move_char(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false)
+void static_srcdst_move_char(char dst[static 42], const char src[static 42], size_t size) {
+  __builtin_memmove_overloaded(dst, src, size);
+}
+
+// CHECK-LABEL: static_dst_set_char(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8 0, i64 %{{[0-9]*}}, i1 false)
+void static_dst_set_char(char dst[static 42], size_t size) {
+  __builtin_memset_overloaded(dst, 0, size);
+}
+
+extern char dst_unsized[];
+extern volatile char dst_vunsized[];
+extern const char src_cunsized[];
+extern const volatile char src_cvunsized[];
+
+// CHECK-LABEL: array_volatile_unsized_dst_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_dst_cpy(size_t size) { __builtin_memcpy_overloaded(dst_vunsized, src_cunsized, size); }
+
+// CHECK-LABEL: array_volatile_unsized_dst_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_dst_move(size_t size) { __builtin_memmove_overloaded(dst_vunsized, src_cunsized, size); }
+
+// CHECK-LABEL: array_volatile_unsized_dst_set(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_dst_set(size_t size) { __builtin_memset_overloaded(dst_vunsized, 0, size); }
+
+// CHECK-LABEL: array_volatile_unsized_src_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_src_cpy(size_t size) { __builtin_memcpy_overloaded(dst_unsized, src_cvunsized, size); }
+
+// CHECK-LABEL: array_volatile_unsized_src_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_src_move(size_t size) { __builtin_memmove_overloaded(dst_unsized, src_cvunsized, size); }
+
+// CHECK-LABEL: array_volatile_unsized_dstsrc_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_dstsrc_cpy(size_t size) { __builtin_memcpy_overloaded(dst_vunsized, src_cvunsized, size); }
+
+// CHECK-LABEL: array_volatile_unsized_dstsrc_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_unsized_dstsrc_move(size_t size) { __builtin_memmove_overloaded(dst_vunsized, src_cvunsized, size); }
+
+extern __attribute__((aligned(128))) char dst_512[512];
+extern __attribute__((aligned(128))) volatile char dst_v512[512];
+extern __attribute__((aligned(128))) const char src_c512[512];
+extern __attribute__((aligned(128))) const volatile char src_cv512[512];
+
+// CHECK-LABEL: array_volatile_dst_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_dst_cpy(size_t size) { __builtin_memcpy_overloaded(dst_v512, src_c512, size); }
+
+// CHECK-LABEL: array_volatile_dst_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_dst_move(size_t size) { __builtin_memmove_overloaded(dst_v512, src_c512, size); }
+
+// CHECK-LABEL: array_volatile_dst_set(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_dst_set(size_t size) { __builtin_memset_overloaded(dst_v512, 0, size); }
+
+// CHECK-LABEL: array_volatile_src_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_src_cpy(size_t size) { __builtin_memcpy_overloaded(dst_512, src_cv512, size); }
+
+// CHECK-LABEL: array_volatile_src_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_src_move(size_t size) { __builtin_memmove_overloaded(dst_512, src_cv512, size); }
+
+// CHECK-LABEL: array_volatile_dstsrc_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_dstsrc_cpy(size_t size) { __builtin_memcpy_overloaded(dst_v512, src_cv512, size); }
+
+// CHECK-LABEL: array_volatile_dstsrc_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void array_volatile_dstsrc_move(size_t size) { __builtin_memmove_overloaded(dst_v512, src_cv512, size); }
+
+extern __attribute__((aligned(128))) volatile char dst_v512_32[512][32];
+extern __attribute__((aligned(128))) const volatile char src_cv512_32[512][32];
+
+// CHECK-LABEL: multiarray_volatile_dstsrc_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void multiarray_volatile_dstsrc_cpy(size_t size) { __builtin_memcpy_overloaded(dst_v512_32, src_cv512_32, size); }
+
+// CHECK-LABEL: multiarray_volatile_dstsrc_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void multiarray_volatile_dstsrc_move(size_t size) { __builtin_memmove_overloaded(dst_v512_32, src_cv512_32, size); }
+
+// CHECK-LABEL: multiarray_volatile_dst_set(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void multiarray_volatile_dst_set(size_t size) { __builtin_memset_overloaded(dst_v512_32, 0, size); }
+
+// CHECK-LABEL: multiarray_idx_volatile_dstsrc_cpy(
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 getelementptr {{.*}}, i8* align 32 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void multiarray_idx_volatile_dstsrc_cpy(size_t size) { __builtin_memcpy_overloaded(dst_v512_32[1], src_cv512_32[1], size); }
+
+// CHECK-LABEL: multiarray_idx_volatile_dstsrc_move(
+// CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 32 getelementptr {{.*}}, i8* align 32 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true)
+void multiarray_idx_volatile_dstsrc_move(size_t size) { __builtin_memmove_overloaded(dst_v512_32[1], src_cv512_32[1], size); }
+
+// CHECK-LABEL: multiarray_idx_volatile_dst_set(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 32 getelementptr {{.*}}, i8 0, i64 %{{[0-9]*}}, i1 true)
+void multiarray_idx_volatile_dst_set(size_t size) { __builtin_memset_overloaded(dst_v512_32[1], 0, size); }
Index: clang/lib/Sema/SemaChecking.cpp
===================================================================
--- clang/lib/Sema/SemaChecking.cpp
+++ clang/lib/Sema/SemaChecking.cpp
@@ -1274,11 +1274,8 @@
 // \return True if a semantic error has been found, false otherwise.
 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                     CallExpr *Call) {
-  if (Call->getNumArgs() != 1) {
-    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
-        << Call->getDirectCallee() << Call->getSourceRange();
+  if (checkArgCount(S, Call, 1))
     return true;
-  }
 
   auto RT = Call->getArg(0)->getType();
   if (!RT->isPointerType() || RT->getPointeeType()
@@ -1715,6 +1712,11 @@
     }
     break;
   }
+  case Builtin::BI__builtin_memcpy_overloaded:
+  case Builtin::BI__builtin_memmove_overloaded:
+    return SemaBuiltinMemcpyOverloaded(TheCallResult);
+  case Builtin::BI__builtin_memset_overloaded:
+    return SemaBuiltinMemsetOverloaded(TheCallResult);
 #define BUILTIN(ID, TYPE, ATTRS)
 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
   case Builtin::BI##ID: \
@@ -4731,7 +4733,7 @@
       !AtomTy->isScalarType()) {
     // For GNU atomics, require a trivially-copyable type. This is not part of
     // the GNU atomics specification, but we enforce it for sanity.
-    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
+    Diag(ExprRange.getBegin(), diag::err_argument_needs_trivial_copy)
         << Ptr->getType() << Ptr->getSourceRange();
     return ExprError();
   }
@@ -5424,6 +5426,184 @@
   return TheCallResult;
 }
 
+/// Perform semantic checking for __builtin_memcpy_overloaded and
+/// __builtin_memmove_overloaded, which are overloaded based on the pointer
+/// types of the destination and source arguments.
+ExprResult Sema::SemaBuiltinMemcpyOverloaded(ExprResult TheCallResult) {
+  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
+
+  if (checkArgCount(*this, TheCall, 3))
+    return ExprError();
+
+  ExprResult DstPtr = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
+  if (DstPtr.isInvalid())
+    return ExprError();
+  clang::Expr *DstOp = DstPtr.get();
+
+  ExprResult SrcPtr = DefaultFunctionArrayLvalueConversion(TheCall->getArg(1));
+  if (SrcPtr.isInvalid())
+    return ExprError();
+  clang::Expr *SrcOp = SrcPtr.get();
+
+  const PointerType *DstTy = DstOp->getType()->getAs<PointerType>();
+  const PointerType *SrcTy = SrcOp->getType()->getAs<PointerType>();
+  if (!DstTy)
+    return ExprError(
+        Diag(TheCall->getBeginLoc(), diag::err_init_conversion_failed)
+        << InitializedEntity::EK_Parameter << Context.VoidPtrTy
+        << DstOp->isLValue() << DstOp->getType() << /*no difference*/ 0
+        << DstOp->getSourceRange());
+  if (!SrcTy)
+    return ExprError(
+        Diag(TheCall->getBeginLoc(), diag::err_init_conversion_failed)
+        << InitializedEntity::EK_Parameter << Context.VoidPtrTy
+        << SrcOp->isLValue() << SrcOp->getType() << /*no difference*/ 0
+        << SrcOp->getSourceRange());
+
+  QualType DstValTy = DstTy->getPointeeType();
+  QualType SrcValTy = SrcTy->getPointeeType();
+  bool isVolatile = (DstTy && DstValTy.isVolatileQualified()) ||
+                    (SrcTy && SrcValTy.isVolatileQualified());
+  bool isAtomic = (DstTy && DstValTy->isAtomicType()) ||
+                  (SrcTy && SrcValTy->isAtomicType());
+
+  if (isAtomic && isVolatile)
+    return ExprError(Diag(TheCall->getBeginLoc(),
+                          PDiag(diag::err_atomic_volatile_unsupported))
+                     << (DstTy != SrcTy) << DstValTy << SrcValTy
+                     << DstOp->getSourceRange() << SrcOp->getSourceRange());
+
+  if (DstValTy.isConstQualified())
+    return ExprError(Diag(TheCall->getBeginLoc(), PDiag(diag::err_const_arg))
+                     << DstValTy << DstOp->getSourceRange());
+
+  if (!DstValTy.getUnqualifiedType()->isVoidType() &&
+      !DstValTy.isTriviallyCopyableType(Context) && !DstValTy->isAtomicType())
+    return ExprError(Diag(TheCall->getBeginLoc(),
+                          PDiag(diag::err_argument_needs_trivial_copy))
+                     << DstValTy << DstOp->getSourceRange());
+  if (!SrcValTy.getUnqualifiedType()->isVoidType() &&
+      !SrcValTy.isTriviallyCopyableType(Context) && !SrcValTy->isAtomicType())
+    return ExprError(Diag(TheCall->getBeginLoc(),
+                          PDiag(diag::err_argument_needs_trivial_copy))
+                     << SrcValTy << SrcOp->getSourceRange());
+
+  CharUnits DstElSz = Context.getTypeSizeInChars(DstValTy);
+  CharUnits SrcElSz = Context.getTypeSizeInChars(SrcValTy);
+  CharUnits InlineWidth = Context.toCharUnitsFromBits(
+      Context.getTargetInfo().getMaxAtomicInlineWidth());
+  if (DstValTy->isAtomicType() &&
+      !(DstElSz.isPowerOfTwo() && DstElSz <= InlineWidth))
+    return ExprError(Diag(TheCall->getBeginLoc(),
+                          PDiag(diag::err_atomic_type_must_be_lock_free))
+                     << DstValTy << DstOp->getSourceRange());
+  if (SrcValTy->isAtomicType() &&
+      !(SrcElSz.isPowerOfTwo() && SrcElSz <= InlineWidth))
+    return ExprError(Diag(TheCall->getBeginLoc(),
+                          PDiag(diag::err_atomic_type_must_be_lock_free))
+                     << SrcValTy << SrcOp->getSourceRange());
+
+  if ((DstValTy->isAtomicType() || SrcValTy->isAtomicType()) &&
+      (!DstValTy.getUnqualifiedType()->isVoidType() &&
+       !SrcValTy.getUnqualifiedType()->isVoidType()) &&
+      (DstElSz != SrcElSz))
+    return ExprError(
+        Diag(TheCall->getBeginLoc(), PDiag(diag::err_atomic_sizes_must_match))
+        << DstValTy << (unsigned)DstElSz.getQuantity() << SrcValTy
+        << (unsigned)SrcElSz.getQuantity() << DstOp->getSourceRange()
+        << SrcOp->getSourceRange());
+
+  ExprResult SizeRes(TheCall->getArg(2));
+  InitializedEntity SizeEntity = InitializedEntity::InitializeParameter(
+      Context, Context.getSizeType(), false);
+  SizeRes = PerformCopyInitialization(SizeEntity, SourceLocation(), SizeRes);
+  if (SizeRes.isInvalid())
+    return ExprError();
+  TheCall->setArg(2, SizeRes.get());
+
+  bool IsNonZero;
+  if (!SizeRes.get()->isValueDependent() &&
+      SizeRes.get()->EvaluateAsBooleanCondition(IsNonZero, Context) &&
+      IsNonZero) {
+    CheckNonNullArgument(*this, DstOp, TheCall->getExprLoc());
+    CheckNonNullArgument(*this, SrcOp, TheCall->getExprLoc());
+  }
+  return TheCallResult;
+}
+/// Perform semantic checking for
+/// __builtin_memset_overloaded, which is overloaded based on the pointer type
+/// of the destination argument.
+ExprResult Sema::SemaBuiltinMemsetOverloaded(ExprResult TheCallResult) {
+  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
+
+  if (checkArgCount(*this, TheCall, 3))
+    return ExprError();
+
+  ExprResult DstPtr = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
+  if (DstPtr.isInvalid())
+    return ExprError();
+  clang::Expr *DstOp = DstPtr.get();
+
+  if (const PointerType *DstTy = DstOp->getType()->getAs<PointerType>()) {
+    QualType DstValTy = DstTy->getPointeeType();
+    if (DstValTy.isConstQualified())
+      return ExprError(Diag(TheCall->getBeginLoc(), PDiag(diag::err_const_arg))
+                       << DstValTy << DstOp->getSourceRange());
+
+    bool isVolatile = DstValTy.isVolatileQualified();
+    bool isAtomic = DstValTy->isAtomicType();
+    if (!DstValTy.getUnqualifiedType()->isVoidType() &&
+        !DstValTy.isTriviallyCopyableType(Context) && !isAtomic)
+      return ExprError(Diag(TheCall->getBeginLoc(),
+                            PDiag(diag::err_argument_needs_trivial_copy))
+                       << DstValTy << DstOp->getSourceRange());
+
+    if (isAtomic && isVolatile)
+      return ExprError(Diag(TheCall->getBeginLoc(),
+                            PDiag(diag::err_atomic_volatile_unsupported))
+                       << false << DstValTy << DstValTy
+                       << DstOp->getSourceRange());
+
+    if (isAtomic) {
+      CharUnits ElSz = Context.getTypeSizeInChars(DstValTy);
+      CharUnits InlineWidth = Context.toCharUnitsFromBits(
+          Context.getTargetInfo().getMaxAtomicInlineWidth());
+      if (!(ElSz.isPowerOfTwo() && ElSz <= InlineWidth))
+        return ExprError(Diag(TheCall->getBeginLoc(),
+                              PDiag(diag::err_atomic_type_must_be_lock_free))
+                         << DstValTy << DstOp->getSourceRange());
+    }
+  } else
+    return ExprError(
+        Diag(TheCall->getBeginLoc(), diag::err_init_conversion_failed)
+        << InitializedEntity::EK_Parameter << Context.VoidPtrTy
+        << DstOp->isLValue() << DstOp->getType() << /*no difference*/ 0
+        << DstOp->getSourceRange());
+
+  ExprResult ValRes(TheCall->getArg(1));
+  InitializedEntity ValEntity = InitializedEntity::InitializeParameter(
+      Context, Context.UnsignedCharTy, false);
+  ValRes = PerformCopyInitialization(ValEntity, SourceLocation(), ValRes);
+  if (ValRes.isInvalid())
+    return ExprError();
+  TheCall->setArg(1, ValRes.get());
+
+  ExprResult SizeRes(TheCall->getArg(2));
+  InitializedEntity SizeEntity = InitializedEntity::InitializeParameter(
+      Context, Context.getSizeType(), false);
+  SizeRes = PerformCopyInitialization(SizeEntity, SourceLocation(), SizeRes);
+  if (SizeRes.isInvalid())
+    return ExprError();
+  TheCall->setArg(2, SizeRes.get());
+
+  bool IsNonZero;
+  if (!SizeRes.get()->isValueDependent() &&
+      SizeRes.get()->EvaluateAsBooleanCondition(IsNonZero, Context) &&
+      IsNonZero)
+    CheckNonNullArgument(*this, DstOp, TheCall->getExprLoc());
+  return TheCallResult;
+}
+
 /// CheckObjCString - Checks that the argument to the builtin
 /// CFString constructor is correct
 /// Note: It might also make sense to do the UTF-16 conversion here (would
Index: clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- clang/lib/CodeGen/CGExpr.cpp
+++ clang/lib/CodeGen/CGExpr.cpp
@@ -1061,13 +1061,13 @@
 //                         LValue Expression Emission
 //===----------------------------------------------------------------------===//
 
-/// EmitPointerWithAlignment - Given an expression of pointer type, try to
-/// derive a more accurate bound on the alignment of the pointer.
+/// Given an expression of pointer type, try to derive a more accurate bound on
+/// the alignment of the pointer.
 Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo) {
   // We allow this with ObjC object pointers because of fragile ABIs.
-  assert(E->getType()->isPointerType() ||
+  assert(E->getType()->isPointerType() || E->getType()->isArrayType() ||
          E->getType()->isObjCObjectPointerType());
   E = E->IgnoreParens();
 
@@ -1164,6 +1164,9 @@
 
   // TODO: conditional operators, comma.
 
+  if (E->getType()->isArrayType())
+    return EmitArrayToPointerDecay(E, BaseInfo, TBAAInfo);
+
   // Otherwise, use the alignment of the type.
   CharUnits Align =
       CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -625,6 +625,16 @@
   return {Width, Signed};
 }
 
+static QualType getPtrArgType(CodeGenModule &CGM, const CallExpr *E,
+                              unsigned ArgNo) {
+  QualType ArgTy = E->getArg(ArgNo)->IgnoreImpCasts()->getType();
+  if (ArgTy->isArrayType())
+    return CGM.getContext().getAsArrayType(ArgTy)->getElementType();
+  if (ArgTy->isObjCObjectPointerType())
+    return ArgTy->castAs<clang::ObjCObjectPointerType>()->getPointeeType();
+  return ArgTy->castAs<clang::PointerType>()->getPointeeType();
+}
+
 Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
   llvm::Type *DestType = Int8PtrTy;
   if (ArgValue->getType() != DestType)
@@ -2618,16 +2628,27 @@
   }
   case Builtin::BImemcpy:
   case Builtin::BI__builtin_memcpy:
+  case Builtin::BI__builtin_memcpy_overloaded:
   case Builtin::BImempcpy:
   case Builtin::BI__builtin_mempcpy: {
+    QualType DestTy = getPtrArgType(CGM, E, 0);
+    QualType SrcTy = getPtrArgType(CGM, E, 1);
     Address Dest = EmitPointerWithAlignment(E->getArg(0));
     Address Src = EmitPointerWithAlignment(E->getArg(1));
+    bool isVolatile =
+        DestTy.isVolatileQualified() || SrcTy.isVolatileQualified();
+    bool isAtomic = DestTy->isAtomicType() || SrcTy->isAtomicType();
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                         E->getArg(0)->getExprLoc(), FD, 0);
     EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
                         E->getArg(1)->getExprLoc(), FD, 1);
-    Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+    if (isAtomic) {
+      CharUnits ElementSize = CGM.getContext().getTypeSizeInChars(DestTy);
+      Builder.CreateElementUnorderedAtomicMemCpy(Dest, Src, SizeVal,
+                                                 ElementSize);
+    } else
+      Builder.CreateMemCpy(Dest, Src, SizeVal, isVolatile);
     if (BuiltinID == Builtin::BImempcpy ||
         BuiltinID == Builtin::BI__builtin_mempcpy)
       return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
@@ -2696,26 +2717,46 @@
   }
 
   case Builtin::BImemmove:
-  case Builtin::BI__builtin_memmove: {
+  case Builtin::BI__builtin_memmove:
+  case Builtin::BI__builtin_memmove_overloaded: {
+    QualType DestTy = getPtrArgType(CGM, E, 0);
+    QualType SrcTy = getPtrArgType(CGM, E, 1);
     Address Dest = EmitPointerWithAlignment(E->getArg(0));
     Address Src = EmitPointerWithAlignment(E->getArg(1));
+    bool isVolatile =
+        DestTy.isVolatileQualified() || SrcTy.isVolatileQualified();
+    bool isAtomic = DestTy->isAtomicType() || SrcTy->isAtomicType();
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                         E->getArg(0)->getExprLoc(), FD, 0);
     EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
                         E->getArg(1)->getExprLoc(), FD, 1);
-    Builder.CreateMemMove(Dest, Src, SizeVal, false);
+    if (isAtomic) {
+      CharUnits ElementSize = CGM.getContext().getTypeSizeInChars(DestTy);
+      Builder.CreateElementUnorderedAtomicMemMove(Dest, Src, SizeVal,
+                                                  ElementSize);
+    } else
+      Builder.CreateMemMove(Dest, Src, SizeVal, isVolatile);
     return RValue::get(Dest.getPointer());
   }
   case Builtin::BImemset:
-  case Builtin::BI__builtin_memset: {
+  case Builtin::BI__builtin_memset:
+  case Builtin::BI__builtin_memset_overloaded: {
+    QualType DestTy = getPtrArgType(CGM, E, 0);
     Address Dest = EmitPointerWithAlignment(E->getArg(0));
     Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                          Builder.getInt8Ty());
+    bool isVolatile = DestTy.isVolatileQualified();
+    bool isAtomic = DestTy->isAtomicType();
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                         E->getArg(0)->getExprLoc(), FD, 0);
-    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+    if (isAtomic) {
+      CharUnits ElementSize = CGM.getContext().getTypeSizeInChars(DestTy);
+      Builder.CreateElementUnorderedAtomicMemSet(Dest, ByteVal, SizeVal,
+                                                 ElementSize);
+    } else
+      Builder.CreateMemSet(Dest, ByteVal, SizeVal, isVolatile);
     return RValue::get(Dest.getPointer());
   }
   case Builtin::BI__builtin___memset_chk: {
Index: clang/lib/CodeGen/CGBuilder.h
===================================================================
--- clang/lib/CodeGen/CGBuilder.h
+++ clang/lib/CodeGen/CGBuilder.h
@@ -279,6 +279,15 @@
                         IsVolatile);
   }
 
+  using CGBuilderBaseTy::CreateElementUnorderedAtomicMemCpy;
+  llvm::CallInst *CreateElementUnorderedAtomicMemCpy(Address Dest, Address Src,
+                                                     llvm::Value *Size,
+                                                     CharUnits ElementSize) {
+    return CreateElementUnorderedAtomicMemCpy(
+        Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(),
+        Src.getAlignment().getAsAlign(), Size, ElementSize.getQuantity());
+  }
+
   using CGBuilderBaseTy::CreateMemCpyInline;
   llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) {
     return CreateMemCpyInline(
@@ -294,6 +303,15 @@
                          Size, IsVolatile);
   }
 
+  using CGBuilderBaseTy::CreateElementUnorderedAtomicMemMove;
+  llvm::CallInst *CreateElementUnorderedAtomicMemMove(Address Dest, Address Src,
+                                                      llvm::Value *Size,
+                                                      CharUnits ElementSize) {
+    return CreateElementUnorderedAtomicMemMove(
+        Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(),
+        Src.getAlignment().getAsAlign(), Size, ElementSize.getQuantity());
+  }
+
   using CGBuilderBaseTy::CreateMemSet;
   llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
                                llvm::Value *Size, bool IsVolatile = false) {
@@ -301,6 +319,16 @@
                         Dest.getAlignment().getAsAlign(), IsVolatile);
   }
 
+  using CGBuilderBaseTy::CreateElementUnorderedAtomicMemSet;
+  llvm::CallInst *CreateElementUnorderedAtomicMemSet(Address Dest,
+                                                     llvm::Value *Value,
+                                                     llvm::Value *Size,
+                                                     CharUnits ElementSize) {
+    return CreateElementUnorderedAtomicMemSet(Dest.getPointer(), Value, Size,
+                                              Dest.getAlignment().getAsAlign(),
+                                              ElementSize.getQuantity());
+  }
+
   using CGBuilderBaseTy::CreatePreserveStructAccessIndex;
   Address CreatePreserveStructAccessIndex(Address Addr,
                                           unsigned Index,
Index: clang/include/clang/Sema/Sema.h
===================================================================
--- clang/include/clang/Sema/Sema.h
+++ clang/include/clang/Sema/Sema.h
@@ -12184,6 +12184,8 @@
   bool SemaBuiltinSetjmp(CallExpr *TheCall);
   ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
   ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
+  ExprResult SemaBuiltinMemcpyOverloaded(ExprResult TheCallResult);
+  ExprResult SemaBuiltinMemsetOverloaded(ExprResult TheCallResult);
   ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                      AtomicExpr::AtomicOp Op);
   ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
Index: clang/include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -7941,9 +7941,6 @@
 def err_atomic_op_needs_non_const_pointer : Error<
   "address argument to atomic operation must be a pointer to non-const "
   "type (%0 invalid)">;
-def err_atomic_op_needs_trivial_copy : Error<
-  "address argument to atomic operation must be a pointer to a "
-  "trivially-copyable type (%0 invalid)">;
 def err_atomic_op_needs_atomic_int_or_ptr : Error<
   "address argument to atomic operation must be a pointer to %select{|atomic }0"
   "integer or pointer (%1 invalid)">;
@@ -8909,6 +8906,16 @@
   "null returned from %select{function|method}0 that requires a non-null return value">,
   InGroup<NonNull>;
 
+def err_const_arg : Error<"argument must be non-const, got %0">;
+
+def err_argument_needs_trivial_copy : Error<"address argument must be a pointer to a trivially-copyable type (%0 invalid)">;
+
+def err_atomic_volatile_unsupported : Error<"mixing _Atomic and volatile qualifiers is unsupported (%select{%1|%1 and %2}0 cannot have both _Atomic and volatile)">;
+
+def err_atomic_sizes_must_match : Error<"_Atomic sizes must match, %0 is %1 bytes and %2 is %3 bytes">;
+
+def err_atomic_type_must_be_lock_free : Error<"_Atomic type must always be lock-free, %0 isn't">;
+
 def err_lifetimebound_no_object_param : Error<
   "'lifetimebound' attribute cannot be applied; %select{static |non-}0member "
   "function has no implicit object parameter">;
@@ -9703,8 +9710,6 @@
   "cannot refer to a block inside block">;
 
 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
-def err_opencl_builtin_to_addr_arg_num : Error<
-  "invalid number of arguments to function: %0">;
 def err_opencl_builtin_to_addr_invalid_arg : Error<
   "invalid argument %0 to function: %1, expecting a generic pointer argument">;
 
Index: clang/include/clang/Basic/Builtins.def
===================================================================
--- clang/include/clang/Basic/Builtins.def
+++ clang/include/clang/Basic/Builtins.def
@@ -486,9 +486,12 @@
 BUILTIN(__builtin_memcmp, "ivC*vC*z", "nF")
 BUILTIN(__builtin_memcpy, "v*v*vC*z", "nF")
 BUILTIN(__builtin_memcpy_inline, "vv*vC*Iz", "nt")
+BUILTIN(__builtin_memcpy_overloaded, "v*v*vC*z", "nt")
 BUILTIN(__builtin_memmove, "v*v*vC*z", "nF")
+BUILTIN(__builtin_memmove_overloaded, "v*v*vC*z", "nt")
 BUILTIN(__builtin_mempcpy, "v*v*vC*z", "nF")
 BUILTIN(__builtin_memset, "v*v*iz", "nF")
+BUILTIN(__builtin_memset_overloaded, "v*v*iz", "nt")
 BUILTIN(__builtin_printf, "icC*.", "Fp:0:")
 BUILTIN(__builtin_stpcpy, "c*c*cC*", "nF")
 BUILTIN(__builtin_stpncpy, "c*c*cC*z", "nF")
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to