Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package aws-c-cal for openSUSE:Factory 
checked in at 2024-06-27 16:04:57
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/aws-c-cal (Old)
 and      /work/SRC/openSUSE:Factory/.aws-c-cal.new.18349 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "aws-c-cal"

Thu Jun 27 16:04:57 2024 rev:7 rq:1183557 version:0.7.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/aws-c-cal/aws-c-cal.changes      2024-06-06 
12:32:22.943957675 +0200
+++ /work/SRC/openSUSE:Factory/.aws-c-cal.new.18349/aws-c-cal.changes   
2024-06-27 16:05:31.819522053 +0200
@@ -1,0 +2,12 @@
+Wed Jun 26 11:51:44 UTC 2024 - John Paul Adrian Glaubitz 
<adrian.glaub...@suse.com>
+
+- Update to version 0.7.0
+  * clang-format 18 by @graebm in (#187)
+  * Implement runtime check on libcrypto linkage
+    by @WillChilds-Klein in (#186)
+  * Pin AWS-LC until it's fixed for manylinux1
+    by @graebm in (#188)
+  * Make AES GCM more consistent cross platform
+    by @DmitriyMusatkin in (#189)
+
+-------------------------------------------------------------------

Old:
----
  v0.6.15.tar.gz

New:
----
  v0.7.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ aws-c-cal.spec ++++++
--- /var/tmp/diff_new_pack.2mLaFK/_old  2024-06-27 16:05:32.323540559 +0200
+++ /var/tmp/diff_new_pack.2mLaFK/_new  2024-06-27 16:05:32.327540705 +0200
@@ -19,7 +19,7 @@
 %define library_version 1.0.0
 %define library_soversion 0unstable
 Name:           aws-c-cal
-Version:        0.6.15
+Version:        0.7.0
 Release:        0
 Summary:        AWS C99 wrapper for cryptography primitives
 License:        Apache-2.0

++++++ v0.6.15.tar.gz -> v0.7.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/.github/workflows/ci.yml 
new/aws-c-cal-0.7.0/.github/workflows/ci.yml
--- old/aws-c-cal-0.6.15/.github/workflows/ci.yml       2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/.github/workflows/ci.yml        2024-06-21 
17:52:29.000000000 +0200
@@ -129,6 +129,15 @@
         python -c "from urllib.request import urlretrieve; urlretrieve('${{ 
env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION 
}}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')"
         python builder.pyz build -p ${{ env.PACKAGE_NAME }}
 
+  windows-debug:
+    runs-on: windows-2022 # latest
+    steps:
+    - name: Build ${{ env.PACKAGE_NAME }} + consumers
+      run: |
+        python -c "from urllib.request import urlretrieve; urlretrieve('${{ 
env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION 
}}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')"
+        python builder.pyz build -p ${{ env.PACKAGE_NAME }} --config Debug 
--variant=just-tests
+
+
   windows-vc14:
     runs-on: windows-2019 # windows-2019 is last env with Visual Studio 2015 
(v14.0)
     strategy:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/.github/workflows/clang-format.yml 
new/aws-c-cal-0.7.0/.github/workflows/clang-format.yml
--- old/aws-c-cal-0.6.15/.github/workflows/clang-format.yml     2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/.github/workflows/clang-format.yml      2024-06-21 
17:52:29.000000000 +0200
@@ -5,14 +5,12 @@
 jobs:
   clang-format:
 
-    runs-on: ubuntu-20.04 # latest
+    runs-on: ubuntu-24.04 # latest
 
     steps:
     - name: Checkout Sources
-      uses: actions/checkout@v1
+      uses: actions/checkout@v4
 
     - name: clang-format lint
-      uses: DoozyX/clang-format-lint-action@v0.3.1
-      with:
-        # List of extensions to check
-        extensions: c,h
+      run: |
+        ./format-check.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/builder.json 
new/aws-c-cal-0.7.0/builder.json
--- old/aws-c-cal-0.6.15/builder.json   2024-05-09 17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/builder.json    2024-06-21 17:52:29.000000000 +0200
@@ -15,7 +15,8 @@
         "linux": {
             "upstream": [
                 {
-                    "name": "aws-lc"
+                    "name": "aws-lc",
+                    "revision": "v1.29.0", "_comment": "avoid commit a0d636e7 
which breaks manylinux1"
                 }
             ]
         },
@@ -89,6 +90,11 @@
         },
         "no-tests": {
             "!test_steps": []
+        },
+        "just-tests": {
+            "!test_steps": [
+                "test"
+            ]
         }
     },
     "test_steps": [
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/format-check.py 
new/aws-c-cal-0.7.0/format-check.py
--- old/aws-c-cal-0.6.15/format-check.py        1970-01-01 01:00:00.000000000 
+0100
+++ new/aws-c-cal-0.7.0/format-check.py 2024-06-21 17:52:29.000000000 +0200
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+import argparse
+import os
+from pathlib import Path
+import re
+from subprocess import list2cmdline, run
+from tempfile import NamedTemporaryFile
+
+CLANG_FORMAT_VERSION = '18.1.6'
+
+INCLUDE_REGEX = re.compile(
+    r'^(include|source|tests|verification)/.*\.(c|h|inl)$')
+EXCLUDE_REGEX = re.compile(r'^$')
+
+arg_parser = argparse.ArgumentParser(description="Check with clang-format")
+arg_parser.add_argument('-i', '--inplace-edit', action='store_true',
+                        help="Edit files inplace")
+args = arg_parser.parse_args()
+
+os.chdir(Path(__file__).parent)
+
+# create file containing list of all files to format
+filepaths_file = NamedTemporaryFile(delete=False)
+for dirpath, dirnames, filenames in os.walk('.'):
+    for filename in filenames:
+        # our regexes expect filepath to use forward slash
+        filepath = Path(dirpath, filename).as_posix()
+        if not INCLUDE_REGEX.match(filepath):
+            continue
+        if EXCLUDE_REGEX.match(filepath):
+            continue
+
+        filepaths_file.write(f"{filepath}\n".encode())
+filepaths_file.close()
+
+# use pipx to run clang-format from PyPI
+# this is a simple way to run the same clang-format version regardless of OS
+cmd = ['pipx', 'run', f'clang-format=={CLANG_FORMAT_VERSION}',
+       f'--files={filepaths_file.name}']
+if args.inplace_edit:
+    cmd += ['-i']
+else:
+    cmd += ['--Werror', '--dry-run']
+
+print(f"{Path.cwd()}$ {list2cmdline(cmd)}")
+if run(cmd).returncode:
+    exit(1)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/format-check.sh 
new/aws-c-cal-0.7.0/format-check.sh
--- old/aws-c-cal-0.6.15/format-check.sh        2024-05-09 17:57:57.000000000 
+0200
+++ new/aws-c-cal-0.7.0/format-check.sh 1970-01-01 01:00:00.000000000 +0100
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-if [[ -z $CLANG_FORMAT ]] ; then
-    CLANG_FORMAT=clang-format
-fi
-
-if NOT type $CLANG_FORMAT 2> /dev/null ; then
-    echo "No appropriate clang-format found."
-    exit 1
-fi
-
-FAIL=0
-SOURCE_FILES=`find bin source include tests -type f \( -name '*.h' -o -name 
'*.c' \)`
-for i in $SOURCE_FILES
-do
-    $CLANG_FORMAT -output-replacements-xml $i | grep -c "<replacement " > 
/dev/null
-    if [ $? -ne 1 ]
-    then
-        echo "$i failed clang-format check."
-        FAIL=1
-    fi
-done
-
-exit $FAIL
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/aws-c-cal-0.6.15/include/aws/cal/private/symmetric_cipher_priv.h 
new/aws-c-cal-0.7.0/include/aws/cal/private/symmetric_cipher_priv.h
--- old/aws-c-cal-0.6.15/include/aws/cal/private/symmetric_cipher_priv.h        
2024-05-09 17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/include/aws/cal/private/symmetric_cipher_priv.h 
2024-06-21 17:52:29.000000000 +0200
@@ -34,9 +34,9 @@
     /**
      deprecated for use, only for backwards compat.
      Use state to represent current state of cipher.
-     good represented if the ciphter was initialized
+     good represented if the cipher was initialized
      without any errors, ready to process input,
-     and not finialized yet. This corresponds to
+     and not finalized yet. This corresponds to
      the state AWS_SYMMETRIC_CIPHER_READY.
     */
     bool good;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/include/aws/cal/symmetric_cipher.h 
new/aws-c-cal-0.7.0/include/aws/cal/symmetric_cipher.h
--- old/aws-c-cal-0.6.15/include/aws/cal/symmetric_cipher.h     2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/include/aws/cal/symmetric_cipher.h      2024-06-21 
17:52:29.000000000 +0200
@@ -15,25 +15,21 @@
 
 struct aws_symmetric_cipher;
 
-typedef struct aws_symmetric_cipher *(aws_aes_cbc_256_new_fn)(
-    struct aws_allocator *allocator,
-    const struct aws_byte_cursor *key,
-    const struct aws_byte_cursor *iv);
-
-typedef struct aws_symmetric_cipher *(aws_aes_ctr_256_new_fn)(
-    struct aws_allocator *allocator,
-    const struct aws_byte_cursor *key,
-    const struct aws_byte_cursor *iv);
-
-typedef struct aws_symmetric_cipher *(aws_aes_gcm_256_new_fn)(
-    struct aws_allocator *allocator,
-    const struct aws_byte_cursor *key,
-    const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag);
+typedef struct aws_symmetric_cipher *(aws_aes_cbc_256_new_fn)(struct 
aws_allocator *allocator,
+                                                              const struct 
aws_byte_cursor *key,
+                                                              const struct 
aws_byte_cursor *iv);
+
+typedef struct aws_symmetric_cipher *(aws_aes_ctr_256_new_fn)(struct 
aws_allocator *allocator,
+                                                              const struct 
aws_byte_cursor *key,
+                                                              const struct 
aws_byte_cursor *iv);
+
+typedef struct aws_symmetric_cipher *(aws_aes_gcm_256_new_fn)(struct 
aws_allocator *allocator,
+                                                              const struct 
aws_byte_cursor *key,
+                                                              const struct 
aws_byte_cursor *iv,
+                                                              const struct 
aws_byte_cursor *aad);
 
-typedef struct aws_symmetric_cipher *(
-    aws_aes_keywrap_256_new_fn)(struct aws_allocator *allocator, const struct 
aws_byte_cursor *key);
+typedef struct aws_symmetric_cipher *(aws_aes_keywrap_256_new_fn)(struct 
aws_allocator *allocator,
+                                                                  const struct 
aws_byte_cursor *key);
 
 enum aws_symmetric_cipher_state {
     AWS_SYMMETRIC_CIPHER_READY,
@@ -91,15 +87,15 @@
  *
  * respectively.
  *
- * If they are set, that key and iv will be copied internally and used by the 
cipher.
- *
- * If tag and aad are set they will be copied internally and used by the 
cipher.
- * decryption_tag would most likely be used for a decrypt operation to detect 
tampering or corruption.
- * The Tag for the most recent encrypt operation will be available in:
+ * If aad is set it will be copied and applied to the cipher.
  *
- * aws_symmetric_cipher_get_tag()
+ * If they are set, that key and iv will be copied internally and used by the 
cipher.
  *
- * If aad is set it will be copied and applied to the cipher.
+ * For decryption purposes tag can be provided via 
aws_symmetric_cipher_set_tag method.
+ * Note: for decrypt operations, tag must be provided before first decrypt is 
called.
+ * (this is a Windows BCrypt limitation, but for consistency's sake the same 
limitation is extended to other platforms)
+ * Tag generated during encryption can be retrieved using 
aws_symmetric_cipher_get_tag method
+ * after finalize is called.
  *
  * Returns NULL on failure. You can check aws_last_error() to get the error 
code indicating the failure cause.
  */
@@ -107,8 +103,7 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag);
+    const struct aws_byte_cursor *aad);
 
 /**
  * Creates an instance of AES Keywrap with 256-bit key.
@@ -194,6 +189,12 @@
  * Resets the cipher state for starting a new encrypt or decrypt operation. 
Note encrypt/decrypt cannot be mixed on the
  * same cipher without a call to reset in between them. However, this leaves 
the key, iv etc... materials setup for
  * immediate reuse.
+ * Note: GCM tag is not preserved between operations. If you intend to do 
encrypt followed directly by decrypt, make
+ * sure to make a copy of the tag before resetting the cipher and pass that 
copy for decryption.
+ *
+ * Warning: In most cases it's a really bad idea to reset a cipher and perform 
another operation using that cipher.
+ * Key and IV should not be reused for different operations. Instead of 
resetting the cipher, destroy the cipher
+ * and create a new one with a new key/iv pair. Use reset at your own risk, and 
only after careful consideration.
  *
  * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the 
failure cause if it returns
  * AWS_OP_ERR;
@@ -212,6 +213,11 @@
 AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct 
aws_symmetric_cipher *cipher);
 
 /**
+ * Sets the GMAC tag on the cipher. Does nothing for ciphers that do not 
support tag.
+ */
+AWS_CAL_API void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher 
*cipher, struct aws_byte_cursor tag);
+
+/**
  * Gets the original initialization vector as a cursor.
  * The memory in this cursor is unsafe as it refers to the internal buffer.
  * This was done because the use case doesn't require fetching these during an
@@ -244,7 +250,7 @@
 
 /**
 * Returns the current state of the cipher. The state of the cipher can be 
ready for use, finalized, or has encountered
- * an error. if the cipher is in a finished or eror state, it must be reset 
before further use.
+ * an error. if the cipher is in a finished or error state, it must be reset 
before further use.
  */
 AWS_CAL_API enum aws_symmetric_cipher_state 
aws_symmetric_cipher_get_state(const struct aws_symmetric_cipher *cipher);
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/cal.c 
new/aws-c-cal-0.7.0/source/cal.c
--- old/aws-c-cal-0.6.15/source/cal.c   2024-05-09 17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/cal.c    2024-06-21 17:52:29.000000000 +0200
@@ -6,7 +6,7 @@
 #include <aws/common/common.h>
 #include <aws/common/error.h>
 
-#define AWS_DEFINE_ERROR_INFO_CAL(CODE, STR) [(CODE)-0x1C00] = 
AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-cal")
+#define AWS_DEFINE_ERROR_INFO_CAL(CODE, STR) [(CODE) - 0x1C00] = 
AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-cal")
 
 static struct aws_error_info s_errors[] = {
     AWS_DEFINE_ERROR_INFO_CAL(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED, 
"Verify on a cryptographic signature failed."),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/darwin/commoncrypto_aes.c 
new/aws-c-cal-0.7.0/source/darwin/commoncrypto_aes.c
--- old/aws-c-cal-0.6.15/source/darwin/commoncrypto_aes.c       2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/darwin/commoncrypto_aes.c        2024-06-21 
17:52:29.000000000 +0200
@@ -360,6 +360,14 @@
     return &cc_cipher->cipher_base;
 }
 
+static int s_gcm_decrypt(struct aws_symmetric_cipher *cipher, struct 
aws_byte_cursor input, struct aws_byte_buf *out) {
+    if (cipher->tag.buffer == NULL) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return s_decrypt(cipher, input, out);
+}
+
 #ifdef SUPPORT_AES_GCM_VIA_SPI
 
 /*
@@ -428,7 +436,7 @@
     struct cc_aes_cipher *cc_cipher = cipher->impl;
 
     size_t tag_length = AWS_AES_256_CIPHER_BLOCK_SIZE;
-    CCStatus status = s_cc_crypto_gcm_finalize(cc_cipher->encryptor_handle, 
cipher->tag.buffer, tag_length);
+    CCStatus status = s_cc_crypto_gcm_finalize(cc_cipher->decryptor_handle, 
cipher->tag.buffer, tag_length);
     if (status != kCCSuccess) {
         cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
         return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
@@ -441,8 +449,7 @@
     struct cc_aes_cipher *cc_cipher,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *tag) {
+    const struct aws_byte_cursor *aad) {
     if (!cc_cipher->cipher_base.key.len) {
         if (key) {
             aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, 
cc_cipher->cipher_base.allocator, *key);
@@ -471,10 +478,6 @@
         aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.aad, 
cc_cipher->cipher_base.allocator, *aad);
     }
 
-    if (tag && tag->len) {
-        aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.tag, 
cc_cipher->cipher_base.allocator, *tag);
-    }
-
     CCCryptorStatus status = CCCryptorCreateWithMode(
         kCCEncrypt,
         kCCModeGCM,
@@ -548,9 +551,10 @@
     struct cc_aes_cipher *cc_cipher = cipher->impl;
 
     int ret_val = s_reset(cipher);
+    aws_byte_buf_clean_up_secure(&cc_cipher->cipher_base.tag);
 
     if (ret_val == AWS_OP_SUCCESS) {
-        ret_val = s_initialize_gcm_cipher_materials(cc_cipher, NULL, NULL, 
NULL, NULL);
+        ret_val = s_initialize_gcm_cipher_materials(cc_cipher, NULL, NULL, 
NULL);
     }
 
     return ret_val;
@@ -559,7 +563,7 @@
 static struct aws_symmetric_cipher_vtable s_aes_gcm_vtable = {
     .finalize_decryption = s_finalize_gcm_decryption,
     .finalize_encryption = s_finalize_gcm_encryption,
-    .decrypt = s_decrypt,
+    .decrypt = s_gcm_decrypt,
     .encrypt = s_encrypt,
     .provider = "CommonCrypto",
     .alg_name = "AES-GCM 256",
@@ -571,15 +575,14 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *tag) {
+    const struct aws_byte_cursor *aad) {
     struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, 
sizeof(struct cc_aes_cipher));
     cc_cipher->cipher_base.allocator = allocator;
     cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
     cc_cipher->cipher_base.impl = cc_cipher;
     cc_cipher->cipher_base.vtable = &s_aes_gcm_vtable;
 
-    if (s_initialize_gcm_cipher_materials(cc_cipher, key, iv, aad, tag) != 
AWS_OP_SUCCESS) {
+    if (s_initialize_gcm_cipher_materials(cc_cipher, key, iv, aad) != 
AWS_OP_SUCCESS) {
         s_destroy(&cc_cipher->cipher_base);
         return NULL;
     }
@@ -596,14 +599,12 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *tag) {
+    const struct aws_byte_cursor *aad) {
 
     (void)allocator;
     (void)key;
     (void)iv;
     (void)aad;
-    (void)tag;
     aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
     return NULL;
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/ecc.c 
new/aws-c-cal-0.7.0/source/ecc.c
--- old/aws-c-cal-0.6.15/source/ecc.c   2024-05-09 17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/ecc.c    2024-06-21 17:52:29.000000000 +0200
@@ -62,16 +62,14 @@
     return AWS_OP_SUCCESS;
 }
 
-typedef struct aws_ecc_key_pair *(aws_ecc_key_pair_new_from_public_key_fn)(
-    struct aws_allocator *allocator,
-    enum aws_ecc_curve_name curve_name,
-    const struct aws_byte_cursor *public_key_x,
-    const struct aws_byte_cursor *public_key_y);
+typedef struct aws_ecc_key_pair 
*(aws_ecc_key_pair_new_from_public_key_fn)(struct aws_allocator *allocator,
+                                                                           
enum aws_ecc_curve_name curve_name,
+                                                                           
const struct aws_byte_cursor *public_key_x,
+                                                                           
const struct aws_byte_cursor *public_key_y);
 
-typedef struct aws_ecc_key_pair *(aws_ecc_key_pair_new_from_private_key_fn)(
-    struct aws_allocator *allocator,
-    enum aws_ecc_curve_name curve_name,
-    const struct aws_byte_cursor *priv_key);
+typedef struct aws_ecc_key_pair 
*(aws_ecc_key_pair_new_from_private_key_fn)(struct aws_allocator *allocator,
+                                                                            
enum aws_ecc_curve_name curve_name,
+                                                                            
const struct aws_byte_cursor *priv_key);
 
 #ifndef BYO_CRYPTO
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/rsa.c 
new/aws-c-cal-0.7.0/source/rsa.c
--- old/aws-c-cal-0.6.15/source/rsa.c   2024-05-09 17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/rsa.c    2024-06-21 17:52:29.000000000 +0200
@@ -8,11 +8,11 @@
 #include <aws/cal/hash.h>
 #include <aws/cal/private/der.h>
 
-typedef struct aws_rsa_key_pair *(
-    aws_rsa_key_pair_new_from_public_pkcs1_fn)(struct aws_allocator 
*allocator, struct aws_byte_cursor public_key);
+typedef struct aws_rsa_key_pair 
*(aws_rsa_key_pair_new_from_public_pkcs1_fn)(struct aws_allocator *allocator,
+                                                                             
struct aws_byte_cursor public_key);
 
-typedef struct aws_rsa_key_pair *(
-    aws_rsa_key_pair_new_from_private_pkcs1_fn)(struct aws_allocator 
*allocator, struct aws_byte_cursor private_key);
+typedef struct aws_rsa_key_pair 
*(aws_rsa_key_pair_new_from_private_pkcs1_fn)(struct aws_allocator *allocator,
+                                                                              
struct aws_byte_cursor private_key);
 
 #ifndef BYO_CRYPTO
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/symmetric_cipher.c 
new/aws-c-cal-0.7.0/source/symmetric_cipher.c
--- old/aws-c-cal-0.6.15/source/symmetric_cipher.c      2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/symmetric_cipher.c       2024-06-21 
17:52:29.000000000 +0200
@@ -22,8 +22,7 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag);
+    const struct aws_byte_cursor *aad);
 
 extern struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl(
     struct aws_allocator *allocator,
@@ -54,13 +53,11 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag) {
+    const struct aws_byte_cursor *aad) {
     (void)allocator;
     (void)key;
     (void)iv;
     (void)aad;
-    (void)decryption_tag;
     abort();
 }
 
@@ -127,13 +124,12 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag) {
+    const struct aws_byte_cursor *aad) {
     if (s_validate_key_materials(key, AWS_AES_256_KEY_BYTE_LEN, iv, 
AWS_AES_256_CIPHER_BLOCK_SIZE - sizeof(uint32_t)) !=
         AWS_OP_SUCCESS) {
         return NULL;
     }
-    return s_aes_gcm_new_fn(allocator, key, iv, aad, decryption_tag);
+    return s_aes_gcm_new_fn(allocator, key, iv, aad);
 }
 
 struct aws_symmetric_cipher *aws_aes_keywrap_256_new(
@@ -223,6 +219,12 @@
     return aws_byte_cursor_from_buf(&cipher->tag);
 }
 
+void aws_symmetric_cipher_set_tag(struct aws_symmetric_cipher *cipher, struct 
aws_byte_cursor tag) {
+    AWS_PRECONDITION(aws_byte_cursor_is_valid(&tag));
+    aws_byte_buf_clean_up_secure(&cipher->tag);
+    aws_byte_buf_init_copy_from_cursor(&cipher->tag, cipher->allocator, tag);
+}
+
 struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector(const 
struct aws_symmetric_cipher *cipher) {
     return aws_byte_cursor_from_buf(&cipher->iv);
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/unix/openssl_aes.c 
new/aws-c-cal-0.7.0/source/unix/openssl_aes.c
--- old/aws-c-cal-0.6.15/source/unix/openssl_aes.c      2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/unix/openssl_aes.c       2024-06-21 
17:52:29.000000000 +0200
@@ -123,9 +123,7 @@
     aws_byte_buf_clean_up_secure(&cipher->key);
     aws_byte_buf_clean_up_secure(&cipher->iv);
 
-    if (cipher->tag.buffer) {
-        aws_byte_buf_clean_up_secure(&cipher->tag);
-    }
+    aws_byte_buf_clean_up_secure(&cipher->tag);
 
     if (cipher->aad.buffer) {
         aws_byte_buf_clean_up_secure(&cipher->aad);
@@ -326,28 +324,51 @@
     return NULL;
 }
 
+static int s_gcm_decrypt(struct aws_symmetric_cipher *cipher, struct 
aws_byte_cursor input, struct aws_byte_buf *out) {
+    if (cipher->tag.buffer == NULL) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+    }
+
+    return s_decrypt(cipher, input, out);
+}
+
 static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, 
struct aws_byte_buf *out) {
     struct openssl_aes_cipher *openssl_cipher = cipher->impl;
 
+    if (cipher->tag.buffer == NULL) {
+        aws_byte_buf_init(&cipher->tag, cipher->allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
+    }
+
     int ret_val = s_finalize_encryption(cipher, out);
 
     if (ret_val == AWS_OP_SUCCESS) {
-        if (!cipher->tag.len) {
-            if (!EVP_CIPHER_CTX_ctrl(
-                    openssl_cipher->encryptor_ctx,
-                    EVP_CTRL_GCM_GET_TAG,
-                    (int)cipher->tag.capacity,
-                    cipher->tag.buffer)) {
-                cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
-                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
-            }
-            cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
+        if (!EVP_CIPHER_CTX_ctrl(
+                openssl_cipher->encryptor_ctx, EVP_CTRL_GCM_GET_TAG, 
(int)cipher->tag.capacity, cipher->tag.buffer)) {
+            cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
+            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
         }
+        cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
     }
 
     return ret_val;
 }
 
+static int s_finalize_gcm_decryption(struct aws_symmetric_cipher *cipher, 
struct aws_byte_buf *out) {
+    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
+
+    if (openssl_cipher->cipher_base.tag.len) {
+        if (!EVP_CIPHER_CTX_ctrl(
+                openssl_cipher->decryptor_ctx,
+                EVP_CTRL_GCM_SET_TAG,
+                (int)openssl_cipher->cipher_base.tag.len,
+                openssl_cipher->cipher_base.tag.buffer)) {
+            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        }
+    }
+
+    return s_finalize_decryption(cipher, out);
+}
+
 static int s_init_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) {
     struct openssl_aes_cipher *openssl_cipher = cipher->impl;
 
@@ -388,15 +409,7 @@
         }
     }
 
-    if (openssl_cipher->cipher_base.tag.len) {
-        if (!EVP_CIPHER_CTX_ctrl(
-                openssl_cipher->decryptor_ctx,
-                EVP_CTRL_GCM_SET_TAG,
-                (int)openssl_cipher->cipher_base.tag.len,
-                openssl_cipher->cipher_base.tag.buffer)) {
-            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
-        }
-    }
+    aws_byte_buf_clean_up_secure(&openssl_cipher->cipher_base.tag);
 
     return AWS_OP_SUCCESS;
 }
@@ -416,9 +429,9 @@
     .provider = "OpenSSL Compatible LibCrypto",
     .destroy = s_destroy,
     .reset = s_reset_gcm_cipher_materials,
-    .decrypt = s_decrypt,
+    .decrypt = s_gcm_decrypt,
     .encrypt = s_encrypt,
-    .finalize_decryption = s_finalize_decryption,
+    .finalize_decryption = s_finalize_gcm_decryption,
     .finalize_encryption = s_finalize_gcm_encryption,
 };
 
@@ -426,8 +439,7 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag) {
+    const struct aws_byte_cursor *aad) {
 
     struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, 
sizeof(struct openssl_aes_cipher));
     cipher->cipher_base.allocator = allocator;
@@ -465,14 +477,6 @@
         aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.aad, 
allocator, *aad);
     }
 
-    /* Set tag for the decryptor to use.*/
-    if (decryption_tag) {
-        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.tag, 
allocator, *decryption_tag);
-    } else {
-        /* we'll need this later when we grab the tag during encryption time. 
*/
-        aws_byte_buf_init(&cipher->cipher_base.tag, allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
-    }
-
     /* Initialize the cipher contexts with the specified key and IV. */
     if (s_init_gcm_cipher_materials(&cipher->cipher_base)) {
         goto error;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/unix/openssl_platform_init.c 
new/aws-c-cal-0.7.0/source/unix/openssl_platform_init.c
--- old/aws-c-cal-0.6.15/source/unix/openssl_platform_init.c    2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/unix/openssl_platform_init.c     2024-06-21 
17:52:29.000000000 +0200
@@ -22,6 +22,10 @@
 #define OPENSSL_SUPPRESS_DEPRECATED
 #include <openssl/crypto.h>
 
+#if defined(OPENSSL_IS_AWSLC)
+#    include <openssl/service_indicator.h>
+#endif
+
 static struct openssl_hmac_ctx_table hmac_ctx_table;
 static struct openssl_evp_md_ctx_table evp_md_ctx_table;
 
@@ -555,6 +559,47 @@
     return AWS_LIBCRYPTO_NONE;
 }
 
+/* Validate at runtime that we're linked against the same libcrypto we 
compiled against. */
+static void s_validate_libcrypto_linkage(void) {
+    /* NOTE: the choice of stack buffer size is somewhat arbitrary. it's
+     * possible, but unlikely, that libcrypto version strings may exceed this 
in
+     * the future. we guard against buffer overflow by limiting write size in
+     * snprintf with the size of the buffer itself. if libcrypto version 
strings
+     * do eventually exceed the chosen size, this runtime check will fail and
+     * will need to be addressed by increasing buffer size.*/
+    char expected_version[64] = {0};
+#if defined(OPENSSL_IS_AWSLC)
+    /* get FIPS mode at runtime because headers don't give any indication of
+     * AWS-LC's FIPSness at aws-c-cal compile time. version number can still be
+     * captured at preprocess/compile time from AWSLC_VERSION_NUMBER_STRING.*/
+    const char *mode = FIPS_mode() ? "AWS-LC FIPS" : "AWS-LC";
+    snprintf(expected_version, sizeof(expected_version), "%s %s", mode, 
AWSLC_VERSION_NUMBER_STRING);
+#elif defined(OPENSSL_IS_BORINGSSL)
+    snprintf(expected_version, sizeof(expected_version), "BoringSSL");
+#elif defined(OPENSSL_IS_OPENSSL)
+    snprintf(expected_version, sizeof(expected_version), OPENSSL_VERSION_TEXT);
+#elif !defined(BYO_CRYPTO)
+#    error Unsupported libcrypto!
+#endif
+    const char *runtime_version = SSLeay_version(SSLEAY_VERSION);
+    AWS_LOGF_DEBUG(
+        AWS_LS_CAL_LIBCRYPTO_RESOLVE,
+        "Compiled with libcrypto %s, linked to libcrypto %s",
+        expected_version,
+        runtime_version);
+#if defined(OPENSSL_IS_OPENSSL)
+    /* Validate that the string "AWS-LC" doesn't appear in OpenSSL version 
str. */
+    AWS_FATAL_ASSERT(strstr("AWS-LC", expected_version) == NULL);
+    AWS_FATAL_ASSERT(strstr("AWS-LC", runtime_version) == NULL);
+    /* Validate both expected and runtime versions begin with OpenSSL's 
version str prefix. */
+    const char *openssl_prefix = "OpenSSL ";
+    AWS_FATAL_ASSERT(strncmp(openssl_prefix, expected_version, 
strlen(openssl_prefix)) == 0);
+    AWS_FATAL_ASSERT(strncmp(openssl_prefix, runtime_version, 
strlen(openssl_prefix)) == 0);
+#else
+    AWS_FATAL_ASSERT(strcmp(expected_version, runtime_version) == 0 && 
"libcrypto mislink");
+#endif
+}
+
 static enum aws_libcrypto_version s_resolve_libcrypto(void) {
     /* Try to auto-resolve against what's linked in/process space */
     AWS_LOGF_DEBUG(AWS_LS_CAL_LIBCRYPTO_RESOLVE, "searching process and loaded 
modules");
@@ -583,6 +628,8 @@
         result = s_resolve_libcrypto_lib();
     }
 
+    s_validate_libcrypto_linkage();
+
     return result;
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/source/windows/bcrypt_aes.c 
new/aws-c-cal-0.7.0/source/windows/bcrypt_aes.c
--- old/aws-c-cal-0.6.15/source/windows/bcrypt_aes.c    2024-05-09 
17:57:57.000000000 +0200
+++ new/aws-c-cal-0.7.0/source/windows/bcrypt_aes.c     2024-06-21 
17:52:29.000000000 +0200
@@ -9,8 +9,6 @@
 /* keep the space to prevent formatters from reordering this with the 
Windows.h header. */
 #include <bcrypt.h>
 
-#define NT_SUCCESS(status) ((NTSTATUS)status >= 0)
-
 /* handles for AES modes and algorithms we'll be using. These are initialized 
once and allowed to leak. */
 static aws_thread_once s_aes_thread_once = AWS_THREAD_ONCE_STATIC_INIT;
 static BCRYPT_ALG_HANDLE s_aes_cbc_algorithm_handle = NULL;
@@ -32,7 +30,7 @@
     struct aws_byte_buf overflow;
     /* This gets updated as the algorithms run so it isn't the original IV. 
That's why its separate */
     struct aws_byte_buf working_iv;
-    /* A buffer to keep around for the GMAC for GCM. */
+    /* A buffer to keep around for the GMAC for GCM. Purely for BCrypt needs, 
we don't touch it. */
     struct aws_byte_buf working_mac_buffer;
 };
 
@@ -50,7 +48,7 @@
         (ULONG)(wcslen(BCRYPT_CHAIN_MODE_CBC) + 1),
         0);
 
-    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for CBC chaining 
mode failed");
+    AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status) && "BCryptSetProperty for CBC 
chaining mode failed");
 
     /* Set up GCM algorithm */
     status = BCryptOpenAlgorithmProvider(&s_aes_gcm_algorithm_handle, 
BCRYPT_AES_ALGORITHM, NULL, 0);
@@ -63,7 +61,7 @@
         (ULONG)(wcslen(BCRYPT_CHAIN_MODE_GCM) + 1),
         0);
 
-    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for GCM chaining 
mode failed");
+    AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status) && "BCryptSetProperty for GCM 
chaining mode failed");
 
     /* Setup CTR algorithm */
     status = BCryptOpenAlgorithmProvider(&s_aes_ctr_algorithm_handle, 
BCRYPT_AES_ALGORITHM, NULL, 0);
@@ -78,13 +76,13 @@
         (ULONG)(wcslen(BCRYPT_CHAIN_MODE_ECB) + 1),
         0);
 
-    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for ECB chaining 
mode failed");
+    AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status) && "BCryptSetProperty for ECB 
chaining mode failed");
 
     /* Setup KEYWRAP algorithm */
     status = BCryptOpenAlgorithmProvider(&s_aes_keywrap_algorithm_handle, 
BCRYPT_AES_ALGORITHM, NULL, 0);
     AWS_FATAL_ASSERT(s_aes_ctr_algorithm_handle && 
"BCryptOpenAlgorithmProvider() failed");
 
-    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for KeyWrap 
failed");
+    AWS_FATAL_ASSERT(BCRYPT_SUCCESS(status) && "BCryptSetProperty for KeyWrap 
failed");
 }
 
 static BCRYPT_KEY_HANDLE s_import_key_blob(
@@ -109,7 +107,7 @@
 
     aws_byte_buf_clean_up_secure(&key_data_buf);
 
-    if (!NT_SUCCESS(status)) {
+    if (!BCRYPT_SUCCESS(status)) {
         aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
         return NULL;
     }
@@ -156,7 +154,6 @@
     struct aes_bcrypt_cipher *cipher,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *tag,
     const struct aws_byte_cursor *aad,
     size_t iv_size,
     bool is_ctr_mode,
@@ -180,19 +177,10 @@
         }
     }
 
+    aws_byte_buf_clean_up_secure(&cipher->cipher.tag);
+
     /* these fields are only used in GCM mode. */
     if (is_gcm) {
-        if (!cipher->cipher.tag.len) {
-            if (tag) {
-                aws_byte_buf_init_copy_from_cursor(&cipher->cipher.tag, 
cipher->cipher.allocator, *tag);
-            } else {
-                aws_byte_buf_init(&cipher->cipher.tag, 
cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
-                aws_byte_buf_secure_zero(&cipher->cipher.tag);
-                /* windows handles this, just go ahead and tell the API it's 
got a length. */
-                cipher->cipher.tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
-            }
-        }
-
         if (!cipher->cipher.aad.len) {
             if (aad) {
                 aws_byte_buf_init_copy_from_cursor(&cipher->cipher.aad, 
cipher->cipher.allocator, *aad);
@@ -217,7 +205,7 @@
     cipher->cipher_flags = 0;
 
     /* In GCM mode, the IV is set on the auth info pointer and a working copy
-       is passed to each encryt call. CBC and CTR mode function differently 
here
+       is passed to each encrypt call. CBC and CTR mode function differently 
here
        and the IV is set on the key itself. */
     if (!is_gcm && cipher->cipher.iv.len) {
         NTSTATUS status = BCryptSetProperty(
@@ -227,7 +215,7 @@
             (ULONG)cipher->cipher.iv.len,
             0);
 
-        if (!NT_SUCCESS(status)) {
+        if (!BCRYPT_SUCCESS(status)) {
             cipher->cipher.state = AWS_SYMMETRIC_CIPHER_ERROR;
             return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
         }
@@ -241,8 +229,8 @@
         cipher->auth_info_ptr->pbNonce = cipher->cipher.iv.buffer;
         cipher->auth_info_ptr->cbNonce = (ULONG)cipher->cipher.iv.len;
         cipher->auth_info_ptr->dwFlags = BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG;
-        cipher->auth_info_ptr->pbTag = cipher->cipher.tag.buffer;
-        cipher->auth_info_ptr->cbTag = (ULONG)cipher->cipher.tag.len;
+        cipher->auth_info_ptr->pbTag = NULL;
+        cipher->auth_info_ptr->cbTag = 0;
         cipher->auth_info_ptr->pbMacContext = 
cipher->working_mac_buffer.buffer;
         cipher->auth_info_ptr->cbMacContext = 
(ULONG)cipher->working_mac_buffer.len;
 
@@ -289,8 +277,7 @@
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
     s_clear_reusable_components(cipher);
-    return s_initialize_cipher_materials(
-        cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, 
false, false);
+    return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE, false, false);
 }
 
 static int s_reset_ctr_cipher(struct aws_symmetric_cipher *cipher) {
@@ -301,16 +288,14 @@
     /* reset the working iv back to the original IV. We do this because
        we're manually maintaining the counter. */
     aws_byte_buf_append_dynamic(&cipher_impl->working_iv, &iv_cur);
-    return s_initialize_cipher_materials(
-        cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, 
true, false);
+    return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE, true, false);
 }
 
 static int s_reset_gcm_cipher(struct aws_symmetric_cipher *cipher) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
     s_clear_reusable_components(cipher);
-    return s_initialize_cipher_materials(
-        cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE - 
4, false, true);
+    return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true);
 }
 
 static int s_aes_default_encrypt(
@@ -319,10 +304,6 @@
     struct aws_byte_buf *out) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
-    if (to_encrypt->len == 0) {
-        return AWS_OP_SUCCESS;
-    }
-
     size_t predicted_write_length =
         cipher_impl->cipher_flags & BCRYPT_BLOCK_PADDING
             ? to_encrypt->len + (AWS_AES_256_CIPHER_BLOCK_SIZE - 
(to_encrypt->len % AWS_AES_256_CIPHER_BLOCK_SIZE))
@@ -357,7 +338,7 @@
         &length_written,
         cipher_impl->cipher_flags);
 
-    if (!NT_SUCCESS(status)) {
+    if (!BCRYPT_SUCCESS(status)) {
         cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
         return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
     }
@@ -412,7 +393,11 @@
 
     struct aws_byte_buf final_to_encrypt = s_fill_in_overflow(cipher, 
&to_encrypt);
     struct aws_byte_cursor final_cur = 
aws_byte_cursor_from_buf(&final_to_encrypt);
-    int ret_val = s_aes_default_encrypt(cipher, &final_cur, out);
+    int ret_val = AWS_OP_SUCCESS;
+    if (final_cur.len > 0) {
+        ret_val = s_aes_default_encrypt(cipher, &final_cur, out);
+    }
+
     aws_byte_buf_clean_up_secure(&final_to_encrypt);
 
     return ret_val;
@@ -440,10 +425,6 @@
     struct aws_byte_buf *out) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
-    if (to_decrypt->len == 0) {
-        return AWS_OP_SUCCESS;
-    }
-
     PUCHAR iv = NULL;
     ULONG iv_size = 0;
 
@@ -474,7 +455,7 @@
         &length_written,
         cipher_impl->cipher_flags);
 
-    if (!NT_SUCCESS(status)) {
+    if (!BCRYPT_SUCCESS(status)) {
         cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
         return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
     }
@@ -489,7 +470,11 @@
     struct aws_byte_buf *out) {
     struct aws_byte_buf final_to_decrypt = s_fill_in_overflow(cipher, 
&to_decrypt);
     struct aws_byte_cursor final_cur = 
aws_byte_cursor_from_buf(&final_to_decrypt);
-    int ret_val = s_default_aes_decrypt(cipher, &final_cur, out);
+    int ret_val = AWS_OP_SUCCESS;
+    if (final_cur.len > 0) {
+        ret_val = s_default_aes_decrypt(cipher, &final_cur, out);
+    }
+
     aws_byte_buf_clean_up_secure(&final_to_decrypt);
 
     return ret_val;
@@ -537,7 +522,7 @@
     cipher->alg_handle = s_aes_cbc_algorithm_handle;
     cipher->cipher.vtable = &s_aes_cbc_vtable;
 
-    if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE, false, false) !=
+    if (s_initialize_cipher_materials(cipher, key, iv, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE, false, false) !=
         AWS_OP_SUCCESS) {
         goto error;
     }
@@ -555,55 +540,113 @@
     return NULL;
 }
 
-/* the buffer management for this mode is a good deal easier because we don't 
care about padding.
-   We do care about keeping the final buffer less than a block size til the 
finalize call so we can
-   turn the auth chaining flag off and compute the GMAC correctly. */
+/*
+ * The buffer management for gcm mode is a good deal easier than ctr and cbc 
modes because we don't care about padding.
+ * In chained mode, BCrypt expects the data to be passed in in multiples of 
block size,
+ * followed by a finalize call that turns off chaining and provides any 
remaining data.
+ * This function takes care of managing this state - you give it data to work 
and cipher state and
+ * it will return what data can be sent to bcrypt now and as side effect will 
update the cipher state
+ * with any leftover data.
+ * Note: this function takes a scratch buffer that might be used to back 
data returned by the cursor.
+ * It is on caller to cleanup that scratch buffer.
+ */
+static struct aws_byte_cursor s_gcm_get_working_slice(
+    struct aes_bcrypt_cipher *cipher_impl,
+    struct aws_byte_cursor data,
+    struct aws_byte_buf *scratch) {
+    AWS_PRECONDITION(cipher_impl);
+    AWS_PRECONDITION(scratch);
+
+    AWS_ZERO_STRUCT(*scratch);
+
+    struct aws_byte_cursor working_cur;
+    AWS_ZERO_STRUCT(working_cur);
+    /* If there's overflow, prepend it to the working buffer, then append the 
data */
+    if (cipher_impl->overflow.len) {
+        aws_byte_buf_init(scratch, cipher_impl->cipher.allocator, 
cipher_impl->overflow.len + data.len);
+        struct aws_byte_cursor overflow_cur = 
aws_byte_cursor_from_buf(&cipher_impl->overflow);
+        aws_byte_buf_append(scratch, &overflow_cur);
+        aws_byte_buf_reset(&cipher_impl->overflow, true);
+        aws_byte_buf_append(scratch, &data);
+        working_cur = aws_byte_cursor_from_buf(scratch);
+    } else {
+        working_cur = data;
+    }
+
+    struct aws_byte_cursor return_cur;
+    AWS_ZERO_STRUCT(return_cur);
+
+    if (working_cur.len >= AWS_AES_256_CIPHER_BLOCK_SIZE) {
+        size_t seek_to = working_cur.len - (working_cur.len % 
AWS_AES_256_CIPHER_BLOCK_SIZE);
+        return_cur = aws_byte_cursor_advance(&working_cur, seek_to);
+        aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_cur);
+
+    } else {
+        aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_cur);
+    }
+
+    return return_cur;
+}
+
+/*
+ * bcrypt requires pbTag and cbTag initialized before starting chained encrypt 
or decrypt.
+ * why bcrypt needs it initialized early and every other lib can wait until later is 
a mystery.
+ * following function is a helper to init the state correctly for encrypt (and 
decrypt has a similar function later).
+ * For encrypt this blows away whatever tag user might have set and ensures 
that it's at least block size.
+ * Note: gcm supports shorter tags, but bcrypt always generates block sized one
+ * (caller can decide to make them shorter by removing bytes from the end).
+ */
+static void s_gcm_ensure_tag_setup_for_encrypt(struct aws_symmetric_cipher 
*cipher) {
+    struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+    if (cipher_impl->auth_info_ptr->pbTag == NULL) {
+        if (cipher->tag.buffer == NULL) {
+            aws_byte_buf_init(&cipher->tag, cipher->allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
+        } else {
+            aws_byte_buf_secure_zero(&cipher->tag);
+            aws_byte_buf_reserve(&cipher->tag, AWS_AES_256_CIPHER_BLOCK_SIZE);
+        }
+        cipher_impl->auth_info_ptr->pbTag = cipher->tag.buffer;
+        cipher_impl->auth_info_ptr->cbTag = (ULONG)cipher->tag.capacity;
+        /* bcrypt will either end up filling full tag buffer or in an error 
state,
+         * in which tag will not be correct */
+        cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
+    }
+}
+
 static int s_aes_gcm_encrypt(
     struct aws_symmetric_cipher *cipher,
     struct aws_byte_cursor to_encrypt,
     struct aws_byte_buf *out) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
-    if (to_encrypt.len == 0) {
-        return AWS_OP_SUCCESS;
-    }
+    s_gcm_ensure_tag_setup_for_encrypt(cipher);
 
     struct aws_byte_buf working_buffer;
-    AWS_ZERO_STRUCT(working_buffer);
-
-    /* If there's overflow, prepend it to the working buffer, then append the 
data to encrypt */
-    if (cipher_impl->overflow.len) {
-        struct aws_byte_cursor overflow_cur = 
aws_byte_cursor_from_buf(&cipher_impl->overflow);
+    struct aws_byte_cursor working_cur = s_gcm_get_working_slice(cipher_impl, 
to_encrypt, &working_buffer);
 
-        aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, 
overflow_cur);
-        aws_byte_buf_reset(&cipher_impl->overflow, true);
-        aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt);
-    } else {
-        aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, 
to_encrypt);
+    int ret_val = AWS_OP_SUCCESS;
+    if (working_cur.len > 0) {
+        ret_val = s_aes_default_encrypt(cipher, &working_cur, out);
     }
 
-    int ret_val = AWS_OP_ERR;
+    aws_byte_buf_clean_up_secure(&working_buffer);
+    return ret_val;
+}
 
-    /* whatever is remaining in an incomplete block, copy it to the overflow. 
If we don't have a full block
-       wait til next time or for the finalize call. */
-    if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) {
-        size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE;
-        size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + 
offset);
-        struct aws_byte_cursor working_buf_cur = 
aws_byte_cursor_from_buf(&working_buffer);
-        struct aws_byte_cursor working_slice = 
aws_byte_cursor_advance(&working_buf_cur, seek_to);
-        /* this is just here to make it obvious. The previous line advanced 
working_buf_cur to where the
-           new overfloew should be. */
-        struct aws_byte_cursor new_overflow_cur = working_buf_cur;
-        aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur);
+static int s_gcm_ensure_tag_setup_for_decrypt(struct aws_symmetric_cipher 
*cipher) {
+    struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
-        ret_val = s_aes_default_encrypt(cipher, &working_slice, out);
-    } else {
-        struct aws_byte_cursor working_buffer_cur = 
aws_byte_cursor_from_buf(&working_buffer);
-        aws_byte_buf_append_dynamic(&cipher_impl->overflow, 
&working_buffer_cur);
-        ret_val = AWS_OP_SUCCESS;
+    if (cipher->tag.buffer == NULL) {
+        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
     }
-    aws_byte_buf_clean_up_secure(&working_buffer);
-    return ret_val;
+
+    if (cipher_impl->auth_info_ptr->pbTag == NULL) {
+        cipher_impl->auth_info_ptr->pbTag = cipher->tag.buffer;
+        cipher_impl->auth_info_ptr->cbTag = (ULONG)cipher->tag.len;
+    }
+
+    return AWS_OP_SUCCESS;
 }
 
 static int s_aes_gcm_decrypt(
@@ -612,44 +655,18 @@
     struct aws_byte_buf *out) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
-    if (to_decrypt.len == 0) {
-        return AWS_OP_SUCCESS;
+    if (s_gcm_ensure_tag_setup_for_decrypt(cipher)) {
+        return AWS_OP_ERR;
     }
 
     struct aws_byte_buf working_buffer;
-    AWS_ZERO_STRUCT(working_buffer);
+    struct aws_byte_cursor working_cur = s_gcm_get_working_slice(cipher_impl, 
to_decrypt, &working_buffer);
 
-    /* If there's overflow, prepend it to the working buffer, then append the 
data to encrypt */
-    if (cipher_impl->overflow.len) {
-        struct aws_byte_cursor overflow_cur = 
aws_byte_cursor_from_buf(&cipher_impl->overflow);
-
-        aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, 
overflow_cur);
-        aws_byte_buf_reset(&cipher_impl->overflow, true);
-        aws_byte_buf_append_dynamic(&working_buffer, &to_decrypt);
-    } else {
-        aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, 
to_decrypt);
+    int ret_val = AWS_OP_SUCCESS;
+    if (working_cur.len > 0) {
+        ret_val = s_default_aes_decrypt(cipher, &working_cur, out);
     }
 
-    int ret_val = AWS_OP_ERR;
-
-    /* whatever is remaining in an incomplete block, copy it to the overflow. 
If we don't have a full block
-       wait til next time or for the finalize call. */
-    if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) {
-        size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE;
-        size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + 
offset);
-        struct aws_byte_cursor working_buf_cur = 
aws_byte_cursor_from_buf(&working_buffer);
-        struct aws_byte_cursor working_slice = 
aws_byte_cursor_advance(&working_buf_cur, seek_to);
-        /* this is just here to make it obvious. The previous line advanced 
working_buf_cur to where the
-           new overfloew should be. */
-        struct aws_byte_cursor new_overflow_cur = working_buf_cur;
-        aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur);
-
-        ret_val = s_default_aes_decrypt(cipher, &working_slice, out);
-    } else {
-        struct aws_byte_cursor working_buffer_cur = 
aws_byte_cursor_from_buf(&working_buffer);
-        aws_byte_buf_append_dynamic(&cipher_impl->overflow, 
&working_buffer_cur);
-        ret_val = AWS_OP_SUCCESS;
-    }
     aws_byte_buf_clean_up_secure(&working_buffer);
     return ret_val;
 }
@@ -657,10 +674,14 @@
 static int s_aes_gcm_finalize_encryption(struct aws_symmetric_cipher *cipher, 
struct aws_byte_buf *out) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
 
+    s_gcm_ensure_tag_setup_for_encrypt(cipher);
+
     cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG;
+
     /* take whatever is remaining, make the final encrypt call with the auth 
chain flag turned off. */
     struct aws_byte_cursor remaining_cur = 
aws_byte_cursor_from_buf(&cipher_impl->overflow);
     int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out);
+
     aws_byte_buf_secure_zero(&cipher_impl->overflow);
     aws_byte_buf_secure_zero(&cipher_impl->working_iv);
     return ret_val;
@@ -668,7 +689,13 @@
 
 static int s_aes_gcm_finalize_decryption(struct aws_symmetric_cipher *cipher, 
struct aws_byte_buf *out) {
     struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+    if (s_gcm_ensure_tag_setup_for_decrypt(cipher)) {
+        return AWS_OP_ERR;
+    }
+
     cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG;
+
     /* take whatever is remaining, make the final decrypt call with the auth 
chain flag turned off. */
     struct aws_byte_cursor remaining_cur = 
aws_byte_cursor_from_buf(&cipher_impl->overflow);
     int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out);
@@ -692,8 +719,7 @@
     struct aws_allocator *allocator,
     const struct aws_byte_cursor *key,
     const struct aws_byte_cursor *iv,
-    const struct aws_byte_cursor *aad,
-    const struct aws_byte_cursor *decryption_tag) {
+    const struct aws_byte_cursor *aad) {
 
     aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL);
     struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, 
sizeof(struct aes_bcrypt_cipher));
@@ -705,8 +731,8 @@
     cipher->cipher.vtable = &s_aes_gcm_vtable;
 
     /* GCM does the counting under the hood, so we let it handle the final 4 
bytes of the IV. */
-    if (s_initialize_cipher_materials(
-            cipher, key, iv, decryption_tag, aad, 
AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) != AWS_OP_SUCCESS) {
+    if (s_initialize_cipher_materials(cipher, key, iv, aad, 
AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) !=
+        AWS_OP_SUCCESS) {
         goto error;
     }
 
@@ -830,7 +856,7 @@
                 &lengthWritten,
                 cipher_impl->cipher_flags);
 
-            if (!NT_SUCCESS(status)) {
+            if (!BCRYPT_SUCCESS(status)) {
                 cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
                 ret_val = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
                 goto clean_up;
@@ -913,7 +939,7 @@
     cipher->alg_handle = s_aes_ctr_algorithm_handle;
     cipher->cipher.vtable = &s_aes_ctr_vtable;
 
-    if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE, true, false) !=
+    if (s_initialize_cipher_materials(cipher, key, iv, NULL, 
AWS_AES_256_CIPHER_BLOCK_SIZE, true, false) !=
         AWS_OP_SUCCESS) {
         goto error;
     }
@@ -963,7 +989,7 @@
     status = BCryptExportKey(
         key_handle_to_encrypt, cipher_impl->key_handle, 
BCRYPT_AES_WRAP_KEY_BLOB, NULL, 0, &output_size, 0);
 
-    if (!NT_SUCCESS(status)) {
+    if (!BCRYPT_SUCCESS(status)) {
         cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
         return aws_raise_error(AWS_ERROR_INVALID_STATE);
     }
@@ -985,7 +1011,7 @@
         &len_written,
         0);
 
-    if (!NT_SUCCESS(status)) {
+    if (!BCRYPT_SUCCESS(status)) {
         cipher->state = AWS_SYMMETRIC_CIPHER_ERROR;
         goto clean_up;
     }
@@ -1022,7 +1048,7 @@
         0);
     int ret_val = AWS_OP_ERR;
 
-    if (NT_SUCCESS(status) && import_key) {
+    if (BCRYPT_SUCCESS(status) && import_key) {
         ULONG export_size = 0;
 
         struct aws_byte_buf key_data_blob;
@@ -1041,7 +1067,7 @@
 
         key_data_blob.len += export_size;
 
-        if (NT_SUCCESS(status)) {
+        if (BCRYPT_SUCCESS(status)) {
 
             if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, 
export_size)) {
                 goto clean_up;
@@ -1052,7 +1078,7 @@
             AWS_FATAL_ASSERT(
                 aws_byte_buf_write(
                     out, key_data_blob.buffer + 
sizeof(BCRYPT_KEY_DATA_BLOB_HEADER), stream_header->cbKeyData) &&
-                "Copying key data failed but the allocation should have 
already occured successfully");
+                "Copying key data failed but the allocation should have 
already occurred successfully");
             ret_val = AWS_OP_SUCCESS;
 
         } else {
@@ -1077,7 +1103,7 @@
 
     s_clear_reusable_components(cipher);
 
-    return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, NULL, 
0, false, false);
+    return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, 0, 
false, false);
 }
 
 static struct aws_symmetric_cipher_vtable s_aes_keywrap_vtable = {
@@ -1104,7 +1130,7 @@
     cipher->alg_handle = s_aes_keywrap_algorithm_handle;
     cipher->cipher.vtable = &s_aes_keywrap_vtable;
 
-    if (s_initialize_cipher_materials(cipher, key, NULL, NULL, NULL, 0, false, 
false) != AWS_OP_SUCCESS) {
+    if (s_initialize_cipher_materials(cipher, key, NULL, NULL, 0, false, 
false) != AWS_OP_SUCCESS) {
         goto error;
     }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/tests/CMakeLists.txt 
new/aws-c-cal-0.7.0/tests/CMakeLists.txt
--- old/aws-c-cal-0.6.15/tests/CMakeLists.txt   2024-05-09 17:57:57.000000000 
+0200
+++ new/aws-c-cal-0.7.0/tests/CMakeLists.txt    2024-06-21 17:52:29.000000000 
+0200
@@ -117,6 +117,9 @@
 add_test_case(aes_keywrap_validate_materials_fails)
 add_test_case(aes_test_input_too_large)
 add_test_case(aes_test_encrypt_empty_input)
+add_test_case(aes_test_empty_input_gcm_tag_corner_cases)
+add_test_case(aes_test_gcm_tag_corner_cases)
+add_test_case(aes_test_gcm_tag_large_input_corner_cases)
 
 add_test_case(der_encode_integer)
 add_test_case(der_encode_integer_zero)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/aws-c-cal-0.6.15/tests/aes256_test.c 
new/aws-c-cal-0.7.0/tests/aes256_test.c
--- old/aws-c-cal-0.6.15/tests/aes256_test.c    2024-05-09 17:57:57.000000000 
+0200
+++ new/aws-c-cal-0.7.0/tests/aes256_test.c     2024-06-21 17:52:29.000000000 
+0200
@@ -253,9 +253,9 @@
     ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher));
     struct aws_byte_buf decrypted_buf;
     aws_byte_buf_init(&decrypted_buf, allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
-    struct aws_byte_cursor encryted_cur = 
aws_byte_cursor_from_buf(&encrypted_buf);
+    struct aws_byte_cursor encrypted_cur = 
aws_byte_cursor_from_buf(&encrypted_buf);
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_READY, 
aws_symmetric_cipher_get_state(cipher));
-    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, 
&decrypted_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, 
&decrypted_buf));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_READY, 
aws_symmetric_cipher_get_state(cipher));
     ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, 
&decrypted_buf));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_FINALIZED, 
aws_symmetric_cipher_get_state(cipher));
@@ -498,11 +498,11 @@
 
     struct aws_byte_buf decrypted_buf;
     aws_byte_buf_init(&decrypted_buf, allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
-    struct aws_byte_cursor encryted_cur = 
aws_byte_cursor_from_buf(&encrypted_buf);
+    struct aws_byte_cursor encrypted_cur = 
aws_byte_cursor_from_buf(&encrypted_buf);
 
     ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_READY, 
aws_symmetric_cipher_get_state(cipher));
-    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, 
&decrypted_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, 
&decrypted_buf));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_READY, 
aws_symmetric_cipher_get_state(cipher));
     ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, 
&decrypted_buf));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_FINALIZED, 
aws_symmetric_cipher_get_state(cipher));
@@ -560,7 +560,7 @@
     const struct aws_byte_cursor expected,
     const struct aws_byte_cursor tag,
     const struct aws_byte_cursor *aad) {
-    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, &key, 
&iv, aad, &tag);
+    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, &key, 
&iv, aad);
     ASSERT_NOT_NULL(cipher);
 
     struct aws_byte_buf encrypted_buf;
@@ -569,6 +569,7 @@
     /* slice on a weird boundary to hit boundary conditions. */
     while (data_cpy.len) {
         struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, 
(size_t)aws_min_i64(24, data_cpy.len));
+        AWS_LOGF_DEBUG(0, "to encrypt test size %zu", to_encrypt.len);
         ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, 
&encrypted_buf));
     }
     ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, 
&encrypted_buf));
@@ -584,6 +585,8 @@
 
     ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher));
 
+    aws_symmetric_cipher_set_tag(cipher, tag);
+
     /* slice on a weird boundary to hit boundary conditions. */
     while (encrypted_cur.len) {
         struct aws_byte_cursor to_decrypt =
@@ -1077,7 +1080,7 @@
 
 static int s_aes_gcm_test_with_generated_key_iv_fn(struct aws_allocator 
*allocator, void *ctx) {
     (void)ctx;
-    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, NULL, 
NULL, NULL, NULL);
+    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, NULL, 
NULL, NULL);
     ASSERT_NOT_NULL(cipher);
 
     struct aws_byte_buf encrypted_buf;
@@ -1087,19 +1090,25 @@
     ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, 
&encrypted_buf));
     ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, 
&encrypted_buf));
 
+    struct aws_byte_buf encryption_tag;
+    aws_byte_buf_init_copy_from_cursor(&encryption_tag, allocator, 
aws_symmetric_cipher_get_tag(cipher));
+
     ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher));
 
+    aws_symmetric_cipher_set_tag(cipher, 
aws_byte_cursor_from_buf(&encryption_tag));
+
     struct aws_byte_buf decrypted_buf;
     aws_byte_buf_init(&decrypted_buf, allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
-    struct aws_byte_cursor encryted_cur = 
aws_byte_cursor_from_buf(&encrypted_buf);
+    struct aws_byte_cursor encrypted_cur = 
aws_byte_cursor_from_buf(&encrypted_buf);
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_READY, 
aws_symmetric_cipher_get_state(cipher));
-    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, 
&decrypted_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, 
&decrypted_buf));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_READY, 
aws_symmetric_cipher_get_state(cipher));
     ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, 
&decrypted_buf));
     ASSERT_INT_EQUALS(AWS_SYMMETRIC_CIPHER_FINALIZED, 
aws_symmetric_cipher_get_state(cipher));
 
     ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, 
decrypted_buf.len);
 
+    aws_byte_buf_clean_up(&encryption_tag);
     aws_byte_buf_clean_up(&decrypted_buf);
     aws_byte_buf_clean_up(&encrypted_buf);
     aws_symmetric_cipher_destroy(cipher);
@@ -1121,22 +1130,22 @@
 
     struct aws_byte_cursor key = aws_byte_cursor_from_array(valid_key_size, 
sizeof(valid_key_size));
     struct aws_byte_cursor iv = aws_byte_cursor_from_array(iv_too_small, 
sizeof(iv_too_small));
-    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL));
+    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL));
     
ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, 
aws_last_error());
 
     key = aws_byte_cursor_from_array(valid_key_size, sizeof(valid_key_size));
     iv = aws_byte_cursor_from_array(iv_too_large, sizeof(iv_too_large));
-    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL));
+    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL));
     
ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_CIPHER_MATERIAL_SIZE_FOR_ALGORITHM, 
aws_last_error());
 
     key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small));
     iv = aws_byte_cursor_from_array(valid_iv_size, sizeof(valid_iv_size));
-    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL));
+    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL));
     ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, 
aws_last_error());
 
     key = aws_byte_cursor_from_array(key_too_small, sizeof(key_too_small));
     iv = aws_byte_cursor_from_array(key_too_large, sizeof(key_too_large));
-    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL, NULL));
+    ASSERT_NULL(aws_aes_gcm_256_new(allocator, &key, &iv, NULL));
     ASSERT_UINT_EQUALS(AWS_ERROR_CAL_INVALID_KEY_LENGTH_FOR_ALGORITHM, 
aws_last_error());
 
     return AWS_OP_SUCCESS;
@@ -1514,7 +1523,7 @@
     struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv));
     struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, 
sizeof(aad));
 
-    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, 
&key_cur, &iv_cur, &aad_cur, NULL);
+    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, 
&key_cur, &iv_cur, &aad_cur);
 
     // encrypt
     struct aws_byte_cursor data_cur = {0};
@@ -1527,13 +1536,20 @@
 
     ASSERT_INT_EQUALS(0, encrypt_buf.len);
 
+    struct aws_byte_buf encryption_tag;
+    aws_byte_buf_init_copy_from_cursor(&encryption_tag, allocator, 
aws_symmetric_cipher_get_tag(cipher));
+
     aws_symmetric_cipher_reset(cipher);
+
+    aws_symmetric_cipher_set_tag(cipher, 
aws_byte_cursor_from_buf(&encryption_tag));
+
     struct aws_byte_buf decrypted_buf = {0};
     aws_byte_buf_init(&decrypted_buf, allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
     struct aws_byte_cursor ciphertext_cur = 
aws_byte_cursor_from_buf(&encrypt_buf);
     ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, ciphertext_cur, 
&decrypted_buf));
     ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, 
&decrypted_buf));
 
+    aws_byte_buf_clean_up(&encryption_tag);
     aws_byte_buf_clean_up(&encrypt_buf);
     aws_byte_buf_clean_up(&decrypted_buf);
     aws_symmetric_cipher_destroy(cipher);
@@ -1541,3 +1557,159 @@
     return AWS_OP_SUCCESS;
 }
 AWS_TEST_CASE(aes_test_encrypt_empty_input, s_aes_test_encrypt_empty_input)
+
+static int s_aes_gcm_corner_case_checker(
+    struct aws_allocator *allocator,
+    struct aws_byte_cursor key_cur,
+    struct aws_byte_cursor iv_cur,
+    struct aws_byte_cursor aad_cur,
+    struct aws_byte_cursor data_cur,
+    struct aws_byte_cursor expected_tag_cur) {
+
+    /* just a random tag value which should not match anything*/
+    uint8_t wrong_tag[] = {
+        0x83, 0xC0, 0xE4, 0x2B, 0xB1, 0x95, 0xE2, 0x62, 0xCB, 0x3B, 0x3A, 
0x74, 0xA0, 0xDA, 0xE1, 0xC8};
+    struct aws_byte_cursor wrong_tag_cur = 
aws_byte_cursor_from_array(wrong_tag, sizeof(wrong_tag));
+
+    struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, 
&key_cur, &iv_cur, &aad_cur);
+
+    struct aws_byte_cursor tag = aws_symmetric_cipher_get_tag(cipher);
+
+    ASSERT_TRUE(tag.len == 0 && tag.ptr == NULL);
+
+    aws_symmetric_cipher_set_tag(cipher, wrong_tag_cur);
+
+    // encrypt
+    struct aws_byte_buf encrypt_buf = {0};
+    aws_byte_buf_init(&encrypt_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 
2);
+    ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, data_cur, 
&encrypt_buf));
+
+    // finalize
+    ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, 
&encrypt_buf));
+
+    if (data_cur.len == 0) {
+        ASSERT_INT_EQUALS(0, encrypt_buf.len);
+    } else {
+        ASSERT_TRUE(encrypt_buf.len > 0);
+    }
+
+    struct aws_byte_cursor encryption_tag = 
aws_symmetric_cipher_get_tag(cipher);
+
+    ASSERT_BIN_ARRAYS_EQUALS(expected_tag_cur.ptr, expected_tag_cur.len, 
encryption_tag.ptr, encryption_tag.len);
+
+    /* reset and verify decrypt works */
+    aws_symmetric_cipher_reset(cipher);
+    tag = aws_symmetric_cipher_get_tag(cipher);
+
+    ASSERT_TRUE(tag.len == 0 && tag.ptr == NULL);
+
+    aws_symmetric_cipher_set_tag(cipher, expected_tag_cur);
+
+    struct aws_byte_buf decrypted_buf = {0};
+    aws_byte_buf_init(&decrypted_buf, allocator, 
AWS_AES_256_CIPHER_BLOCK_SIZE);
+    struct aws_byte_cursor ciphertext_cur = 
aws_byte_cursor_from_buf(&encrypt_buf);
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, ciphertext_cur, 
&decrypted_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, 
&decrypted_buf));
+
+    /* reset and verify decrypt with wrong tag fails */
+    aws_symmetric_cipher_reset(cipher);
+    aws_byte_buf_reset(&decrypted_buf, true);
+    aws_symmetric_cipher_set_tag(cipher, wrong_tag_cur);
+    ciphertext_cur = aws_byte_cursor_from_buf(&encrypt_buf);
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, ciphertext_cur, 
&decrypted_buf));
+    ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, 
aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf));
+
+    /* reset and verify decrypt with no tag fails */
+    aws_symmetric_cipher_reset(cipher);
+    aws_byte_buf_reset(&decrypted_buf, true);
+    ciphertext_cur = aws_byte_cursor_from_buf(&encrypt_buf);
+    ASSERT_ERROR(AWS_ERROR_INVALID_ARGUMENT, 
aws_symmetric_cipher_decrypt(cipher, ciphertext_cur, &decrypted_buf));
+
+    aws_byte_buf_clean_up(&encrypt_buf);
+    aws_byte_buf_clean_up(&decrypted_buf);
+    aws_symmetric_cipher_destroy(cipher);
+    return AWS_OP_SUCCESS;
+}
+
+static int s_aes_test_empty_input_gcm_tag_corner_cases(struct aws_allocator 
*allocator, void *ctx) {
+    (void)ctx;
+
+    uint8_t iv[] = {0xFB, 0x7B, 0x4A, 0x82, 0x4E, 0x82, 0xDA, 0xA6, 0xC8, 
0xBC, 0x12, 0x51};
+
+    uint8_t key[] = {0x20, 0x14, 0x2E, 0x89, 0x8C, 0xD2, 0xFD, 0x98, 0x0F, 
0xBF, 0x34, 0xDE, 0x6B, 0xC8, 0x5C, 0x14,
+                     0xDA, 0x7D, 0x57, 0xBD, 0x28, 0xF4, 0xAA, 0x5C, 0xF1, 
0x72, 0x8A, 0xB6, 0x4E, 0x84, 0x31, 0x42};
+
+    uint8_t aad[] = {0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 
0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65,
+                     0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 
0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47,
+                     0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 
0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D};
+
+    uint8_t expected_tag[] = {
+        0x81, 0xC0, 0xE4, 0x2B, 0xB1, 0x95, 0xE2, 0x62, 0xCB, 0x3B, 0x3A, 
0x74, 0xA0, 0xDA, 0xE1, 0xC8};
+
+    struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, 
sizeof(key));
+    struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv));
+    struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, 
sizeof(aad));
+    struct aws_byte_cursor expected_tag_cur = 
aws_byte_cursor_from_array(expected_tag, sizeof(expected_tag));
+    struct aws_byte_cursor data_cur = {0};
+
+    return s_aes_gcm_corner_case_checker(allocator, key_cur, iv_cur, aad_cur, 
data_cur, expected_tag_cur);
+}
+AWS_TEST_CASE(aes_test_empty_input_gcm_tag_corner_cases, 
s_aes_test_empty_input_gcm_tag_corner_cases)
+
+static int s_aes_test_gcm_tag_corner_cases(struct aws_allocator *allocator, 
void *ctx) {
+    (void)ctx;
+
+    uint8_t iv[] = {0xFB, 0x7B, 0x4A, 0x82, 0x4E, 0x82, 0xDA, 0xA6, 0xC8, 
0xBC, 0x12, 0x51};
+
+    uint8_t key[] = {0x20, 0x14, 0x2E, 0x89, 0x8C, 0xD2, 0xFD, 0x98, 0x0F, 
0xBF, 0x34, 0xDE, 0x6B, 0xC8, 0x5C, 0x14,
+                     0xDA, 0x7D, 0x57, 0xBD, 0x28, 0xF4, 0xAA, 0x5C, 0xF1, 
0x72, 0x8A, 0xB6, 0x4E, 0x84, 0x31, 0x42};
+
+    uint8_t aad[] = {0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 
0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65,
+                     0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 
0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47,
+                     0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 
0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D};
+
+    uint8_t data[] = {
+        0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 
0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E};
+
+    uint8_t expected_tag[] = {
+        0x76, 0x4D, 0x21, 0xD6, 0xC0, 0xD8, 0xC7, 0xF9, 0xCA, 0x6D, 0xF2, 
0x19, 0xAE, 0x56, 0xDC, 0x1F};
+
+    struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, 
sizeof(key));
+    struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv));
+    struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, 
sizeof(aad));
+    struct aws_byte_cursor expected_tag_cur = 
aws_byte_cursor_from_array(expected_tag, sizeof(expected_tag));
+    struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, 
sizeof(data));
+
+    return s_aes_gcm_corner_case_checker(allocator, key_cur, iv_cur, aad_cur, 
data_cur, expected_tag_cur);
+}
+AWS_TEST_CASE(aes_test_gcm_tag_corner_cases, s_aes_test_gcm_tag_corner_cases)
+
+static int s_aes_test_gcm_tag_large_input_corner_cases(struct aws_allocator 
*allocator, void *ctx) {
+    (void)ctx;
+
+    uint8_t iv[] = {0xFB, 0x7B, 0x4A, 0x82, 0x4E, 0x82, 0xDA, 0xA6, 0xC8, 
0xBC, 0x12, 0x51};
+
+    uint8_t key[] = {0x20, 0x14, 0x2E, 0x89, 0x8C, 0xD2, 0xFD, 0x98, 0x0F, 
0xBF, 0x34, 0xDE, 0x6B, 0xC8, 0x5C, 0x14,
+                     0xDA, 0x7D, 0x57, 0xBD, 0x28, 0xF4, 0xAA, 0x5C, 0xF1, 
0x72, 0x8A, 0xB6, 0x4E, 0x84, 0x31, 0x42};
+
+    uint8_t aad[] = {0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 
0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65,
+                     0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 
0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47,
+                     0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 
0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D};
+
+    uint8_t data[] = {0x84, 0x99, 0x89, 0x3E, 0x16, 0xB0, 0xBA, 0x8B, 0x00, 
0x7D, 0x54, 0x66, 0x5A,
+                      0x84, 0x99, 0x89, 0x3E, 0x84, 0x99, 0x89, 0x3E, 0x16, 
0xB0, 0xBA, 0x8B, 0x00,
+                      0x7D, 0x54, 0x66, 0x5A, 0x84, 0x99, 0x89, 0x3E, 0x84, 
0x99, 0x89, 0x3E, 0x16,
+                      0xB0, 0xBA, 0x8B, 0x00, 0x7D, 0x54, 0x66, 0x5A, 0x84, 
0x99, 0x89, 0x3E};
+
+    uint8_t expected_tag[] = {
+        0xEA, 0x5E, 0x8A, 0x4B, 0x76, 0xE8, 0x9D, 0xC5, 0xF1, 0x32, 0x14, 
0x64, 0xD0, 0x93, 0x74, 0xB7};
+
+    struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, 
sizeof(key));
+    struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv));
+    struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, 
sizeof(aad));
+    struct aws_byte_cursor expected_tag_cur = 
aws_byte_cursor_from_array(expected_tag, sizeof(expected_tag));
+    struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, 
sizeof(data));
+
+    return s_aes_gcm_corner_case_checker(allocator, key_cur, iv_cur, aad_cur, 
data_cur, expected_tag_cur);
+}
+AWS_TEST_CASE(aes_test_gcm_tag_large_input_corner_cases, 
s_aes_test_gcm_tag_large_input_corner_cases)

Reply via email to