Copilot commented on code in PR #2106:
URL: https://github.com/apache/nifi-minifi-cpp/pull/2106#discussion_r2831943523
##########
behave_framework/src/minifi_test_framework/core/minifi_test_context.py:
##########
@@ -16,32 +16,45 @@
#
from __future__ import annotations
-from typing import TYPE_CHECKING
+
+import os
+from typing import Any
+import docker
from behave.runner import Context
from docker.models.networks import Network
-from minifi_test_framework.containers.container import Container
-from OpenSSL import crypto
+from minifi_test_framework.containers.container_protocol import ContainerProtocol
+from minifi_test_framework.containers.minifi_protocol import MinifiProtocol
-if TYPE_CHECKING:
- from minifi_test_framework.containers.minifi_container import MinifiContainer
DEFAULT_MINIFI_CONTAINER_NAME = "minifi-primary"
+class MinifiContainer(ContainerProtocol, MinifiProtocol):
+ pass
+
+
class MinifiTestContext(Context):
- containers: dict[str, Container]
+ containers: dict[str, ContainerProtocol]
scenario_id: str
network: Network
minifi_container_image: str
resource_dir: str | None
- root_ca_key: crypto.PKey
- root_ca_cert: crypto.X509
+ root_ca_key: Any
+ root_ca_cert: Any
Review Comment:
`root_ca_key` / `root_ca_cert` are typed as `Any`. Since `ssl_utils` now
uses `cryptography`, these can be typed more precisely (e.g.,
`cryptography.x509.Certificate` and an `RSAPrivateKey` type) to keep static
checking useful.
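
For illustration, a minimal sketch of how the annotations could look with the `cryptography` types, assuming `ssl_utils` keeps generating RSA keys:

```python
# Sketch only: precise annotations, assuming the root CA key stays RSA.
from behave.runner import Context
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import rsa


class MinifiTestContext(Context):
    root_ca_key: rsa.RSAPrivateKey
    root_ca_cert: x509.Certificate
```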
##########
behave_framework/src/minifi_test_framework/core/ssl_utils.py:
##########
@@ -1,161 +1,125 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import time
-import logging
-import random
-
-from M2Crypto import X509, EVP, RSA, ASN1
-from OpenSSL import crypto
+import datetime
+from cryptography import x509
+from cryptography.x509.oid import NameOID
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import serialization
Review Comment:
This file no longer includes the ASF license header that is present across
the rest of the repository. Please restore the standard Apache 2.0 header at
the top of the file to maintain licensing compliance.
##########
docker/installed/win.Dockerfile:
##########
@@ -0,0 +1,24 @@
+#escape=`
+
+FROM mcr.microsoft.com/windows/servercore:ltsc2022
+
+LABEL maintainer="Apache NiFi <[email protected]>"
+
+ARG MSI_SOURCE="nifi-minifi-cpp.msi"
+
+ENV MINIFI_HOME="C:\Program Files\ApacheNiFiMiNiFi\nifi-minifi-cpp"
+
+SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop';
$ProgressPreference = 'SilentlyContinue';"]
+
+RUN Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
Review Comment:
Downloading and executing the Chocolatey install script with `iex
((New-Object
System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))`
introduces a supply-chain remote code execution risk: if
`community.chocolatey.org` or the connection is compromised, arbitrary code can
run during the Docker build with high privileges. To harden this, rely on a
trusted, versioned installation source (e.g., a base image or offline package)
and/or verify the installer’s integrity via a pinned checksum or signature
instead of executing it directly from the remote URL.
##########
behave_framework/src/minifi_test_framework/containers/container_windows.py:
##########
@@ -0,0 +1,371 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+import logging
+import os
+import tempfile
+import base64
+import tarfile
+import io
+from typing import Union, Optional, Tuple, List, Dict, TYPE_CHECKING
+
+import docker
+from docker.models.networks import Network
+from docker.models.containers import Container
+
+from minifi_test_framework.containers.container_protocol import ContainerProtocol
+from minifi_test_framework.containers.directory import Directory
+from minifi_test_framework.containers.file import File
+from minifi_test_framework.containers.host_file import HostFile
+
+if TYPE_CHECKING:
+ from minifi_test_framework.core.minifi_test_context import MinifiTestContext
+
+
+class WindowsContainer(ContainerProtocol):
+ def __init__(self, image_name: str, container_name: str, network: Network, command: str | None = None, entrypoint: str | None = None):
+ super().__init__()
+ self.image_name: str = image_name
+ self.container_name: str = container_name
+ self.network: Network = network
+
+ self.client: docker.DockerClient = docker.from_env()
+ self.container: Optional[Container] = None
+ self.files: List[File] = []
+ self.dirs: List[Directory] = []
+ self.host_files: List[HostFile] = []
+ self.volumes: Dict = {}
+ self.command: str | None = command
+ self.entrypoint: str | None = entrypoint
+ self._temp_dir: Optional[tempfile.TemporaryDirectory] = None
+ self.ports: Optional[Dict[str, int]] = None
+ self.environment: List[str] = []
+
+ def _normalize_path(self, path: str) -> str:
+ """
+ Converts paths to Windows format (C:\\...)
+ Handles leading slashes and ensures backslashes.
+ """
+ clean_path = path.strip().replace("/", "\\")
+ if clean_path.startswith("\\"):
+ clean_path = clean_path[1:]
+
+ # If it doesn't already have a drive letter, assume C:
+ if ":" not in clean_path:
+ return f"C:\\{clean_path}"
+ return clean_path
+
+ def deploy(self, context: MinifiTestContext | None) -> bool:
+ # Cleanup previous temp dir if it exists to prevent leaks
+ if self._temp_dir:
+ self._temp_dir.cleanup()
+ self._temp_dir = tempfile.TemporaryDirectory()
+
+ # 1. Prepare Volumes (Directory objects)
+ for directory in self.dirs:
+ # Mirror directory structure locally in temp
+ # usage of os.sep ensures we use host-native separators for the temp setup
+ rel_path = directory.path.strip("/\\")
+ temp_subdir = os.path.join(self._temp_dir.name, rel_path)
+ os.makedirs(temp_subdir, exist_ok=True)
+
+ for file_name, content in directory.files.items():
+ file_path = os.path.join(temp_subdir, file_name)
+ # Write with UTF-8 to ensure content is preserved on host
+ with open(file_path, "w", encoding="utf-8") as temp_file:
+ logging.info(f"writing content into {temp_file.name}")
+ temp_file.write(content)
+
+ # Mount using Windows path format
+ container_bind_path = self._normalize_path(directory.path)
+ self.volumes[temp_subdir] = {
+ "bind": container_bind_path,
+ "mode": directory.mode
+ }
+
+ for host_file in self.host_files:
+ container_bind_path = self._normalize_path(host_file.container_path)
+ self.volumes[host_file.host_path] = {"bind": container_bind_path, "mode": host_file.mode}
+
+ # 3. Cleanup existing container
+ try:
+ existing_container = self.client.containers.get(self.container_name)
+ logging.warning(f"Found existing container '{self.container_name}'. Removing it first.")
+ existing_container.remove(force=True)
+ except docker.errors.NotFound:
+ pass
+
+ # 4. Create and Start
+ try:
+ print(f"Creating and starting container
'{self.container_name}'...")
+ self.container = self.client.containers.create(
+ image=self.image_name,
+ name=self.container_name,
+ ports=self.ports,
+ environment=self.environment,
+ volumes=self.volumes,
+ network=self.network.name,
+ command=self.command,
+ entrypoint=self.entrypoint,
+ detach=True,
+ tty=False
+ )
+
+ self.container.start()
+
+ for file in self.files:
+ self._copy_content_to_container(file.content, file.path)
+
+ except Exception as e:
+ logging.error(f"Error starting container: {e}")
+ self.clean_up()
+ raise
+ return True
+
+ def _copy_content_to_container(self, content: str, target_path: str):
+ if not self.container:
+ return
+
+ win_path = self._normalize_path(target_path)
+ dir_name = os.path.dirname(win_path)
+ file_name = os.path.basename(win_path)
+
+ self._run_powershell(f"New-Item -ItemType Directory -Force -Path '{dir_name}'")
+
+ tar_stream = io.BytesIO()
+ with tarfile.open(fileobj=tar_stream, mode='w') as tar:
+ encoded_data = content.encode('utf-8')
+ tarinfo = tarfile.TarInfo(name=file_name)
+ tarinfo.size = len(encoded_data)
+ tar.addfile(tarinfo, io.BytesIO(encoded_data))
+
Review Comment:
`_copy_content_to_container` assumes `content` is a `str` and
unconditionally calls `.encode('utf-8')`, but `File.content` is defined as `str
| bytes`. If a Windows container needs to copy binary content (e.g., JKS
files/certs), this will fail. Consider accepting `bytes | str` and only
encoding when the input is text.
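
As a sketch of the idea (using a hypothetical standalone helper, `build_tar_archive`, rather than the PR's exact method), encoding only when the payload is text keeps binary files intact:

```python
import io
import tarfile


def build_tar_archive(content: str | bytes, file_name: str) -> bytes:
    # Encode only text; binary payloads (e.g. JKS keystores) pass through unchanged.
    data = content.encode("utf-8") if isinstance(content, str) else content
    stream = io.BytesIO()
    with tarfile.open(fileobj=stream, mode="w") as tar:
        info = tarfile.TarInfo(name=file_name)
        info.size = len(data)
        tar.addfile(info, io.BytesIO(data))
    return stream.getvalue()
```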
##########
behave_framework/src/minifi_test_framework/containers/container_windows.py:
##########
@@ -0,0 +1,371 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+import logging
+import os
+import tempfile
+import base64
+import tarfile
+import io
+from typing import Union, Optional, Tuple, List, Dict, TYPE_CHECKING
+
+import docker
+from docker.models.networks import Network
+from docker.models.containers import Container
+
+from minifi_test_framework.containers.container_protocol import ContainerProtocol
+from minifi_test_framework.containers.directory import Directory
+from minifi_test_framework.containers.file import File
+from minifi_test_framework.containers.host_file import HostFile
+
+if TYPE_CHECKING:
+ from minifi_test_framework.core.minifi_test_context import MinifiTestContext
+
+
+class WindowsContainer(ContainerProtocol):
+ def __init__(self, image_name: str, container_name: str, network: Network, command: str | None = None, entrypoint: str | None = None):
+ super().__init__()
+ self.image_name: str = image_name
+ self.container_name: str = container_name
+ self.network: Network = network
+
+ self.client: docker.DockerClient = docker.from_env()
+ self.container: Optional[Container] = None
+ self.files: List[File] = []
+ self.dirs: List[Directory] = []
+ self.host_files: List[HostFile] = []
+ self.volumes: Dict = {}
+ self.command: str | None = command
+ self.entrypoint: str | None = entrypoint
+ self._temp_dir: Optional[tempfile.TemporaryDirectory] = None
+ self.ports: Optional[Dict[str, int]] = None
+ self.environment: List[str] = []
+
+ def _normalize_path(self, path: str) -> str:
+ """
+ Converts paths to Windows format (C:\\...)
+ Handles leading slashes and ensures backslashes.
+ """
+ clean_path = path.strip().replace("/", "\\")
+ if clean_path.startswith("\\"):
+ clean_path = clean_path[1:]
+
+ # If it doesn't already have a drive letter, assume C:
+ if ":" not in clean_path:
+ return f"C:\\{clean_path}"
+ return clean_path
+
+ def deploy(self, context: MinifiTestContext | None) -> bool:
+ # Cleanup previous temp dir if it exists to prevent leaks
+ if self._temp_dir:
+ self._temp_dir.cleanup()
+ self._temp_dir = tempfile.TemporaryDirectory()
+
+ # 1. Prepare Volumes (Directory objects)
+ for directory in self.dirs:
+ # Mirror directory structure locally in temp
+ # usage of os.sep ensures we use host-native separators for the temp setup
+ rel_path = directory.path.strip("/\\")
+ temp_subdir = os.path.join(self._temp_dir.name, rel_path)
+ os.makedirs(temp_subdir, exist_ok=True)
+
+ for file_name, content in directory.files.items():
+ file_path = os.path.join(temp_subdir, file_name)
+ # Write with UTF-8 to ensure content is preserved on host
+ with open(file_path, "w", encoding="utf-8") as temp_file:
+ logging.info(f"writing content into {temp_file.name}")
+ temp_file.write(content)
+
+ # Mount using Windows path format
+ container_bind_path = self._normalize_path(directory.path)
+ self.volumes[temp_subdir] = {
+ "bind": container_bind_path,
+ "mode": directory.mode
+ }
+
+ for host_file in self.host_files:
+ container_bind_path = self._normalize_path(host_file.container_path)
+ self.volumes[host_file.host_path] = {"bind": container_bind_path, "mode": host_file.mode}
+
+ # 3. Cleanup existing container
+ try:
+ existing_container = self.client.containers.get(self.container_name)
+ logging.warning(f"Found existing container '{self.container_name}'. Removing it first.")
+ existing_container.remove(force=True)
+ except docker.errors.NotFound:
+ pass
+
+ # 4. Create and Start
+ try:
+ print(f"Creating and starting container
'{self.container_name}'...")
Review Comment:
`print(...)` is used for container startup logging. Using the existing
`logging` calls (as in `LinuxContainer.deploy`) would keep test output
consistent and easier to control in CI.
```suggestion
logging.info(f"Creating and starting container
'{self.container_name}'...")
```
##########
run_flake8.sh:
##########
@@ -19,4 +19,4 @@
set -euo pipefail
directory=${1:-.}
-flake8 --exclude .venv,venv,thirdparty,build,cmake-build-*,github_env --builtins log,REL_SUCCESS,REL_FAILURE,REL_ORIGINAL,raw_input --ignore E501,W503,F811 "${directory}"
+flake8 --exclude .venv,venv,venv2,behave-venv,thirdparty,build,cmake-build-*,github_env --builtins log,REL_SUCCESS,REL_FAILURE,REL_ORIGINAL,raw_input --ignore E501,W503,F811 "${directory}"
Review Comment:
The repo’s Behave test virtualenv is created in `./behave_venv` (see
`docker/RunBehaveTests.sh`), but flake8 excludes `behave-venv` (dash). This
means flake8 will still traverse the Behave venv by default. Consider excluding
`behave_venv` here (and keep/remove `behave-venv` depending on what is actually
used).
##########
behave_framework/src/minifi_test_framework/core/ssl_utils.py:
##########
@@ -1,161 +1,125 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import time
-import logging
-import random
-
-from M2Crypto import X509, EVP, RSA, ASN1
-from OpenSSL import crypto
+import datetime
+from cryptography import x509
+from cryptography.x509.oid import NameOID
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import serialization
def gen_cert():
- """
- Generate TLS certificate request for testing
- """
-
- req, key = gen_req()
- pub_key = req.get_pubkey()
- subject = req.get_subject()
- cert = X509.X509()
- # noinspection PyTypeChecker
- cert.set_serial_number(1)
- cert.set_version(2)
- cert.set_subject(subject)
- t = int(time.time())
- now = ASN1.ASN1_UTCTIME()
- now.set_time(t)
- now_plus_year = ASN1.ASN1_UTCTIME()
- now_plus_year.set_time(t + 60 * 60 * 24 * 365)
- cert.set_not_before(now)
- cert.set_not_after(now_plus_year)
- issuer = X509.X509_Name()
- issuer.C = 'US'
- issuer.CN = 'minifi-listen'
- cert.set_issuer(issuer)
- cert.set_pubkey(pub_key)
- cert.sign(key, 'sha256')
+ key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
- return cert, key
-
-
-def rsa_gen_key_callback():
- pass
-
-
-def gen_req():
- """
- Generate TLS certificate request for testing
- """
+ subject = issuer = x509.Name([
+ x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
+ x509.NameAttribute(NameOID.COMMON_NAME, u"minifi-listen"),
+ ])
- logging.info('Generating test certificate request')
- key = EVP.PKey()
- req = X509.Request()
- rsa = RSA.gen_key(1024, 65537, rsa_gen_key_callback)
- key.assign_rsa(rsa)
- req.set_pubkey(key)
- name = req.get_subject()
- name.C = 'US'
- name.CN = 'minifi-listen'
- req.sign(key, 'sha256')
+ cert = x509.CertificateBuilder().subject_name(
+ subject
+ ).issuer_name(
+ issuer
+ ).public_key(
+ key.public_key()
+ ).serial_number(
+ x509.random_serial_number()
+ ).not_valid_before(
+ datetime.datetime.utcnow()
+ ).not_valid_after(
+ datetime.datetime.utcnow() + datetime.timedelta(days=365)
+ ).sign(key, hashes.SHA256())
- return req, key
+ return cert, key
def make_self_signed_cert(common_name):
- ca_key = crypto.PKey()
- ca_key.generate_key(crypto.TYPE_RSA, 2048)
+ key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
- ca_cert = crypto.X509()
- ca_cert.set_version(2)
- ca_cert.set_serial_number(random.randint(50000000, 100000000))
-
- ca_subj = ca_cert.get_subject()
- ca_subj.commonName = common_name
-
- ca_cert.add_extensions([
- crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash",
subject=ca_cert),
- ])
-
- ca_cert.add_extensions([
- crypto.X509Extension(b"authorityKeyIdentifier", False,
b"keyid:always", issuer=ca_cert),
+ subject = issuer = x509.Name([
+ x509.NameAttribute(NameOID.COMMON_NAME, common_name),
])
- ca_cert.add_extensions([
- crypto.X509Extension(b"basicConstraints", False, b"CA:TRUE"),
- crypto.X509Extension(b"keyUsage", False, b"keyCertSign, cRLSign"),
- ])
-
- ca_cert.set_issuer(ca_subj)
- ca_cert.set_pubkey(ca_key)
-
- ca_cert.gmtime_adj_notBefore(0)
- ca_cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
+ cert = x509.CertificateBuilder().subject_name(
+ subject
+ ).issuer_name(
+ issuer
+ ).public_key(
+ key.public_key()
+ ).serial_number(
+ x509.random_serial_number()
+ ).not_valid_before(
+ datetime.datetime.utcnow()
+ ).not_valid_after(
+ datetime.datetime.utcnow() + datetime.timedelta(days=3650)
+ ).add_extension(
+ x509.SubjectKeyIdentifier.from_public_key(key.public_key()),
+ critical=False,
+ ).add_extension(
+ x509.BasicConstraints(ca=True, path_length=None),
+ critical=True,
+ ).sign(key, hashes.SHA256())
- ca_cert.sign(ca_key, 'sha256')
-
- return ca_cert, ca_key
+ return cert, key
def _make_cert(common_name, ca_cert, ca_key, extended_key_usage=None):
- key = crypto.PKey()
- key.generate_key(crypto.TYPE_RSA, 2048)
-
- cert = crypto.X509()
- cert.set_version(2)
- cert.set_serial_number(random.randint(50000000, 100000000))
-
- client_subj = cert.get_subject()
- client_subj.commonName = common_name
+ key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
- cert.add_extensions([
- crypto.X509Extension(b"basicConstraints", False, b"CA:FALSE"),
- crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash",
subject=cert),
+ subject = x509.Name([
+ x509.NameAttribute(NameOID.COMMON_NAME, common_name),
])
- extensions = [crypto.X509Extension(b"authorityKeyIdentifier", False, b"keyid:always", issuer=ca_cert),
- crypto.X509Extension(b"keyUsage", False, b"digitalSignature")]
+ builder = x509.CertificateBuilder().subject_name(
+ subject
+ ).issuer_name(
+ ca_cert.subject
+ ).public_key(
+ key.public_key()
+ ).serial_number(
+ x509.random_serial_number()
+ ).not_valid_before(
+ datetime.datetime.utcnow()
+ ).not_valid_after(
+ datetime.datetime.utcnow() + datetime.timedelta(days=3650)
+ ).add_extension(
+ x509.BasicConstraints(ca=False, path_length=None),
+ critical=True,
+ ).add_extension(
+ x509.SubjectKeyIdentifier.from_public_key(key.public_key()),
+ critical=False,
+ ).add_extension(
+ x509.SubjectAlternativeName([x509.DNSName(common_name)]),
+ critical=False,
+ )
if extended_key_usage:
- extensions.append(crypto.X509Extension(b"extendedKeyUsage", False, extended_key_usage))
-
- cert.add_extensions([
- crypto.X509Extension(b"subjectAltName", False, b"DNS.1:" +
common_name.encode())
- ])
-
- cert.add_extensions(extensions)
-
- cert.set_issuer(ca_cert.get_subject())
- cert.set_pubkey(key)
-
- cert.gmtime_adj_notBefore(0)
- cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
-
- cert.sign(ca_key, 'sha256')
+ builder = builder.add_extension(
+ x509.ExtendedKeyUsage(extended_key_usage),
+ critical=False
+ )
+ cert = builder.sign(ca_key, hashes.SHA256())
return cert, key
def make_client_cert(common_name, ca_cert, ca_key):
- return _make_cert(common_name=common_name, ca_cert=ca_cert, ca_key=ca_key, extended_key_usage=b"clientAuth")
+ return _make_cert(common_name, ca_cert, ca_key, [x509.OID_CLIENT_AUTH])
def make_server_cert(common_name, ca_cert, ca_key):
- return _make_cert(common_name=common_name, ca_cert=ca_cert, ca_key=ca_key, extended_key_usage=b"serverAuth")
+ return _make_cert(common_name, ca_cert, ca_key, [x509.OID_SERVER_AUTH])
Review Comment:
`x509.OID_CLIENT_AUTH` / `x509.OID_SERVER_AUTH` are not valid identifiers in
`cryptography.x509` (extended key usage OIDs are exposed via
`cryptography.x509.oid.ExtendedKeyUsageOID`). As written, these calls will
raise at runtime when creating certs. Switch to the correct ExtendedKeyUsageOID
constants (or pass `ObjectIdentifier` instances) so certificate generation
works.
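
For reference, a sketch of the corrected wrappers using the constants from `ExtendedKeyUsageOID` (leaving `_make_cert` as it appears in the diff):

```python
from cryptography.x509.oid import ExtendedKeyUsageOID


def make_client_cert(common_name, ca_cert, ca_key):
    # EKU constants live on ExtendedKeyUsageOID, not directly on cryptography.x509
    return _make_cert(common_name, ca_cert, ca_key, [ExtendedKeyUsageOID.CLIENT_AUTH])


def make_server_cert(common_name, ca_cert, ca_key):
    return _make_cert(common_name, ca_cert, ca_key, [ExtendedKeyUsageOID.SERVER_AUTH])
```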
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]