Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-google-resumable-media for openSUSE:Factory checked in at 2023-09-13 20:45:42
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-google-resumable-media (Old)
 and      /work/SRC/openSUSE:Factory/.python-google-resumable-media.new.1766 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-google-resumable-media"

Wed Sep 13 20:45:42 2023 rev:18 rq:1110838 version:2.6.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-google-resumable-media/python-google-resumable-media.changes      2023-05-11 14:13:58.717507158 +0200
+++ /work/SRC/openSUSE:Factory/.python-google-resumable-media.new.1766/python-google-resumable-media.changes    2023-09-13 20:47:56.765311458 +0200
@@ -1,0 +2,11 @@
+Tue Sep 12 13:11:29 UTC 2023 - John Paul Adrian Glaubitz <adrian.glaub...@suse.com>
+
+- Update to 2.6.0
+  Features
+  * Add support for concurrent XML MPU uploads (#395)
+  * Introduce compatibility with native namespace packages (#385)
+  Bug Fixes
+  * Add google-auth to aiohttp extra (#386)
+- Update file pattern in %files section
+
+-------------------------------------------------------------------
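
The headline feature in 2.6.0 is concurrent XML MPU (multipart) uploads, exposed through the new XMLMPUContainer and XMLMPUPart classes in google.resumable_media.requests shown in the diff below. A rough usage sketch, assuming an authorized session from google-auth plus a hypothetical bucket, object name, content type, and part size (none of these names come from the package itself):

    import os
    from concurrent.futures import ThreadPoolExecutor

    import google.auth
    from google.auth.transport.requests import AuthorizedSession
    from google.resumable_media.requests import XMLMPUContainer, XMLMPUPart

    # Hypothetical names; the URL shape follows tests/system/utils.py below.
    FILENAME = "movie.m2ts"
    URL = "https://my-bucket.storage.googleapis.com/movie.m2ts"
    PART_SIZE = 8 * 1024 * 1024  # illustrative part size

    credentials, _ = google.auth.default(
        scopes=["https://www.googleapis.com/auth/devstorage.read_write"]
    )
    transport = AuthorizedSession(credentials)

    container = XMLMPUContainer(URL, FILENAME)
    container.initiate(transport, "video/mp2t")  # records container.upload_id

    def upload_part(part_number, start):
        end = min(start + PART_SIZE, os.path.getsize(FILENAME))
        part = XMLMPUPart(URL, container.upload_id, FILENAME,
                          start, end, part_number, checksum="md5")
        part.upload(transport)  # parts are independent; may run concurrently
        return part_number, part.etag

    offsets = range(0, os.path.getsize(FILENAME), PART_SIZE)
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(upload_part, i + 1, start)
                   for i, start in enumerate(offsets)]
        for future in futures:
            part_number, etag = future.result()
            # Parts do not update the container themselves.
            container.register_part(part_number, etag)

    container.finalize(transport)  # assembles the registered parts in order

Parts carry their own byte range and part number and can even run in separate processes; the container only learns about a finished part through register_part(), which is why the sketch collects part numbers and etags before calling finalize().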

Old:
----
  google-resumable-media-2.5.0.tar.gz

New:
----
  google-resumable-media-2.6.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-google-resumable-media.spec ++++++
--- /var/tmp/diff_new_pack.4ivLbF/_old  2023-09-13 20:47:57.797348275 +0200
+++ /var/tmp/diff_new_pack.4ivLbF/_new  2023-09-13 20:47:57.797348275 +0200
@@ -19,7 +19,7 @@
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 %define skip_python2 1
 Name:           python-google-resumable-media
-Version:        2.5.0
+Version:        2.6.0
 Release:        0
 Summary:        Utilities for Google Media Downloads and Resumable Uploads
 License:        Apache-2.0
@@ -62,7 +62,6 @@
 %license LICENSE
 %doc README.rst
 %{python_sitelib}/google_resumable_media*-info
-%{python_sitelib}/google_resumable_media*pth
 %{python_sitelib}/google/resumable_media*
 %{python_sitelib}/google/_async_resumable_media*
 

++++++ google-resumable-media-2.5.0.tar.gz -> google-resumable-media-2.6.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/PKG-INFO new/google-resumable-media-2.6.0/PKG-INFO
--- old/google-resumable-media-2.5.0/PKG-INFO   2023-04-24 21:02:52.186299300 +0200
+++ new/google-resumable-media-2.6.0/PKG-INFO   2023-09-06 20:12:17.037366400 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: google-resumable-media
-Version: 2.5.0
+Version: 2.6.0
 Summary: Utilities for Google Media Downloads and Resumable Uploads
 Home-page: https://github.com/googleapis/google-resumable-media-python
 Author: Google Cloud Platform
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google/__init__.py new/google-resumable-media-2.6.0/google/__init__.py
--- old/google-resumable-media-2.5.0/google/__init__.py 2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/google/__init__.py 1970-01-01 01:00:00.000000000 +0100
@@ -1,22 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-try:
-    import pkg_resources
-
-    pkg_resources.declare_namespace(__name__)
-except ImportError:
-    import pkgutil
-
-    __path__ = pkgutil.extend_path(__path__, __name__)  # type: ignore
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google/resumable_media/_helpers.py new/google-resumable-media-2.6.0/google/resumable_media/_helpers.py
--- old/google-resumable-media-2.5.0/google/resumable_media/_helpers.py 2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/google/resumable_media/_helpers.py 2023-09-06 20:09:24.000000000 +0200
@@ -243,6 +243,34 @@
     return (expected_checksum, checksum_object)
 
 
+def _get_uploaded_checksum_from_headers(response, get_headers, checksum_type):
+    """Get the computed checksum and checksum object from the response headers.
+
+    Args:
+        response (~requests.Response): The HTTP response object.
+        get_headers (callable: response->dict): returns response headers.
+        checksum_type Optional(str): The checksum type to read from the headers,
+            exactly as it will appear in the headers (case-sensitive). Must be
+            "md5", "crc32c" or None.
+
+    Returns:
+        Tuple (Optional[str], object): The checksum of the response,
+        if it can be detected from the ``X-Goog-Hash`` header, and the
+        appropriate checksum object for the expected checksum.
+    """
+    if checksum_type not in ["md5", "crc32c", None]:
+        raise ValueError("checksum must be ``'md5'``, ``'crc32c'`` or ``None``")
+    elif checksum_type in ["md5", "crc32c"]:
+        headers = get_headers(response)
+        remote_checksum = _parse_checksum_header(
+            headers.get(_HASH_HEADER), response, checksum_label=checksum_type
+        )
+    else:
+        remote_checksum = None
+
+    return remote_checksum
+
+
 def _parse_checksum_header(header_value, response, checksum_label):
     """Parses the checksum header from an ``X-Goog-Hash`` value.
 
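
The new _get_uploaded_checksum_from_headers() helper reuses the existing _parse_checksum_header() machinery on the ``X-Goog-Hash`` response header. As a simplified stand-in (illustrative only, not the library code), the header carries comma-separated ``label=base64digest`` pairs, and the digest matching the requested label is returned:

    def pick_checksum(header_value, checksum_label):
        """Return the base64 digest labeled ``checksum_label``, or None."""
        if header_value is None:
            return None
        for entry in header_value.split(","):
            label, _, digest = entry.strip().partition("=")
            if label == checksum_label:
                return digest
        return None

    # pick_checksum("crc32c=n03x6A==,md5=Ojk9c3dhfxgoKVVHYwFbHQ==", "md5")
    # -> "Ojk9c3dhfxgoKVVHYwFbHQ=="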
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google/resumable_media/_upload.py new/google-resumable-media-2.6.0/google/resumable_media/_upload.py
--- old/google-resumable-media-2.5.0/google/resumable_media/_upload.py  2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/google/resumable_media/_upload.py  2023-09-06 20:09:24.000000000 +0200
@@ -33,6 +33,8 @@
 from google.resumable_media import _helpers
 from google.resumable_media import common
 
+from xml.etree import ElementTree
+
 
 _CONTENT_TYPE_HEADER = "content-type"
 _CONTENT_RANGE_TEMPLATE = "bytes {:d}-{:d}/{:d}"
@@ -54,6 +56,7 @@
     "{:d} bytes have been read from the stream, which exceeds "
     "the expected total {:d}."
 )
+_DELETE = "DELETE"
 _POST = "POST"
 _PUT = "PUT"
 _UPLOAD_CHECKSUM_MISMATCH_MESSAGE = (
@@ -63,6 +66,14 @@
 _UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE = (
     "Response metadata had no ``{}`` value; checksum could not be validated."
 )
+_UPLOAD_HEADER_NO_APPROPRIATE_CHECKSUM_MESSAGE = (
+    "Response headers had no ``{}`` value; checksum could not be validated."
+)
+_MPU_INITIATE_QUERY = "?uploads"
+_MPU_PART_QUERY_TEMPLATE = "?partNumber={part}&uploadId={upload_id}"
+_S3_COMPAT_XML_NAMESPACE = "{http://s3.amazonaws.com/doc/2006-03-01/}"
+_UPLOAD_ID_NODE = "UploadId"
+_MPU_FINAL_QUERY_TEMPLATE = "?uploadId={upload_id}"
 
 
 class UploadBase(object):
@@ -236,7 +247,7 @@
         upload_url (str): The URL where the content will be uploaded.
         headers (Optional[Mapping[str, str]]): Extra headers that should
             be sent with the request, e.g. headers for encrypted data.
-        checksum Optional([str]): The type of checksum to compute to verify
+        checksum (Optional([str])): The type of checksum to compute to verify
             the integrity of the object. The request metadata will be amended
             to include the computed value. Using this option will override a
             manually-set checksum value. Supported values are "md5", "crc32c"
@@ -341,10 +352,8 @@
         upload_url (str): The URL where the resumable upload will be initiated.
         chunk_size (int): The size of each chunk used to upload the resource.
         headers (Optional[Mapping[str, str]]): Extra headers that should
-            be sent with the :meth:`initiate` request, e.g. headers for
-            encrypted data. These **will not** be sent with
-            :meth:`transmit_next_chunk` or :meth:`recover` requests.
-        checksum Optional([str]): The type of checksum to compute to verify
+            be sent with every request.
+        checksum (Optional([str])): The type of checksum to compute to verify
             the integrity of the object. After the upload is complete, the
             server-computed checksum of the resulting object will be read
             and google.resumable_media.common.DataCorruption will be raised on
@@ -587,8 +596,7 @@
               * the body of the request
               * headers for the request
 
-            The headers **do not** incorporate the ``_headers`` on the
-            current instance.
+            The headers incorporate the ``_headers`` on the current instance.
 
         Raises:
             ValueError: If the current upload has finished.
@@ -718,7 +726,7 @@
             self._bytes_uploaded = int(match.group("end_byte")) + 1
 
     def _validate_checksum(self, response):
-        """Check the computed checksum, if any, against the response headers.
+        """Check the computed checksum, if any, against the recieved metadata.
 
         Args:
             response (object): The HTTP response object.
@@ -860,6 +868,512 @@
         raise NotImplementedError("This implementation is virtual.")
 
 
+class XMLMPUContainer(UploadBase):
+    """Initiate and close an upload using the XML MPU API.
+
+    An XML MPU sends an initial request and then receives an upload ID.
+    Using the upload ID, the upload is then done in numbered parts and the
+    parts can be uploaded concurrently.
+
+    In order to avoid concurrency issues with this container object, the
+    uploading of individual parts is handled separately, by XMLMPUPart objects
+    spawned from this container class. The XMLMPUPart objects are not
+    necessarily in the same process as the container, so they do not update the
+    container automatically.
+
+    MPUs are sometimes referred to as "Multipart Uploads", which is ambiguous
+    given the JSON multipart upload, so the abbreviation "MPU" will be used
+    throughout.
+
+    See: https://cloud.google.com/storage/docs/multipart-uploads
+
+    Args:
+        upload_url (str): The URL of the object (without query parameters). The
+            initiate, PUT, and finalization requests will all use this URL, with
+            varying query parameters.
+        filename (str): The name (path) of the file to upload.
+        headers (Optional[Mapping[str, str]]): Extra headers that should
+            be sent with every request.
+
+    Attributes:
+        upload_url (str): The URL where the content will be uploaded.
+        upload_id (Optional(str)): The ID of the upload from the initialization
+            response.
+    """
+
+    def __init__(self, upload_url, filename, headers=None, upload_id=None):
+        super().__init__(upload_url, headers=headers)
+        self._filename = filename
+        self._upload_id = upload_id
+        self._parts = {}
+
+    @property
+    def upload_id(self):
+        return self._upload_id
+
+    def register_part(self, part_number, etag):
+        """Register an uploaded part by part number and corresponding etag.
+
+        XMLMPUPart objects represent individual parts, and their part number
+        and etag can be registered to the container object with this method
+        and therefore incorporated in the finalize() call to finish the upload.
+
+        This method accepts part_number and etag, but not XMLMPUPart objects
+        themselves, to reduce the complexity involved in running XMLMPUPart
+        uploads in separate processes.
+
+        Args:
+            part_number (int): The part number. Parts are assembled into the
+                final uploaded object with finalize() in order of their part
+                numbers.
+            etag (str): The etag included in the server response after upload.
+        """
+        self._parts[part_number] = etag
+
+    def _prepare_initiate_request(self, content_type):
+        """Prepare the contents of HTTP request to initiate upload.
+
+        This is everything that must be done before a request that doesn't
+        require network I/O (or other I/O). This is based on the `sans-I/O`_
+        philosophy.
+
+        Args:
+            content_type (str): The content type of the resource, e.g. a JPEG
+                image has content type ``image/jpeg``.
+
+        Returns:
+            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
+
+              * HTTP verb for the request (always POST)
+              * the URL for the request
+              * the body of the request
+              * headers for the request
+
+        Raises:
+            ValueError: If the current upload has already been initiated.
+
+        .. _sans-I/O: https://sans-io.readthedocs.io/
+        """
+        if self.upload_id is not None:
+            raise ValueError("This upload has already been initiated.")
+
+        initiate_url = self.upload_url + _MPU_INITIATE_QUERY
+
+        headers = {
+            **self._headers,
+            _CONTENT_TYPE_HEADER: content_type,
+        }
+        return _POST, initiate_url, None, headers
+
+    def _process_initiate_response(self, response):
+        """Process the response from an HTTP request that initiated the upload.
+
+        This is everything that must be done after a request that doesn't
+        require network I/O (or other I/O). This is based on the `sans-I/O`_
+        philosophy.
+
+        This method takes the URL from the ``Location`` header and stores it
+        for future use. Within that URL, we assume the ``upload_id`` query
+        parameter has been included, but we do not check.
+
+        Args:
+            response (object): The HTTP response object.
+
+        Raises:
+            ~google.resumable_media.common.InvalidResponse: If the status
+                code is not 200.
+
+        .. _sans-I/O: https://sans-io.readthedocs.io/
+        """
+        _helpers.require_status_code(response, (http.client.OK,), self._get_status_code)
+        root = ElementTree.fromstring(response.text)
+        self._upload_id = root.find(_S3_COMPAT_XML_NAMESPACE + _UPLOAD_ID_NODE).text
+
+    def initiate(
+        self,
+        transport,
+        content_type,
+        timeout=None,
+    ):
+        """Initiate an MPU and record the upload ID.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            content_type (str): The content type of the resource, e.g. a JPEG
+                image has content type ``image/jpeg``.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Raises:
+            NotImplementedError: Always, since virtual.
+        """
+        raise NotImplementedError("This implementation is virtual.")
+
+    def _prepare_finalize_request(self):
+        """Prepare the contents of an HTTP request to finalize the upload.
+
+        All of the parts must be registered before calling this method.
+
+        Returns:
+            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
+
+              * HTTP verb for the request (always POST)
+              * the URL for the request
+              * the body of the request
+              * headers for the request
+
+        Raises:
+            ValueError: If the upload has not been initiated.
+        """
+        if self.upload_id is None:
+            raise ValueError("This upload has not yet been initiated.")
+
+        final_query = _MPU_FINAL_QUERY_TEMPLATE.format(upload_id=self._upload_id)
+        finalize_url = self.upload_url + final_query
+        final_xml_root = ElementTree.Element("CompleteMultipartUpload")
+        for part_number, etag in self._parts.items():
+            part = ElementTree.SubElement(final_xml_root, "Part")  # put in a loop
+            ElementTree.SubElement(part, "PartNumber").text = str(part_number)
+            ElementTree.SubElement(part, "ETag").text = etag
+        payload = ElementTree.tostring(final_xml_root)
+        return _POST, finalize_url, payload, self._headers
+
+    def _process_finalize_response(self, response):
+        """Process the response from an HTTP request that finalized the upload.
+
+        This is everything that must be done after a request that doesn't
+        require network I/O (or other I/O). This is based on the `sans-I/O`_
+        philosophy.
+
+        Args:
+            response (object): The HTTP response object.
+
+        Raises:
+            ~google.resumable_media.common.InvalidResponse: If the status
+                code is not 200.
+
+        .. _sans-I/O: https://sans-io.readthedocs.io/
+        """
+
+        _helpers.require_status_code(response, (http.client.OK,), self._get_status_code)
+        self._finished = True
+
+    def finalize(
+        self,
+        transport,
+        timeout=None,
+    ):
+        """Finalize an MPU request with all the parts.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Raises:
+            NotImplementedError: Always, since virtual.
+        """
+        raise NotImplementedError("This implementation is virtual.")
+
+    def _prepare_cancel_request(self):
+        """Prepare the contents of an HTTP request to cancel the upload.
+
+        Returns:
+            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
+
+              * HTTP verb for the request (always DELETE)
+              * the URL for the request
+              * the body of the request
+              * headers for the request
+
+        Raises:
+            ValueError: If the upload has not been initiated.
+        """
+        if self.upload_id is None:
+            raise ValueError("This upload has not yet been initiated.")
+
+        cancel_query = _MPU_FINAL_QUERY_TEMPLATE.format(upload_id=self._upload_id)
+        cancel_url = self.upload_url + cancel_query
+        return _DELETE, cancel_url, None, self._headers
+
+    def _process_cancel_response(self, response):
+        """Process the response from an HTTP request that canceled the upload.
+
+        This is everything that must be done after a request that doesn't
+        require network I/O (or other I/O). This is based on the `sans-I/O`_
+        philosophy.
+
+        Args:
+            response (object): The HTTP response object.
+
+        Raises:
+            ~google.resumable_media.common.InvalidResponse: If the status
+                code is not 204.
+
+        .. _sans-I/O: https://sans-io.readthedocs.io/
+        """
+
+        _helpers.require_status_code(
+            response, (http.client.NO_CONTENT,), self._get_status_code
+        )
+
+    def cancel(
+        self,
+        transport,
+        timeout=None,
+    ):
+        """Cancel an MPU request and permanently delete any uploaded parts.
+
+        This cannot be undone.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Raises:
+            NotImplementedError: Always, since virtual.
+        """
+        raise NotImplementedError("This implementation is virtual.")
+
+
+class XMLMPUPart(UploadBase):
+    """Upload a single part of an existing XML MPU container.
+
+    An XML MPU sends an initial request and then receives an upload ID.
+    Using the upload ID, the upload is then done in numbered parts and the
+    parts can be uploaded concurrently.
+
+    In order to avoid concurrency issues with the container object, the
+    uploading of individual parts is handled separately by multiple objects
+    of this class. Once a part is uploaded, it can be registered with the
+    container with `container.register_part(part.part_number, part.etag)`.
+
+    MPUs are sometimes referred to as "Multipart Uploads", which is ambiguous
+    given the JSON multipart upload, so the abbreviation "MPU" will be used
+    throughout.
+
+    See: https://cloud.google.com/storage/docs/multipart-uploads
+
+    Args:
+        upload_url (str): The URL of the object (without query parameters).
+        upload_id (str): The ID of the upload from the initialization response.
+        filename (str): The name (path) of the file to upload.
+        start (int): The byte index of the beginning of the part.
+        end (int): The byte index of the end of the part.
+        part_number (int): The part number. Part numbers will be assembled in
+            sequential order when the container is finalized.
+        headers (Optional[Mapping[str, str]]): Extra headers that should
+            be sent with every request.
+        checksum (Optional([str])): The type of checksum to compute to verify
+            the integrity of the object. The request headers will be amended
+            to include the computed value. Supported values are "md5", "crc32c"
+            and None. The default is None.
+
+    Attributes:
+        upload_url (str): The URL of the object (without query parameters).
+        upload_id (str): The ID of the upload from the initialization response.
+        filename (str): The name (path) of the file to upload.
+        start (int): The byte index of the beginning of the part.
+        end (int): The byte index of the end of the part.
+        part_number (int): The part number. Part numbers will be assembled in
+            sequential order when the container is finalized.
+        etag (Optional(str)): The etag returned by the service after upload.
+    """
+
+    def __init__(
+        self,
+        upload_url,
+        upload_id,
+        filename,
+        start,
+        end,
+        part_number,
+        headers=None,
+        checksum=None,
+    ):
+        super().__init__(upload_url, headers=headers)
+        self._filename = filename
+        self._start = start
+        self._end = end
+        self._upload_id = upload_id
+        self._part_number = part_number
+        self._etag = None
+        self._checksum_type = checksum
+        self._checksum_object = None
+
+    @property
+    def part_number(self):
+        return self._part_number
+
+    @property
+    def upload_id(self):
+        return self._upload_id
+
+    @property
+    def filename(self):
+        return self._filename
+
+    @property
+    def etag(self):
+        return self._etag
+
+    @property
+    def start(self):
+        return self._start
+
+    @property
+    def end(self):
+        return self._end
+
+    def _prepare_upload_request(self):
+        """Prepare the contents of HTTP request to upload a part.
+
+        This is everything that must be done before a request that doesn't
+        require network I/O. This is based on the `sans-I/O`_ philosophy.
+
+        For the time being, this **does require** some form of I/O to read
+        a part from ``stream`` (via :func:`get_part_payload`). However, this
+        will (almost) certainly not be network I/O.
+
+        Returns:
+            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple
+
+              * HTTP verb for the request (always PUT)
+              * the URL for the request
+              * the body of the request
+              * headers for the request
+
+            The headers incorporate the ``_headers`` on the current instance.
+
+        Raises:
+            ValueError: If the current upload has finished.
+
+        .. _sans-I/O: https://sans-io.readthedocs.io/
+        """
+        if self.finished:
+            raise ValueError("This part has already been uploaded.")
+
+        with open(self._filename, "br") as f:
+            f.seek(self._start)
+            payload = f.read(self._end - self._start)
+
+        self._checksum_object = _helpers._get_checksum_object(self._checksum_type)
+        if self._checksum_object is not None:
+            self._checksum_object.update(payload)
+
+        part_query = _MPU_PART_QUERY_TEMPLATE.format(
+            part=self._part_number, upload_id=self._upload_id
+        )
+        upload_url = self.upload_url + part_query
+        return _PUT, upload_url, payload, self._headers
+
+    def _process_upload_response(self, response):
+        """Process the response from an HTTP request.
+
+        This is everything that must be done after a request that doesn't
+        require network I/O (or other I/O). This is based on the `sans-I/O`_
+        philosophy.
+
+        Args:
+            response (object): The HTTP response object.
+
+        Raises:
+            ~google.resumable_media.common.InvalidResponse: If the status
+                code is not 200 or the response is missing data.
+
+        .. _sans-I/O: https://sans-io.readthedocs.io/
+        """
+        _helpers.require_status_code(
+            response,
+            (http.client.OK,),
+            self._get_status_code,
+        )
+
+        self._validate_checksum(response)
+
+        etag = _helpers.header_required(response, "etag", self._get_headers)
+        self._etag = etag
+        self._finished = True
+
+    def upload(
+        self,
+        transport,
+        timeout=None,
+    ):
+        """Upload the part.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Raises:
+            NotImplementedError: Always, since virtual.
+        """
+        raise NotImplementedError("This implementation is virtual.")
+
+    def _validate_checksum(self, response):
+        """Check the computed checksum, if any, against the response headers.
+
+        Args:
+            response (object): The HTTP response object.
+
+        Raises:
+            ~google.resumable_media.common.DataCorruption: If the checksum
+            computed locally and the checksum reported by the remote host do
+            not match.
+        """
+        if self._checksum_type is None:
+            return
+
+        remote_checksum = _helpers._get_uploaded_checksum_from_headers(
+            response, self._get_headers, self._checksum_type
+        )
+
+        if remote_checksum is None:
+            metadata_key = _helpers._get_metadata_key(self._checksum_type)
+            raise common.InvalidResponse(
+                response,
+                _UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE.format(metadata_key),
+                self._get_headers(response),
+            )
+        local_checksum = _helpers.prepare_checksum_digest(
+            self._checksum_object.digest()
+        )
+        if local_checksum != remote_checksum:
+            raise common.DataCorruption(
+                response,
+                _UPLOAD_CHECKSUM_MISMATCH_MESSAGE.format(
+                    self._checksum_type.upper(), local_checksum, remote_checksum
+                ),
+            )
+
+
 def get_boundary():
     """Get a random boundary for a multipart request.
 
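
Note that the container and part classes above are deliberately transport-agnostic: each _prepare_*() method only assembles a (verb, URL, body, headers) tuple and each _process_*() method only interprets a response object, with the actual I/O left to a binding such as the requests-based one later in this diff. A minimal sketch of such a binding (the class name and the simplified, retry-free initiate() are illustrative, not the shipped implementation):

    from google.resumable_media import _upload

    class SessionXMLMPUContainer(_upload.XMLMPUContainer):
        # The virtual base class asks its subclass how to read status, headers
        # and body off whatever response type the transport returns.
        _get_status_code = staticmethod(lambda response: response.status_code)
        _get_headers = staticmethod(lambda response: response.headers)
        _get_body = staticmethod(lambda response: response.content)

        def initiate(self, session, content_type, timeout=60):
            # sans-I/O step 1: build the request without touching the network
            method, url, body, headers = self._prepare_initiate_request(content_type)
            # the only I/O: any requests-compatible session works here
            response = session.request(
                method, url, data=body, headers=headers, timeout=timeout
            )
            # sans-I/O step 2: interpret the response (extracts the UploadId)
            self._process_initiate_response(response)
            return response

The shipped binding in requests/upload.py below does the same thing, but routes each call through _request_helpers.wait_and_retry().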
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google/resumable_media/requests/__init__.py new/google-resumable-media-2.6.0/google/resumable_media/requests/__init__.py
--- old/google-resumable-media-2.5.0/google/resumable_media/requests/__init__.py        2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/google/resumable_media/requests/__init__.py        2023-09-06 20:09:24.000000000 +0200
@@ -669,7 +669,8 @@
 from google.resumable_media.requests.download import RawDownload
 from google.resumable_media.requests.upload import ResumableUpload
 from google.resumable_media.requests.upload import SimpleUpload
-
+from google.resumable_media.requests.upload import XMLMPUContainer
+from google.resumable_media.requests.upload import XMLMPUPart
 
 __all__ = [
     "ChunkedDownload",
@@ -679,4 +680,6 @@
     "RawDownload",
     "ResumableUpload",
     "SimpleUpload",
+    "XMLMPUContainer",
+    "XMLMPUPart",
 ]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google/resumable_media/requests/upload.py new/google-resumable-media-2.6.0/google/resumable_media/requests/upload.py
--- old/google-resumable-media-2.5.0/google/resumable_media/requests/upload.py  2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/google/resumable_media/requests/upload.py  2023-09-06 20:09:24.000000000 +0200
@@ -555,3 +555,208 @@
         return _request_helpers.wait_and_retry(
             retriable_request, self._get_status_code, self._retry_strategy
         )
+
+
+class XMLMPUContainer(_request_helpers.RequestsMixin, _upload.XMLMPUContainer):
+    """Initiate and close an upload using the XML MPU API.
+
+    An XML MPU sends an initial request and then receives an upload ID.
+    Using the upload ID, the upload is then done in numbered parts and the
+    parts can be uploaded concurrently.
+
+    In order to avoid concurrency issues with this container object, the
+    uploading of individual parts is handled separately, by XMLMPUPart objects
+    spawned from this container class. The XMLMPUPart objects are not
+    necessarily in the same process as the container, so they do not update the
+    container automatically.
+
+    MPUs are sometimes referred to as "Multipart Uploads", which is ambiguous
+    given the JSON multipart upload, so the abbreviation "MPU" will be used
+    throughout.
+
+    See: https://cloud.google.com/storage/docs/multipart-uploads
+
+    Args:
+        upload_url (str): The URL of the object (without query parameters). The
+            initiate, PUT, and finalization requests will all use this URL, with
+            varying query parameters.
+        headers (Optional[Mapping[str, str]]): Extra headers that should
+            be sent with the :meth:`initiate` request, e.g. headers for
+            encrypted data. These headers will be propagated to individual
+            XMLMPUPart objects spawned from this container as well.
+
+    Attributes:
+        upload_url (str): The URL where the content will be uploaded.
+        upload_id (Optional(str)): The ID of the upload from the initialization
+            response.
+    """
+
+    def initiate(
+        self,
+        transport,
+        content_type,
+        timeout=(
+            _request_helpers._DEFAULT_CONNECT_TIMEOUT,
+            _request_helpers._DEFAULT_READ_TIMEOUT,
+        ),
+    ):
+        """Initiate an MPU and record the upload ID.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            content_type (str): The content type of the resource, e.g. a JPEG
+                image has content type ``image/jpeg``.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Returns:
+            ~requests.Response: The HTTP response returned by ``transport``.
+        """
+
+        method, url, payload, headers = self._prepare_initiate_request(
+            content_type,
+        )
+
+        # Wrap the request business logic in a function to be retried.
+        def retriable_request():
+            result = transport.request(
+                method, url, data=payload, headers=headers, timeout=timeout
+            )
+
+            self._process_initiate_response(result)
+
+            return result
+
+        return _request_helpers.wait_and_retry(
+            retriable_request, self._get_status_code, self._retry_strategy
+        )
+
+    def finalize(
+        self,
+        transport,
+        timeout=(
+            _request_helpers._DEFAULT_CONNECT_TIMEOUT,
+            _request_helpers._DEFAULT_READ_TIMEOUT,
+        ),
+    ):
+        """Finalize an MPU request with all the parts.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Returns:
+            ~requests.Response: The HTTP response returned by ``transport``.
+        """
+        method, url, payload, headers = self._prepare_finalize_request()
+
+        # Wrap the request business logic in a function to be retried.
+        def retriable_request():
+            result = transport.request(
+                method, url, data=payload, headers=headers, timeout=timeout
+            )
+
+            self._process_finalize_response(result)
+
+            return result
+
+        return _request_helpers.wait_and_retry(
+            retriable_request, self._get_status_code, self._retry_strategy
+        )
+
+    def cancel(
+        self,
+        transport,
+        timeout=(
+            _request_helpers._DEFAULT_CONNECT_TIMEOUT,
+            _request_helpers._DEFAULT_READ_TIMEOUT,
+        ),
+    ):
+        """Cancel an MPU request and permanently delete any uploaded parts.
+
+        This cannot be undone.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Returns:
+            ~requests.Response: The HTTP response returned by ``transport``.
+        """
+        method, url, payload, headers = self._prepare_cancel_request()
+
+        # Wrap the request business logic in a function to be retried.
+        def retriable_request():
+            result = transport.request(
+                method, url, data=payload, headers=headers, timeout=timeout
+            )
+
+            self._process_cancel_response(result)
+
+            return result
+
+        return _request_helpers.wait_and_retry(
+            retriable_request, self._get_status_code, self._retry_strategy
+        )
+
+
+class XMLMPUPart(_request_helpers.RequestsMixin, _upload.XMLMPUPart):
+    def upload(
+        self,
+        transport,
+        timeout=(
+            _request_helpers._DEFAULT_CONNECT_TIMEOUT,
+            _request_helpers._DEFAULT_READ_TIMEOUT,
+        ),
+    ):
+        """Upload the part.
+
+        Args:
+            transport (object): An object which can make authenticated
+                requests.
+            timeout (Optional[Union[float, Tuple[float, float]]]):
+                The number of seconds to wait for the server response.
+                Depending on the retry strategy, a request may be repeated
+                several times using the same timeout each time.
+
+                Can also be passed as a tuple (connect_timeout, read_timeout).
+                See :meth:`requests.Session.request` documentation for details.
+
+        Returns:
+            ~requests.Response: The HTTP response returned by ``transport``.
+        """
+        method, url, payload, headers = self._prepare_upload_request()
+
+        # Wrap the request business logic in a function to be retried.
+        def retriable_request():
+            result = transport.request(
+                method, url, data=payload, headers=headers, timeout=timeout
+            )
+
+            self._process_upload_response(result)
+
+            return result
+
+        return _request_helpers.wait_and_retry(
+            retriable_request, self._get_status_code, self._retry_strategy
+        )
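
One behavioral note on cancel(): it permanently deletes any parts uploaded so far, and a later finalize() fails because the upload no longer exists (the system test further down expects a 404 surfaced as InvalidResponse). A short sketch, reusing the transport and container names from the earlier example:

    from google.resumable_media import common

    container.cancel(transport)  # deletes all uploaded parts; cannot be undone
    try:
        container.finalize(transport)
    except common.InvalidResponse:
        pass  # expected once the MPU has been cancelled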
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google_resumable_media.egg-info/PKG-INFO new/google-resumable-media-2.6.0/google_resumable_media.egg-info/PKG-INFO
--- old/google-resumable-media-2.5.0/google_resumable_media.egg-info/PKG-INFO   2023-04-24 21:02:52.000000000 +0200
+++ new/google-resumable-media-2.6.0/google_resumable_media.egg-info/PKG-INFO   2023-09-06 20:12:16.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: google-resumable-media
-Version: 2.5.0
+Version: 2.6.0
 Summary: Utilities for Google Media Downloads and Resumable Uploads
 Home-page: https://github.com/googleapis/google-resumable-media-python
 Author: Google Cloud Platform
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google_resumable_media.egg-info/SOURCES.txt new/google-resumable-media-2.6.0/google_resumable_media.egg-info/SOURCES.txt
--- old/google-resumable-media-2.5.0/google_resumable_media.egg-info/SOURCES.txt        2023-04-24 21:02:52.000000000 +0200
+++ new/google-resumable-media-2.6.0/google_resumable_media.egg-info/SOURCES.txt        2023-09-06 20:12:16.000000000 +0200
@@ -3,7 +3,6 @@
 README.rst
 setup.cfg
 setup.py
-google/__init__.py
 google/_async_resumable_media/__init__.py
 google/_async_resumable_media/_download.py
 google/_async_resumable_media/_helpers.py
@@ -24,7 +23,6 @@
 google_resumable_media.egg-info/PKG-INFO
 google_resumable_media.egg-info/SOURCES.txt
 google_resumable_media.egg-info/dependency_links.txt
-google_resumable_media.egg-info/namespace_packages.txt
 google_resumable_media.egg-info/not-zip-safe
 google_resumable_media.egg-info/requires.txt
 google_resumable_media.egg-info/top_level.txt
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google_resumable_media.egg-info/namespace_packages.txt new/google-resumable-media-2.6.0/google_resumable_media.egg-info/namespace_packages.txt
--- old/google-resumable-media-2.5.0/google_resumable_media.egg-info/namespace_packages.txt     2023-04-24 21:02:52.000000000 +0200
+++ new/google-resumable-media-2.6.0/google_resumable_media.egg-info/namespace_packages.txt     1970-01-01 01:00:00.000000000 +0100
@@ -1 +0,0 @@
-google
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google_resumable_media.egg-info/requires.txt new/google-resumable-media-2.6.0/google_resumable_media.egg-info/requires.txt
--- old/google-resumable-media-2.5.0/google_resumable_media.egg-info/requires.txt       2023-04-24 21:02:52.000000000 +0200
+++ new/google-resumable-media-2.6.0/google_resumable_media.egg-info/requires.txt       2023-09-06 20:12:16.000000000 +0200
@@ -2,6 +2,7 @@
 
 [aiohttp]
 aiohttp<4.0.0dev,>=3.6.2
+google-auth<2.0dev,>=1.22.0
 
 [requests]
 requests<3.0.0dev,>=2.18.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/google_resumable_media.egg-info/top_level.txt new/google-resumable-media-2.6.0/google_resumable_media.egg-info/top_level.txt
--- old/google-resumable-media-2.5.0/google_resumable_media.egg-info/top_level.txt      2023-04-24 21:02:52.000000000 +0200
+++ new/google-resumable-media-2.6.0/google_resumable_media.egg-info/top_level.txt      2023-09-06 20:12:16.000000000 +0200
@@ -1 +1,2 @@
 google
+testing
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/setup.py new/google-resumable-media-2.6.0/setup.py
--- old/google-resumable-media-2.5.0/setup.py   2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/setup.py   2023-09-06 20:09:24.000000000 +0200
@@ -30,20 +30,21 @@
     'requests': [
         'requests >= 2.18.0, < 3.0.0dev',
     ],
-    'aiohttp': 'aiohttp >= 3.6.2, < 4.0.0dev'
+    'aiohttp': ['aiohttp >= 3.6.2, < 4.0.0dev', 'google-auth >= 1.22.0, < 2.0dev']
 }
 
 setuptools.setup(
     name='google-resumable-media',
-    version = "2.5.0",
+    version = "2.6.0",
     description='Utilities for Google Media Downloads and Resumable Uploads',
     author='Google Cloud Platform',
     author_email='googleapis-publis...@google.com',
     long_description=README,
-    namespace_packages=['google'],
     scripts=[],
     url='https://github.com/googleapis/google-resumable-media-python',
-    packages=setuptools.find_packages(exclude=('tests*',)),
+    packages=setuptools.find_namespace_packages(
+        exclude=("tests*", "docs*")
+    ),
     license='Apache 2.0',
     platforms='Posix; MacOS X; Windows',
     include_package_data=True,
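
The setup.py change above is the packaging half of the "native namespace packages" feature: google/__init__.py and the namespace_packages= declaration are dropped, and the shared ``google`` namespace is resolved through PEP 420 instead, with find_namespace_packages() discovering the subpackages. A small illustration of the resulting behavior (assuming this or any other google-* distribution is installed):

    # Layout after the change: no google/__init__.py anywhere, only
    #   google/resumable_media/__init__.py
    #   google/_async_resumable_media/__init__.py
    import google
    import google.resumable_media

    # The namespace path spans every installed distribution that contributes
    # a google/ subdirectory, so several google-* packages can coexist.
    print(list(google.__path__))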
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/tests/system/requests/conftest.py new/google-resumable-media-2.6.0/tests/system/requests/conftest.py
--- old/google-resumable-media-2.5.0/tests/system/requests/conftest.py  2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/tests/system/requests/conftest.py  2023-09-06 20:09:24.000000000 +0200
@@ -53,6 +53,6 @@
 def bucket(authorized_transport):
     ensure_bucket(authorized_transport)
 
-    yield utils.BUCKET_URL
+    yield utils.BUCKET_NAME
 
     cleanup_bucket(authorized_transport)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/tests/system/requests/test_upload.py new/google-resumable-media-2.6.0/tests/system/requests/test_upload.py
--- old/google-resumable-media-2.5.0/tests/system/requests/test_upload.py       2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/tests/system/requests/test_upload.py       2023-09-06 20:09:24.000000000 +0200
@@ -659,3 +659,118 @@
             content_type=BYTES_CONTENT_TYPE,
         )
         self._check_range_sent(response2, 2 * chunk_size, total_bytes - 1, total_bytes)
+
+
+@pytest.mark.parametrize("checksum", ["md5", "crc32c", None])
+def test_XMLMPU(authorized_transport, bucket, cleanup, checksum):
+    with open(ICO_FILE, "rb") as file_obj:
+        actual_contents = file_obj.read()
+
+    blob_name = os.path.basename(ICO_FILE)
+    # Make sure to clean up the uploaded blob when we are done.
+    cleanup(blob_name, authorized_transport)
+    check_does_not_exist(authorized_transport, blob_name)
+
+    # Create the actual upload object.
+    upload_url = utils.XML_UPLOAD_URL_TEMPLATE.format(bucket=bucket, blob=blob_name)
+    container = resumable_requests.XMLMPUContainer(upload_url, blob_name)
+    # Initiate
+    container.initiate(authorized_transport, ICO_CONTENT_TYPE)
+    assert container.upload_id
+
+    part = resumable_requests.XMLMPUPart(
+        upload_url,
+        container.upload_id,
+        ICO_FILE,
+        0,
+        len(actual_contents),
+        1,
+        checksum=checksum,
+    )
+    part.upload(authorized_transport)
+    assert part.etag
+
+    container.register_part(1, part.etag)
+    container.finalize(authorized_transport)
+    assert container.finished
+
+    # Download the content to make sure it's "working as expected".
+    check_content(blob_name, actual_contents, authorized_transport)
+
+
+@pytest.mark.parametrize("checksum", ["md5", "crc32c"])
+def test_XMLMPU_with_bad_checksum(authorized_transport, bucket, checksum):
+    with open(ICO_FILE, "rb") as file_obj:
+        actual_contents = file_obj.read()
+
+    blob_name = os.path.basename(ICO_FILE)
+    # No need to clean up, since the upload will not be finalized successfully.
+    check_does_not_exist(authorized_transport, blob_name)
+
+    # Create the actual upload object.
+    upload_url = utils.XML_UPLOAD_URL_TEMPLATE.format(bucket=bucket, blob=blob_name)
+    container = resumable_requests.XMLMPUContainer(upload_url, blob_name)
+    # Initiate
+    container.initiate(authorized_transport, ICO_CONTENT_TYPE)
+    assert container.upload_id
+
+    try:
+        part = resumable_requests.XMLMPUPart(
+            upload_url,
+            container.upload_id,
+            ICO_FILE,
+            0,
+            len(actual_contents),
+            1,
+            checksum=checksum,
+        )
+
+        fake_checksum_object = _helpers._get_checksum_object(checksum)
+        fake_checksum_object.update(b"bad data")
+        fake_prepared_checksum_digest = _helpers.prepare_checksum_digest(
+            fake_checksum_object.digest()
+        )
+        with mock.patch.object(
+            _helpers,
+            "prepare_checksum_digest",
+            return_value=fake_prepared_checksum_digest,
+        ):
+            with pytest.raises(common.DataCorruption):
+                part.upload(authorized_transport)
+    finally:
+        utils.retry_transient_errors(authorized_transport.delete)(
+            upload_url + "?uploadId=" + str(container.upload_id)
+        )
+
+
+def test_XMLMPU_cancel(authorized_transport, bucket):
+    with open(ICO_FILE, "rb") as file_obj:
+        actual_contents = file_obj.read()
+
+    blob_name = os.path.basename(ICO_FILE)
+    check_does_not_exist(authorized_transport, blob_name)
+
+    # Create the actual upload object.
+    upload_url = utils.XML_UPLOAD_URL_TEMPLATE.format(bucket=bucket, blob=blob_name)
+    container = resumable_requests.XMLMPUContainer(upload_url, blob_name)
+    # Initiate
+    container.initiate(authorized_transport, ICO_CONTENT_TYPE)
+    assert container.upload_id
+
+    part = resumable_requests.XMLMPUPart(
+        upload_url,
+        container.upload_id,
+        ICO_FILE,
+        0,
+        len(actual_contents),
+        1,
+    )
+    part.upload(authorized_transport)
+    assert part.etag
+
+    container.register_part(1, part.etag)
+    container.cancel(authorized_transport)
+
+    # Validate the cancel worked by expecting a 404 on finalize.
+    with pytest.raises(resumable_media.InvalidResponse):
+        container.finalize(authorized_transport)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/tests/system/utils.py new/google-resumable-media-2.6.0/tests/system/utils.py
--- old/google-resumable-media-2.5.0/tests/system/utils.py      2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/tests/system/utils.py      2023-09-06 20:09:24.000000000 +0200
@@ -38,6 +38,9 @@
 
 METADATA_URL_TEMPLATE = BUCKET_URL + "/o/{blob_name}"
 
+XML_UPLOAD_URL_TEMPLATE = "https://{bucket}.storage.googleapis.com/{blob}"
+
+
 GCS_RW_SCOPE = "https://www.googleapis.com/auth/devstorage.read_write"
 # Generated using random.choice() with all 256 byte choices.
 ENCRYPTION_KEY = (
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/tests/unit/requests/test_upload.py new/google-resumable-media-2.6.0/tests/unit/requests/test_upload.py
--- old/google-resumable-media-2.5.0/tests/unit/requests/test_upload.py 2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/tests/unit/requests/test_upload.py 2023-09-06 20:09:24.000000000 +0200
@@ -15,7 +15,8 @@
 import http.client
 import io
 import json
-
+import pytest  # type: ignore
+import tempfile
 from unittest import mock
 
 import google.resumable_media.requests.upload as upload_mod
@@ -30,6 +31,25 @@
 JSON_TYPE = "application/json; charset=UTF-8"
 JSON_TYPE_LINE = b"content-type: application/json; charset=UTF-8\r\n"
 EXPECTED_TIMEOUT = (61, 60)
+EXAMPLE_XML_UPLOAD_URL = "https://test-project.storage.googleapis.com/test-bucket"
+EXAMPLE_XML_MPU_INITIATE_TEXT_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>travel-maps</Bucket>
+  <Key>paris.jpg</Key>
+  <UploadId>{upload_id}</UploadId>
+</InitiateMultipartUploadResult>
+"""
+UPLOAD_ID = "VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
+PARTS = {1: "39a59594290b0f9a30662a56d695b71d", 2: "00000000290b0f9a30662a56d695b71d"}
+FILE_DATA = b"testdata" * 128
+
+
+@pytest.fixture(scope="session")
+def filename():
+    with tempfile.NamedTemporaryFile() as f:
+        f.write(FILE_DATA)
+        f.flush()
+        yield f.name
 
 
 class TestSimpleUpload(object):
@@ -333,8 +353,54 @@
         )
 
 
-def _make_response(status_code=http.client.OK, headers=None):
+def test_mpu_container():
+    container = upload_mod.XMLMPUContainer(EXAMPLE_XML_UPLOAD_URL, filename)
+
+    response_text = EXAMPLE_XML_MPU_INITIATE_TEXT_TEMPLATE.format(upload_id=UPLOAD_ID)
+
+    transport = mock.Mock(spec=["request"])
+    transport.request.return_value = _make_response(text=response_text)
+    container.initiate(transport, BASIC_CONTENT)
+    assert container.upload_id == UPLOAD_ID
+
+    for part, etag in PARTS.items():
+        container.register_part(part, etag)
+
+    assert container._parts == PARTS
+
+    transport = mock.Mock(spec=["request"])
+    transport.request.return_value = _make_response()
+    container.finalize(transport)
+    assert container.finished
+
+
+def test_mpu_container_cancel():
+    container = upload_mod.XMLMPUContainer(
+        EXAMPLE_XML_UPLOAD_URL, filename, upload_id=UPLOAD_ID
+    )
+
+    transport = mock.Mock(spec=["request"])
+    transport.request.return_value = _make_response(status_code=204)
+    container.cancel(transport)
+
+
+def test_mpu_part(filename):
+    part = upload_mod.XMLMPUPart(EXAMPLE_XML_UPLOAD_URL, UPLOAD_ID, filename, 
0, 128, 1)
+
+    transport = mock.Mock(spec=["request"])
+    transport.request.return_value = _make_response(headers={"etag": PARTS[1]})
+
+    part.upload(transport)
+
+    assert part.finished
+    assert part.etag == PARTS[1]
+
+
+def _make_response(status_code=http.client.OK, headers=None, text=None):
     headers = headers or {}
     return mock.Mock(
-        headers=headers, status_code=status_code, spec=["headers", "status_code"]
+        headers=headers,
+        status_code=status_code,
+        text=text,
+        spec=["headers", "status_code", "text"],
     )
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/tests/unit/test__helpers.py new/google-resumable-media-2.6.0/tests/unit/test__helpers.py
--- old/google-resumable-media-2.5.0/tests/unit/test__helpers.py        2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/tests/unit/test__helpers.py        2023-09-06 20:09:24.000000000 +0200
@@ -493,6 +493,14 @@
         assert new_url == "{}&{}".format(MEDIA_URL, expected)
 
 
+def test__get_uploaded_checksum_from_headers_error_handling():
+    response = _mock_response({})
+
+    with pytest.raises(ValueError):
+        _helpers._get_uploaded_checksum_from_headers(response, None, "invalid")
+    assert _helpers._get_uploaded_checksum_from_headers(response, None, None) is None
+
+
 def _mock_response(headers):
     return mock.Mock(
         headers=headers,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/google-resumable-media-2.5.0/tests/unit/test__upload.py new/google-resumable-media-2.6.0/tests/unit/test__upload.py
--- old/google-resumable-media-2.5.0/tests/unit/test__upload.py 2023-04-24 21:00:19.000000000 +0200
+++ new/google-resumable-media-2.6.0/tests/unit/test__upload.py 2023-09-06 20:09:24.000000000 +0200
@@ -15,6 +15,7 @@
 import http.client
 import io
 import sys
+import tempfile
 
 from unittest import mock
 import pytest  # type: ignore
@@ -32,6 +33,26 @@
 BASIC_CONTENT = "text/plain"
 JSON_TYPE = "application/json; charset=UTF-8"
 JSON_TYPE_LINE = b"content-type: application/json; charset=UTF-8\r\n"
+EXAMPLE_XML_UPLOAD_URL = "https://test-project.storage.googleapis.com/test-bucket"
+EXAMPLE_HEADERS = {"example-key": "example-content"}
+EXAMPLE_XML_MPU_INITIATE_TEXT_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>travel-maps</Bucket>
+  <Key>paris.jpg</Key>
+  <UploadId>{upload_id}</UploadId>
+</InitiateMultipartUploadResult>
+"""
+UPLOAD_ID = "VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"
+PARTS = {1: "39a59594290b0f9a30662a56d695b71d", 2: "00000000290b0f9a30662a56d695b71d"}
+FILE_DATA = b"testdata" * 128
+
+
+@pytest.fixture(scope="session")
+def filename():
+    with tempfile.NamedTemporaryFile() as f:
+        f.write(FILE_DATA)
+        f.flush()
+        yield f.name
 
 
 class TestUploadBase(object):
@@ -1230,6 +1251,253 @@
         assert result == "bytes 1000-10000/*"
 
 
+def test_xml_mpu_container_constructor_and_properties(filename):
+    container = _upload.XMLMPUContainer(EXAMPLE_XML_UPLOAD_URL, filename)
+    assert container.upload_url == EXAMPLE_XML_UPLOAD_URL
+    assert container.upload_id is None
+    assert container._headers == {}
+    assert container._parts == {}
+    assert container._filename == filename
+
+    container = _upload.XMLMPUContainer(
+        EXAMPLE_XML_UPLOAD_URL,
+        filename,
+        headers=EXAMPLE_HEADERS,
+        upload_id=UPLOAD_ID,
+    )
+    container._parts = PARTS
+    assert container.upload_url == EXAMPLE_XML_UPLOAD_URL
+    assert container.upload_id == UPLOAD_ID
+    assert container._headers == EXAMPLE_HEADERS
+    assert container._parts == PARTS
+    assert container._filename == filename
+
+
+def test_xml_mpu_container_initiate(filename):
+    container = _upload.XMLMPUContainer(
+        EXAMPLE_XML_UPLOAD_URL, filename, upload_id=UPLOAD_ID
+    )
+    with pytest.raises(ValueError):
+        container._prepare_initiate_request(BASIC_CONTENT)
+
+    container = _upload.XMLMPUContainer(
+        EXAMPLE_XML_UPLOAD_URL, filename, headers=EXAMPLE_HEADERS
+    )
+    verb, url, body, headers = container._prepare_initiate_request(BASIC_CONTENT)
+    assert verb == _upload._POST
+    assert url == EXAMPLE_XML_UPLOAD_URL + _upload._MPU_INITIATE_QUERY
+    assert not body
+    assert headers == {**EXAMPLE_HEADERS, "content-type": BASIC_CONTENT}
+
+    _fix_up_virtual(container)
+    response = _make_xml_response(
+        text=EXAMPLE_XML_MPU_INITIATE_TEXT_TEMPLATE.format(upload_id=UPLOAD_ID)
+    )
+    container._process_initiate_response(response)
+    assert container.upload_id == UPLOAD_ID
+
+    with pytest.raises(NotImplementedError):
+        container.initiate(None, None)
+
+
+def test_xml_mpu_container_finalize(filename):
+    container = _upload.XMLMPUContainer(EXAMPLE_XML_UPLOAD_URL, filename)
+    with pytest.raises(ValueError):
+        container._prepare_finalize_request()
+
+    container = _upload.XMLMPUContainer(
+        EXAMPLE_XML_UPLOAD_URL,
+        filename,
+        headers=EXAMPLE_HEADERS,
+        upload_id=UPLOAD_ID,
+    )
+    container._parts = PARTS
+    verb, url, body, headers = container._prepare_finalize_request()
+    assert verb == _upload._POST
+    final_query = _upload._MPU_FINAL_QUERY_TEMPLATE.format(upload_id=UPLOAD_ID)
+    assert url == EXAMPLE_XML_UPLOAD_URL + final_query
+    assert headers == EXAMPLE_HEADERS
+    assert b"CompleteMultipartUpload" in body
+    for key, value in PARTS.items():
+        assert str(key).encode("utf-8") in body
+        assert value.encode("utf-8") in body
+
+    _fix_up_virtual(container)
+    response = _make_xml_response()
+    container._process_finalize_response(response)
+    assert container.finished
+
+    with pytest.raises(NotImplementedError):
+        container.finalize(None)
+
+
+def test_xml_mpu_container_cancel(filename):
+    container = _upload.XMLMPUContainer(EXAMPLE_XML_UPLOAD_URL, filename)
+    with pytest.raises(ValueError):
+        container._prepare_cancel_request()
+
+    container = _upload.XMLMPUContainer(
+        EXAMPLE_XML_UPLOAD_URL,
+        filename,
+        headers=EXAMPLE_HEADERS,
+        upload_id=UPLOAD_ID,
+    )
+    container._parts = PARTS
+    verb, url, body, headers = container._prepare_cancel_request()
+    assert verb == _upload._DELETE
+    final_query = _upload._MPU_FINAL_QUERY_TEMPLATE.format(upload_id=UPLOAD_ID)
+    assert url == EXAMPLE_XML_UPLOAD_URL + final_query
+    assert headers == EXAMPLE_HEADERS
+    assert not body
+
+    _fix_up_virtual(container)
+    response = _make_xml_response(status_code=204)
+    container._process_cancel_response(response)
+
+    with pytest.raises(NotImplementedError):
+        container.cancel(None)
+
+
+def test_xml_mpu_part(filename):
+    PART_NUMBER = 1
+    START = 0
+    END = 256
+    ETAG = PARTS[1]
+
+    part = _upload.XMLMPUPart(
+        EXAMPLE_XML_UPLOAD_URL,
+        UPLOAD_ID,
+        filename,
+        START,
+        END,
+        PART_NUMBER,
+        headers=EXAMPLE_HEADERS,
+        checksum="md5",
+    )
+    assert part.upload_url == EXAMPLE_XML_UPLOAD_URL
+    assert part.upload_id == UPLOAD_ID
+    assert part.filename == filename
+    assert part.etag is None
+    assert part.start == START
+    assert part.end == END
+    assert part.part_number == PART_NUMBER
+    assert part._headers == EXAMPLE_HEADERS
+    assert part._checksum_type == "md5"
+    assert part._checksum_object is None
+
+    part = _upload.XMLMPUPart(
+        EXAMPLE_XML_UPLOAD_URL,
+        UPLOAD_ID,
+        filename,
+        START,
+        END,
+        PART_NUMBER,
+        headers=EXAMPLE_HEADERS,
+    )
+    verb, url, payload, headers = part._prepare_upload_request()
+    assert verb == _upload._PUT
+    assert url == EXAMPLE_XML_UPLOAD_URL + _upload._MPU_PART_QUERY_TEMPLATE.format(
+        part=PART_NUMBER, upload_id=UPLOAD_ID
+    )
+    assert headers == EXAMPLE_HEADERS
+    assert payload == FILE_DATA[START:END]
+
+    _fix_up_virtual(part)
+    response = _make_xml_response(headers={"etag": ETAG})
+    part._process_upload_response(response)
+    assert part.etag == ETAG
+
+
+def test_xml_mpu_part_invalid_response(filename):
+    PART_NUMBER = 1
+    START = 0
+    END = 256
+    ETAG = PARTS[1]
+
+    part = _upload.XMLMPUPart(
+        EXAMPLE_XML_UPLOAD_URL,
+        UPLOAD_ID,
+        filename,
+        START,
+        END,
+        PART_NUMBER,
+        headers=EXAMPLE_HEADERS,
+        checksum="md5",
+    )
+    _fix_up_virtual(part)
+    response = _make_xml_response(headers={"etag": ETAG})
+    with pytest.raises(common.InvalidResponse):
+        part._process_upload_response(response)
+
+
+def test_xml_mpu_part_checksum_failure(filename):
+    PART_NUMBER = 1
+    START = 0
+    END = 256
+    ETAG = PARTS[1]
+
+    part = _upload.XMLMPUPart(
+        EXAMPLE_XML_UPLOAD_URL,
+        UPLOAD_ID,
+        filename,
+        START,
+        END,
+        PART_NUMBER,
+        headers=EXAMPLE_HEADERS,
+        checksum="md5",
+    )
+    _fix_up_virtual(part)
+    part._prepare_upload_request()
+    response = _make_xml_response(
+        headers={"etag": ETAG, "x-goog-hash": "md5=Ojk9c3dhfxgoKVVHYwFbHQ=="}
+    )  # Example md5 checksum but not the correct one
+    with pytest.raises(common.DataCorruption):
+        part._process_upload_response(response)
+
+
+def test_xml_mpu_part_checksum_success(filename):
+    PART_NUMBER = 1
+    START = 0
+    END = 256
+    ETAG = PARTS[1]
+
+    part = _upload.XMLMPUPart(
+        EXAMPLE_XML_UPLOAD_URL,
+        UPLOAD_ID,
+        filename,
+        START,
+        END,
+        PART_NUMBER,
+        headers=EXAMPLE_HEADERS,
+        checksum="md5",
+    )
+    _fix_up_virtual(part)
+    part._prepare_upload_request()
+    response = _make_xml_response(
+        headers={"etag": ETAG, "x-goog-hash": "md5=pOUFGnohRRFFd24NztFuFw=="}
+    )
+    part._process_upload_response(response)
+    assert part.etag == ETAG
+    assert part.finished
+
+    # Test error handling
+    part = _upload.XMLMPUPart(
+        EXAMPLE_XML_UPLOAD_URL,
+        UPLOAD_ID,
+        filename,
+        START,
+        END,
+        PART_NUMBER,
+        headers=EXAMPLE_HEADERS,
+        checksum="md5",
+    )
+    with pytest.raises(NotImplementedError):
+        part.upload(None)
+    part._finished = True
+    with pytest.raises(ValueError):
+        part._prepare_upload_request()
+
+
 def _make_response(status_code=http.client.OK, headers=None, metadata=None):
     headers = headers or {}
     return mock.Mock(
@@ -1239,6 +1507,16 @@
         spec=["headers", "status_code"],
     )
 
+
+def _make_xml_response(status_code=http.client.OK, headers=None, text=None):
+    headers = headers or {}
+    return mock.Mock(
+        headers=headers,
+        status_code=status_code,
+        text=text,
+        spec=["headers", "status_code"],
+    )
+
 
 def _get_status_code(response):
     return response.status_code
