This is an automated email from the ASF dual-hosted git repository.

juergbi pushed a commit to branch juerg/token-auth
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit ad4909fdea21c047c5e2b350ad30f4929f8d99eb
Author: Jürg Billeter <[email protected]>
AuthorDate: Fri Jun 21 14:06:27 2024 +0200

    _protos: Update protos from remote-apis
---
 .../build/bazel/remote/asset/v1/remote_asset.proto |  50 ++++
 .../bazel/remote/asset/v1/remote_asset_pb2.py      |  40 +--
 .../remote/execution/v2/remote_execution.proto     | 314 +++++++++++++++++++--
 .../remote/execution/v2/remote_execution_pb2.py    | 249 ++++++++--------
 .../execution/v2/remote_execution_pb2_grpc.py      | 119 +++++---
 5 files changed, 572 insertions(+), 200 deletions(-)

diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
index 4d9be8175..c896ec405 100644
--- a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto
@@ -201,6 +201,11 @@ message FetchBlobRequest {
   //
   // Specified qualifier names *MUST* be unique.
   repeated Qualifier qualifiers = 5;
+
+  // The digest function the server must use to compute the digest.
+  //
+  // If unset, the server SHOULD default to SHA256.
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6;
 }
 
 // A response message for
@@ -216,6 +221,8 @@ message FetchBlobResponse {
   //   requested an asset from a disallowed origin.
   // * `ABORTED`: The operation could not be completed, typically due to a
   //   failed consistency check.
+  // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
+  //   perform the requested operation. The client may retry after a delay.
   google.rpc.Status status = 1;
 
   // The uri from the request that resulted in a successful retrieval, or from
@@ -232,6 +239,15 @@ message FetchBlobResponse {
   // The result of the fetch, if the status had code `OK`.
   // The digest of the file's contents, available for download through the CAS.
   build.bazel.remote.execution.v2.Digest blob_digest = 5;
+
+  // This field SHOULD be set to the digest function that was used by the server
+  // to compute [FetchBlobResponse.blob_digest].
+  // Clients could use this to determine whether the server honors
+  // [FetchBlobRequest.digest_function] that was set in the request.
+  //
+  // If unset, clients SHOULD default to use SHA256 regardless of the requested
+  // [FetchBlobRequest.digest_function].
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6;
 }
 
 // A request message for
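
For readers of this change, a rough client-side sketch of the new field, assuming the generated `remote_asset_pb2`/`remote_asset_pb2_grpc` modules, an open gRPC `channel`, and a hypothetical helper name (illustrative only, not BuildStream code):

from buildstream._protos.build.bazel.remote.asset.v1 import (
    remote_asset_pb2,
    remote_asset_pb2_grpc,
)
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2


def fetch_blob_digest(channel, uris, instance_name=""):
    # Hypothetical helper: ask the Fetch service for a blob, requesting
    # SHA256 digests explicitly via the new digest_function field.
    stub = remote_asset_pb2_grpc.FetchStub(channel)
    request = remote_asset_pb2.FetchBlobRequest(
        instance_name=instance_name,
        uris=uris,
        digest_function=remote_execution_pb2.DigestFunction.SHA256,
    )
    response = stub.FetchBlob(request)
    # Per the comment above, an unset (zero) digest_function in the
    # response means the client should assume SHA256.
    digest_function = (
        response.digest_function or remote_execution_pb2.DigestFunction.SHA256
    )
    return response.blob_digest, digest_function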
@@ -280,6 +296,11 @@ message FetchDirectoryRequest {
   //
   // Specified qualifier names *MUST* be unique.
   repeated Qualifier qualifiers = 5;
+
+  // The digest function the server must use to compute the digest.
+  //
+  // If unset, the server SHOULD default to SHA256.
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6;
 }
 
 // A response message for
@@ -295,6 +316,8 @@ message FetchDirectoryResponse {
   //   requested an asset from a disallowed origin.
   // * `ABORTED`: The operation could not be completed, typically due to a
   //   failed consistency check.
+  // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
+  //   perform the requested operation. The client may retry after a delay.
   google.rpc.Status status = 1;
 
   // The uri from the request that resulted in a successful retrieval, or from
@@ -312,6 +335,15 @@ message FetchDirectoryResponse {
   // the root digest of a directory tree, suitable for fetching via
   // [ContentAddressableStorage.GetTree].
   build.bazel.remote.execution.v2.Digest root_directory_digest = 5;
+
+  // This field SHOULD be set to the digest function that was used by the server
+  // to compute [FetchBlobResponse.root_directory_digest].
+  // Clients could use this to determine whether the server honors
+  // [FetchDirectoryRequest.digest_function] that was set in the request.
+  //
+  // If unset, clients SHOULD default to use SHA256 regardless of the requested
+  // [FetchDirectoryRequest.digest_function].
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 6;
 }
 
 // The Push service is complementary to the Fetch, and allows for
@@ -398,6 +430,15 @@ message PushBlobRequest {
   // indirectly referencing unavailable blobs.
   repeated build.bazel.remote.execution.v2.Digest references_blobs = 6;
   repeated build.bazel.remote.execution.v2.Digest references_directories = 7;
+
+  // The digest function that was used to compute the blob digest.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the action digest hash and the digest functions announced
+  // in the server's capabilities.
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 8;
 }
 
 // A response message for
@@ -438,6 +479,15 @@ message PushDirectoryRequest {
   // indirectly referencing unavailable blobs.
   repeated build.bazel.remote.execution.v2.Digest references_blobs = 6;
   repeated build.bazel.remote.execution.v2.Digest references_directories = 7;
+
+  // The digest function that was used to compute blob digests.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the action digest hash and the digest functions announced
+  // in the server's capabilities.
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 8;
 }
 
 // A response message for
diff --git a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
index 0ca03ede1..001d7a9f1 100644
--- a/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
+++ b/src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py
@@ -19,7 +19,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
 
 
-DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01
 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t\"\xdc\x01\n\x10\x46\x65tchBlobRequest\x12\x15\n\rins [...]
+DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01
 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t\"\xac\x02\n\x10\x46\x65tchBlobRequest\x12\x15\n\rins [...]
 
 
 
@@ -112,23 +112,23 @@ if _descriptor._USE_C_DESCRIPTORS == False:
   _QUALIFIER._serialized_start=255
   _QUALIFIER._serialized_end=295
   _FETCHBLOBREQUEST._serialized_start=298
-  _FETCHBLOBREQUEST._serialized_end=518
-  _FETCHBLOBRESPONSE._serialized_start=521
-  _FETCHBLOBRESPONSE._serialized_end=759
-  _FETCHDIRECTORYREQUEST._serialized_start=762
-  _FETCHDIRECTORYREQUEST._serialized_end=987
-  _FETCHDIRECTORYRESPONSE._serialized_start=990
-  _FETCHDIRECTORYRESPONSE._serialized_end=1243
-  _PUSHBLOBREQUEST._serialized_start=1246
-  _PUSHBLOBREQUEST._serialized_end=1609
-  _PUSHBLOBRESPONSE._serialized_start=1611
-  _PUSHBLOBRESPONSE._serialized_end=1629
-  _PUSHDIRECTORYREQUEST._serialized_start=1632
-  _PUSHDIRECTORYREQUEST._serialized_end=2010
-  _PUSHDIRECTORYRESPONSE._serialized_start=2012
-  _PUSHDIRECTORYRESPONSE._serialized_end=2035
-  _FETCH._serialized_start=2038
-  _FETCH._serialized_end=2387
-  _PUSH._serialized_start=2390
-  _PUSH._serialized_end=2730
+  _FETCHBLOBREQUEST._serialized_end=598
+  _FETCHBLOBRESPONSE._serialized_start=601
+  _FETCHBLOBRESPONSE._serialized_end=919
+  _FETCHDIRECTORYREQUEST._serialized_start=922
+  _FETCHDIRECTORYREQUEST._serialized_end=1227
+  _FETCHDIRECTORYRESPONSE._serialized_start=1230
+  _FETCHDIRECTORYRESPONSE._serialized_end=1563
+  _PUSHBLOBREQUEST._serialized_start=1566
+  _PUSHBLOBREQUEST._serialized_end=2009
+  _PUSHBLOBRESPONSE._serialized_start=2011
+  _PUSHBLOBRESPONSE._serialized_end=2029
+  _PUSHDIRECTORYREQUEST._serialized_start=2032
+  _PUSHDIRECTORYREQUEST._serialized_end=2490
+  _PUSHDIRECTORYRESPONSE._serialized_start=2492
+  _PUSHDIRECTORYRESPONSE._serialized_end=2515
+  _FETCH._serialized_start=2518
+  _FETCH._serialized_end=2867
+  _PUSH._serialized_start=2870
+  _PUSH._serialized_end=3210
 # @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
index 437ead7c7..31e20dcf4 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto
@@ -104,7 +104,12 @@ service Execution {
   // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
   // where, for each requested blob not present in the CAS, there is a
   // `Violation` with a `type` of `MISSING` and a `subject` of
-  // `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+  // `"blobs/{digest_function/}{hash}/{size}"` indicating the digest of the
+  // missing blob. The `subject` is formatted the same way as the
+  // `resource_name` provided to
+  // [ByteStream.Read][google.bytestream.ByteStream.Read], with the leading
+  // instance name omitted. `digest_function` MUST thus be omitted if its value
+  // is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, or VSO.
   //
   // The server does not need to guarantee that a call to this method leads to
   // at most one execution of the action. The server MAY execute the action
@@ -120,6 +125,14 @@ service Execution {
   // operation completes, and then respond with the completed operation. The
   // server MAY choose to stream additional updates as execution progresses,
   // such as to provide an update as to the state of the execution.
+  //
+  // In addition to the cases described for Execute, the WaitExecution method
+  // may fail as follows:
+  //
+  // * `NOT_FOUND`: The operation no longer exists due to any of a transient
+  //   condition, an unknown operation name, or if the server implements the
+  //   Operations API DeleteOperation method and it was called for the current
+  //   execution. The client should call `Execute` to retry.
   rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
     option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
   }
@@ -204,20 +217,27 @@ service ActionCache {
 // [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
 //
 // For uncompressed data, The `WriteRequest.resource_name` is of the following form:
-// `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+// `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`
 //
 // Where:
-// * `instance_name` is an identifier, possibly containing multiple path
-//   segments, used to distinguish between the various instances on the server,
-//   in a manner defined by the server. If it is the empty path, the leading
-//   slash is omitted, so that  the `resource_name` becomes
-//   `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
+// * `instance_name` is an identifier used to distinguish between the various
+//   instances on the server. Syntax and semantics of this field are defined
+//   by the server; Clients must not make any assumptions about it (e.g.,
+//   whether it spans multiple path segments or not). If it is the empty path,
+//   the leading slash is omitted, so that  the `resource_name` becomes
+//   `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`.
 //   To simplify parsing, a path segment cannot equal any of the following
 //   keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
 //   `capabilities` or `compressed-blobs`.
 // * `uuid` is a version 4 UUID generated by the client, used to avoid
 //   collisions between concurrent uploads of the same data. Clients MAY
 //   reuse the same `uuid` for uploading different blobs.
+// * `digest_function` is a lowercase string form of a `DigestFunction.Value`
+//   enum, indicating which digest function was used to compute `hash`. If the
+//   digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512,
+//   or VSO, this component MUST be omitted. In that case the server SHOULD
+//   infer the digest function using the length of the `hash` and the digest
+//   functions announced in the server's capabilities.
 // * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
 //   of the data being uploaded.
 // * `optional_metadata` is implementation specific data, which clients MAY omit.
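
The resource-name rule above can be sketched as follows; `upload_resource_name` and the lowercase digest-function strings are assumptions for illustration only:

import uuid

# Digest functions whose path component is omitted in resource names; per the
# comment above, the server infers them from the hash length instead.
OMITTED_DIGEST_FUNCTIONS = {"md5", "murmur3", "sha1", "sha256", "sha384", "sha512", "vso"}


def upload_resource_name(instance_name, blob_hash, size_bytes, digest_function="sha256"):
    # Hypothetical helper that builds an uncompressed-upload resource name.
    segments = []
    if instance_name:
        segments.append(instance_name)
    segments += ["uploads", str(uuid.uuid4()), "blobs"]
    if digest_function not in OMITTED_DIGEST_FUNCTIONS:
        # e.g. "blake3" or "sha256tree" must appear explicitly in the path.
        segments.append(digest_function)
    segments += [blob_hash, str(size_bytes)]
    return "/".join(segments)

# upload_resource_name("", "e3b0c442...", 0) ->
#   "uploads/<uuid4>/blobs/e3b0c442.../0"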
@@ -225,10 +245,11 @@ service ActionCache {
 //
 // Data can alternatively be uploaded in compressed form, with the following
 // `WriteRequest.resource_name` form:
-// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
 //
 // Where:
-// * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+// * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are
+//   defined as above.
 // * `compressor` is a lowercase string form of a `Compressor.Value` enum
 //   other than `identity`, which is supported by the server and advertised in
 //   [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
@@ -271,15 +292,17 @@ service ActionCache {
 // [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
 //
 // For uncompressed data, The `ReadRequest.resource_name` is of the following form:
-// `{instance_name}/blobs/{hash}/{size}`
-// Where `instance_name`, `hash` and `size` are defined as for uploads.
+// `{instance_name}/blobs/{digest_function/}{hash}/{size}`
+// Where `instance_name`, `digest_function`, `hash` and `size` are defined as
+// for uploads.
 //
 // Data can alternatively be downloaded in compressed form, with the following
 // `ReadRequest.resource_name` form:
-// `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+// `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}`
 //
 // Where:
-// * `instance_name` and `compressor` are defined as for uploads.
+// * `instance_name`, `compressor` and `digest_function` are defined as for
+//   uploads.
 // * `uncompressed_hash` and `uncompressed_size` refer to the
 //   [Digest][build.bazel.remote.execution.v2.Digest] of the data being
 //   downloaded, once uncompressed. Clients MUST verify that these match
@@ -424,6 +447,8 @@ service Capabilities {
   //   CacheCapabilities and ExecutionCapabilities.
   // * Execution only endpoints should return ExecutionCapabilities.
   // * CAS + Action Cache only endpoints should return CacheCapabilities.
+  //
+  // There are no method-specific errors.
   rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
     option (google.api.http) = {
       get: "/v2/{instance_name=**}/capabilities"
@@ -588,7 +613,7 @@ message Command {
   // to execution, even if they are not explicitly part of the input root.
   //
   // DEPRECATED since v2.1: Use `output_paths` instead.
-  repeated string output_files = 3;
+  repeated string output_files = 3 [ deprecated = true ];
 
   // A list of the output directories that the client expects to retrieve from
   // the action. Only the listed directories will be returned (an entire
@@ -619,7 +644,7 @@ message Command {
   // if they are not explicitly part of the input root.
   //
   // DEPRECATED since 2.1: Use `output_paths` instead.
-  repeated string output_directories = 4;
+  repeated string output_directories = 4 [ deprecated = true ];
 
   // A list of the output paths that the client expects to retrieve from the
   // action. Only the listed paths will be returned to the client as output.
@@ -659,7 +684,7 @@ message Command {
   // DEPRECATED as of v2.2: platform properties are now specified directly in
   // the action. See documentation note in the
   // [Action][build.bazel.remote.execution.v2.Action] for migration.
-  Platform platform = 5;
+  Platform platform = 5 [ deprecated = true ];
 
   // The working directory, relative to the input root, for the command to run
   // in. It must be a directory which exists in the input tree. If it is left
@@ -678,6 +703,33 @@ message Command {
   // property is not recognized by the server, the server will return an
   // `INVALID_ARGUMENT`.
   repeated string output_node_properties = 8;
+
+  enum OutputDirectoryFormat {
+    // The client is only interested in receiving output directories in
+    // the form of a single Tree object, using the `tree_digest` field.
+    TREE_ONLY = 0;
+
+    // The client is only interested in receiving output directories in
+    // the form of a hierarchy of separately stored Directory objects,
+    // using the `root_directory_digest` field.
+    DIRECTORY_ONLY = 1;
+
+    // The client is interested in receiving output directories both in
+    // the form of a single Tree object and a hierarchy of separately
+    // stored Directory objects, using both the `tree_digest` and
+    // `root_directory_digest` fields.
+    TREE_AND_DIRECTORY = 2;
+  }
+
+  // The format that the worker should use to store the contents of
+  // output directories.
+  //
+  // In case this field is set to a value that is not supported by the
+  // worker, the worker SHOULD interpret this field as TREE_ONLY. The
+  // worker MAY store output directories in formats that are a superset
+  // of what was requested (e.g., interpreting DIRECTORY_ONLY as
+  // TREE_AND_DIRECTORY).
+  OutputDirectoryFormat output_directory_format = 9;
 }
 
 // A `Platform` is a set of requirements, such as hardware, operating system, or
@@ -929,8 +981,8 @@ message SymlinkNode {
 // serializing, but care should be taken to avoid shortcuts. For instance,
 // concatenating two messages to merge them may produce duplicate fields.
 message Digest {
-  // The hash. In the case of SHA-256, it will always be a lowercase hex string
-  // exactly 64 characters long.
+  // The hash, represented as a lowercase hexadecimal string, padded with
+  // leading zeroes up to the hash function length.
   string hash = 1;
 
   // The size of the blob, in bytes.
@@ -1035,7 +1087,7 @@ message ActionResult {
   //
   // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
   // should still populate this field in addition to `output_symlinks`.
-  repeated OutputSymlink output_file_symlinks = 10;
+  repeated OutputSymlink output_file_symlinks = 10 [ deprecated = true ];
 
   // New in v2.1: this field will only be populated if the command
   // `output_paths` field was used, and not the pre v2.1 `output_files` or
@@ -1135,7 +1187,7 @@ message ActionResult {
   //
   // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
   // should still populate this field in addition to `output_symlinks`.
-  repeated OutputSymlink output_directory_symlinks = 11;
+  repeated OutputSymlink output_directory_symlinks = 11 [ deprecated = true ];
 
   // The exit code of the command.
   int32 exit_code = 4;
@@ -1233,6 +1285,52 @@ message OutputDirectory {
   // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
   // directory's contents.
   Digest tree_digest = 3;
+
+  // If set, consumers MAY make the following assumptions about the
+  // directories contained in the Tree, so that it may be
+  // instantiated on a local file system by scanning through it
+  // sequentially:
+  //
+  // - All directories with the same binary representation are stored
+  //   exactly once.
+  // - All directories, apart from the root directory, are referenced by
+  //   at least one parent directory.
+  // - Directories are stored in topological order, with parents being
+  //   stored before the child. The root directory is thus the first to
+  //   be stored.
+  //
+  // Additionally, the Tree MUST be encoded as a stream of records,
+  // where each record has the following format:
+  //
+  // - A tag byte, having one of the following two values:
+  //   - (1 << 3) | 2 == 0x0a: First record (the root directory).
+  //   - (2 << 3) | 2 == 0x12: Any subsequent records (child directories).
+  // - The size of the directory, encoded as a base 128 varint.
+  // - The contents of the directory, encoded as a binary serialized
+  //   Protobuf message.
+  //
+  // This encoding is a subset of the Protobuf wire format of the Tree
+  // message. As it is only permitted to store data associated with
+  // field numbers 1 and 2, the tag MUST be encoded as a single byte.
+  // More details on the Protobuf wire format can be found here:
+  // https://developers.google.com/protocol-buffers/docs/encoding
+  //
+  // It is recommended that implementations using this feature construct
+  // Tree objects manually using the specification given above, as
+  // opposed to using a Protobuf library to marshal a full Tree message.
+  // As individual Directory messages already need to be marshaled to
+  // compute their digests, constructing the Tree object manually avoids
+  // redundant marshaling.
+  bool is_topologically_sorted = 4;
+
+  // The digest of the encoded
+  // [Directory][build.bazel.remote.execution.v2.Directory] proto
+  // containing the contents of the directory's root.
+  //
+  // If both `tree_digest` and `root_directory_digest` are set, this
+  // field MUST match the digest of the root directory contained in the
+  // Tree message.
+  Digest root_directory_digest = 5;
 }
 
 // An `OutputSymlink` is similar to a
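
The record layout described for `is_topologically_sorted` amounts to hand-writing the Tree wire format. A rough sketch, with hypothetical helper names and `root`/`children` assumed to be Directory messages already in topological order:

def encode_varint(value):
    # Base-128 varint encoding, as used by the Protobuf wire format.
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)


def encode_sorted_tree(root, children):
    # Emit the Tree as a stream of records: tag byte, varint-encoded size,
    # then the serialized Directory; the root (field 1, tag 0x0a) comes
    # first, children (field 2, tag 0x12) follow in topological order.
    stream = bytearray()
    for tag, directory in [(0x0A, root)] + [(0x12, child) for child in children]:
        payload = directory.SerializeToString()
        stream.append(tag)
        stream += encode_varint(len(payload))
        stream += payload
    return bytes(stream)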
@@ -1326,6 +1424,29 @@ message ExecuteRequest {
   // The server will have a default policy if this is not provided.
   // This may be applied to both the ActionResult and the associated blobs.
   ResultsCachePolicy results_cache_policy = 8;
+
+  // The digest function that was used to compute the action digest.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the action digest hash and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 9;
+
+  // A hint to the server to request inlining stdout in the
+  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
+  bool inline_stdout = 10;
+
+  // A hint to the server to request inlining stderr in the
+  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
+  bool inline_stderr = 11;
+
+  // A hint to the server to inline the contents of the listed output files.
+  // Each path needs to exactly match one file path in either `output_paths` or
+  // `output_files` (DEPRECATED since v2.1) in the
+  // [Command][build.bazel.remote.execution.v2.Command] message.
+  repeated string inline_output_files = 12;
 }
 
 // A `LogFile` is a log stored in the CAS.
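
The "infer the digest function from the hash length" rule used by this and the following messages could be read roughly as below; the table and function name are illustrative, and VSO is omitted from the sketch:

# Hypothetical server-side helper: infer the digest function from the hex
# hash length, disambiguating via the functions this server announces in
# its capabilities (MD5 and MURMUR3 are both 128-bit, hence the list).
HEX_LENGTH_TO_FUNCTIONS = {
    32: ["MD5", "MURMUR3"],
    40: ["SHA1"],
    64: ["SHA256"],
    96: ["SHA384"],
    128: ["SHA512"],
}


def infer_digest_function(hash_hex, announced_functions):
    for name in HEX_LENGTH_TO_FUNCTIONS.get(len(hash_hex), []):
        if name in announced_functions:
            return name
    raise ValueError("cannot infer digest function for a %d-character hash" % len(hash_hex))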
@@ -1432,6 +1553,10 @@ message ExecuteOperationMetadata {
   // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
   // standard error from the endpoint hosting streamed responses.
   string stderr_stream_name = 4;
+
+  // The client can read this field to view details about the ongoing
+  // execution.
+  ExecutedActionMetadata partial_execution_metadata = 5;
 }
 
 // A request message for
@@ -1469,6 +1594,15 @@ message GetActionResultRequest {
   // `output_files` (DEPRECATED since v2.1) in the
   // [Command][build.bazel.remote.execution.v2.Command] message.
   repeated string inline_output_files = 5;
+
+  // The digest function that was used to compute the action digest.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the action digest hash and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 6;
 }
 
 // A request message for
@@ -1493,6 +1627,15 @@ message UpdateActionResultRequest {
   // The server will have a default policy if this is not provided.
   // This may be applied to both the ActionResult and the associated blobs.
   ResultsCachePolicy results_cache_policy = 4;
+
+  // The digest function that was used to compute the action digest.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the action digest hash and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 5;
 }
 
 // A request message for
@@ -1505,8 +1648,18 @@ message FindMissingBlobsRequest {
   // omitted.
   string instance_name = 1;
 
-  // A list of the blobs to check.
+  // A list of the blobs to check. All digests MUST use the same digest
+  // function.
   repeated Digest blob_digests = 2;
+
+  // The digest function of the blobs whose existence is checked.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the blob digest hashes and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 3;
 }
 
 // A response message for
@@ -1521,7 +1674,8 @@ message FindMissingBlobsResponse {
 message BatchUpdateBlobsRequest {
   // A request corresponding to a single blob that the client wants to upload.
   message Request {
-    // The digest of the blob. This MUST be the digest of `data`.
+    // The digest of the blob. This MUST be the digest of `data`. All
+    // digests MUST use the same digest function.
     Digest digest = 1;
 
     // The raw binary data.
@@ -1543,6 +1697,16 @@ message BatchUpdateBlobsRequest {
 
   // The individual upload requests.
   repeated Request requests = 2;
+
+  // The digest function that was used to compute the digests of the
+  // blobs being uploaded.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the blob digest hashes and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 5;
 }
 
 // A response message for
@@ -1571,12 +1735,22 @@ message BatchReadBlobsRequest {
   // omitted.
   string instance_name = 1;
 
-  // The individual blob digests.
+  // The individual blob digests. All digests MUST use the same digest
+  // function.
   repeated Digest digests = 2;
 
   // A list of acceptable encodings for the returned inlined data, in no
   // particular order. `IDENTITY` is always allowed even if not specified here.
   repeated Compressor.Value acceptable_compressors = 3;
+
+  // The digest function of the blobs being requested.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the blob digest hashes and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 4;
 }
 
 // A response message for
@@ -1629,6 +1803,16 @@ message GetTreeRequest {
   // If present, the server will use that token as an offset, returning only
   // that page and the ones that succeed it.
   string page_token = 4;
+
+  // The digest function that was used to compute the digest of the root
+  // directory.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the root digest hash and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 5;
 }
 
 // A response message for
@@ -1704,6 +1888,66 @@ message DigestFunction {
     // cryptographic hash function and its collision properties are not strongly guaranteed.
     // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 .
     MURMUR3 = 7;
+
+    // The SHA-256 digest function, modified to use a Merkle tree for
+    // large objects. This permits implementations to store large blobs
+    // as a decomposed sequence of 2^j sized chunks, where j >= 10,
+    // while being able to validate integrity at the chunk level.
+    //
+    // Furthermore, on systems that do not offer dedicated instructions
+    // for computing SHA-256 hashes (e.g., the Intel SHA and ARMv8
+    // cryptographic extensions), SHA256TREE hashes can be computed more
+    // efficiently than plain SHA-256 hashes by using generic SIMD
+    // extensions, such as Intel AVX2 or ARM NEON.
+    //
+    // SHA256TREE hashes are computed as follows:
+    //
+    // - For blobs that are 1024 bytes or smaller, the hash is computed
+    //   using the regular SHA-256 digest function.
+    //
+    // - For blobs that are more than 1024 bytes in size, the hash is
+    //   computed as follows:
+    //
+    //   1. The blob is partitioned into a left (leading) and right
+    //      (trailing) blob. These blobs have lengths m and n
+    //      respectively, where m = 2^k and 0 < n <= m.
+    //
+    //   2. Hashes of the left and right blob, Hash(left) and
+    //      Hash(right) respectively, are computed by recursively
+    //      applying the SHA256TREE algorithm.
+    //
+    //   3. A single invocation is made to the SHA-256 block cipher with
+    //      the following parameters:
+    //
+    //          M = Hash(left) || Hash(right)
+    //          H = {
+    //              0xcbbb9d5d, 0x629a292a, 0x9159015a, 0x152fecd8,
+    //              0x67332667, 0x8eb44a87, 0xdb0c2e0d, 0x47b5481d,
+    //          }
+    //
+    //      The values of H are the leading fractional parts of the
+    //      square roots of the 9th to the 16th prime number (23 to 53).
+    //      This differs from plain SHA-256, where the first eight prime
+    //      numbers (2 to 19) are used, thereby preventing trivial hash
+    //      collisions between small and large objects.
+    //
+    //   4. The hash of the full blob can then be obtained by
+    //      concatenating the outputs of the block cipher:
+    //
+    //          Hash(blob) = a || b || c || d || e || f || g || h
+    //
+    //      Addition of the original values of H, as normally done
+    //      through the use of the Davies-Meyer structure, is not
+    //      performed. This isn't necessary, as the block cipher is only
+    //      invoked once.
+    //
+    // Test vectors of this digest function can be found in the
+    // accompanying sha256tree_test_vectors.txt file.
+    SHA256TREE = 8;
+
+    // The BLAKE3 hash function.
+    // See https://github.com/BLAKE3-team/BLAKE3.
+    BLAKE3 = 9;
   }
 }
 
@@ -1764,6 +2008,9 @@ message Compressor {
     // It is advised to use algorithms such as Zstandard instead, as
     // those are faster and/or provide a better compression ratio.
     DEFLATE = 2;
+
+    // Brotli compression.
+    BROTLI = 3;
   }
 }
 
@@ -1804,7 +2051,10 @@ message CacheCapabilities {
 
 // Capabilities of the remote execution system.
 message ExecutionCapabilities {
-  // Remote execution may only support a single digest function.
+  // Legacy field for indicating which digest function is supported by the
+  // remote execution system. It MUST be set to a value other than UNKNOWN.
+  // Implementations should consider the repeated digest_functions field
+  // first, falling back to this singular field if digest_functions is unset.
   DigestFunction.Value digest_function = 1;
 
   // Whether remote execution is enabled for the particular server/instance.
@@ -1815,6 +2065,20 @@ message ExecutionCapabilities {
 
   // Supported node properties.
   repeated string supported_node_properties = 4;
+
+  // All the digest functions supported by the remote execution system.
+  // If this field is set, it MUST also contain digest_function.
+  //
+  // Even if the remote execution system announces support for multiple
+  // digest functions, individual execution requests may only reference
+  // CAS objects using a single digest function. For example, it is not
+  // permitted to execute actions having both MD5 and SHA-256 hashed
+  // files in their input root.
+  //
+  // The CAS objects referenced by action results generated by the
+  // remote execution system MUST use the same digest function as the
+  // one used to construct the action.
+  repeated DigestFunction.Value digest_functions = 5;
 }
 
 // Details for the tool used to call the API.
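
On the client side, the precedence described above (prefer the repeated `digest_functions`, fall back to the legacy singular field) might look like this; a hypothetical sketch, not BuildStream code:

def choose_digest_function(execution_capabilities, preferred):
    # Prefer the repeated digest_functions field; if the server predates it
    # and left it empty, fall back to the legacy singular digest_function.
    announced = list(execution_capabilities.digest_functions)
    if not announced:
        announced = [execution_capabilities.digest_function]
    for value in preferred:
        if value in announced:
            return value
    # No overlap with the client's preferences; use whatever the server offers.
    return announced[0]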
@@ -1833,7 +2097,7 @@ message ToolDetails {
 //
 // * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
 // * contents: the base64 encoded binary `RequestMetadata` message.
-// Note: the gRPC library serializes binary headers encoded in base 64 by
+// Note: the gRPC library serializes binary headers encoded in base64 by
 // default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests).
 // Therefore, if the gRPC library is used to pass/retrieve this
 // metadata, the user may ignore the base64 encoding and assume it is simply
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
index 1c9ea49b8..da10f4f9a 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
@@ -22,7 +22,7 @@ from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb
 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
 
 
-DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xa6\x02\n\x06\x41\x63tion\x12?\n\
 [...]
+DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xa6\x02\n\x06\x41\x63tion\x12?\n\
 [...]
 
 
 
@@ -78,6 +78,7 @@ _CACHECAPABILITIES = DESCRIPTOR.message_types_by_name['CacheCapabilities']
 _EXECUTIONCAPABILITIES = DESCRIPTOR.message_types_by_name['ExecutionCapabilities']
 _TOOLDETAILS = DESCRIPTOR.message_types_by_name['ToolDetails']
 _REQUESTMETADATA = DESCRIPTOR.message_types_by_name['RequestMetadata']
+_COMMAND_OUTPUTDIRECTORYFORMAT = _COMMAND.enum_types_by_name['OutputDirectoryFormat']
 _EXECUTIONSTAGE_VALUE = _EXECUTIONSTAGE.enum_types_by_name['Value']
 _DIGESTFUNCTION_VALUE = _DIGESTFUNCTION.enum_types_by_name['Value']
 _SYMLINKABSOLUTEPATHSTRATEGY_VALUE = _SYMLINKABSOLUTEPATHSTRATEGY.enum_types_by_name['Value']
@@ -461,6 +462,16 @@ if _descriptor._USE_C_DESCRIPTORS == False:
 
   DESCRIPTOR._options = None
   DESCRIPTOR._serialized_options = b'\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001ZQgithub.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2;remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'
+  _COMMAND.fields_by_name['output_files']._options = None
+  _COMMAND.fields_by_name['output_files']._serialized_options = b'\030\001'
+  _COMMAND.fields_by_name['output_directories']._options = None
+  _COMMAND.fields_by_name['output_directories']._serialized_options = b'\030\001'
+  _COMMAND.fields_by_name['platform']._options = None
+  _COMMAND.fields_by_name['platform']._serialized_options = b'\030\001'
+  _ACTIONRESULT.fields_by_name['output_file_symlinks']._options = None
+  _ACTIONRESULT.fields_by_name['output_file_symlinks']._serialized_options = b'\030\001'
+  _ACTIONRESULT.fields_by_name['output_directory_symlinks']._options = None
+  _ACTIONRESULT.fields_by_name['output_directory_symlinks']._serialized_options = b'\030\001'
   _EXECUTERESPONSE_SERVERLOGSENTRY._options = None
   _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_options = b'8\001'
   _EXECUTION.methods_by_name['Execute']._options = None
@@ -484,121 +495,123 @@ if _descriptor._USE_C_DESCRIPTORS == False:
   _ACTION._serialized_start=341
   _ACTION._serialized_end=635
   _COMMAND._serialized_start=638
-  _COMMAND._serialized_end=1003
-  _COMMAND_ENVIRONMENTVARIABLE._serialized_start=953
-  _COMMAND_ENVIRONMENTVARIABLE._serialized_end=1003
-  _PLATFORM._serialized_start=1005
-  _PLATFORM._serialized_end=1128
-  _PLATFORM_PROPERTY._serialized_start=1089
-  _PLATFORM_PROPERTY._serialized_end=1128
-  _DIRECTORY._serialized_start=1131
-  _DIRECTORY._serialized_end=1413
-  _NODEPROPERTY._serialized_start=1415
-  _NODEPROPERTY._serialized_end=1458
-  _NODEPROPERTIES._serialized_start=1461
-  _NODEPROPERTIES._serialized_end=1636
-  _FILENODE._serialized_start=1639
-  _FILENODE._serialized_end=1829
-  _DIRECTORYNODE._serialized_start=1831
-  _DIRECTORYNODE._serialized_end=1917
-  _SYMLINKNODE._serialized_start=1919
-  _SYMLINKNODE._serialized_end=2042
-  _DIGEST._serialized_start=2044
-  _DIGEST._serialized_end=2086
-  _EXECUTEDACTIONMETADATA._serialized_start=2089
-  _EXECUTEDACTIONMETADATA._serialized_end=2822
-  _ACTIONRESULT._serialized_start=2825
-  _ACTIONRESULT._serialized_end=3496
-  _OUTPUTFILE._serialized_start=3499
-  _OUTPUTFILE._serialized_end=3709
-  _TREE._serialized_start=3711
-  _TREE._serialized_end=3837
-  _OUTPUTDIRECTORY._serialized_start=3839
-  _OUTPUTDIRECTORY._serialized_end=3938
-  _OUTPUTSYMLINK._serialized_start=3940
-  _OUTPUTSYMLINK._serialized_end=4065
-  _EXECUTIONPOLICY._serialized_start=4067
-  _EXECUTIONPOLICY._serialized_end=4102
-  _RESULTSCACHEPOLICY._serialized_start=4104
-  _RESULTSCACHEPOLICY._serialized_end=4142
-  _EXECUTEREQUEST._serialized_start=4145
-  _EXECUTEREQUEST._serialized_end=4452
-  _LOGFILE._serialized_start=4454
-  _LOGFILE._serialized_end=4544
-  _EXECUTERESPONSE._serialized_start=4547
-  _EXECUTERESPONSE._serialized_end=4883
-  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_start=4792
-  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_end=4883
-  _EXECUTIONSTAGE._serialized_start=4885
-  _EXECUTIONSTAGE._serialized_end=4982
-  _EXECUTIONSTAGE_VALUE._serialized_start=4903
-  _EXECUTIONSTAGE_VALUE._serialized_end=4982
-  _EXECUTEOPERATIONMETADATA._serialized_start=4985
-  _EXECUTEOPERATIONMETADATA._serialized_end=5201
-  _WAITEXECUTIONREQUEST._serialized_start=5203
-  _WAITEXECUTIONREQUEST._serialized_end=5239
-  _GETACTIONRESULTREQUEST._serialized_start=5242
-  _GETACTIONRESULTREQUEST._serialized_end=5428
-  _UPDATEACTIONRESULTREQUEST._serialized_start=5431
-  _UPDATEACTIONRESULTREQUEST._serialized_end=5698
-  _FINDMISSINGBLOBSREQUEST._serialized_start=5700
-  _FINDMISSINGBLOBSREQUEST._serialized_end=5811
-  _FINDMISSINGBLOBSRESPONSE._serialized_start=5813
-  _FINDMISSINGBLOBSRESPONSE._serialized_end=5910
-  _BATCHUPDATEBLOBSREQUEST._serialized_start=5913
-  _BATCHUPDATEBLOBSREQUEST._serialized_end=6199
-  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_start=6048
-  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_end=6199
-  _BATCHUPDATEBLOBSRESPONSE._serialized_start=6202
-  _BATCHUPDATEBLOBSRESPONSE._serialized_end=6420
-  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_start=6317
-  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_end=6420
-  _BATCHREADBLOBSREQUEST._serialized_start=6423
-  _BATCHREADBLOBSREQUEST._serialized_end=6610
-  _BATCHREADBLOBSRESPONSE._serialized_start=6613
-  _BATCHREADBLOBSRESPONSE._serialized_end=6913
-  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_start=6725
-  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_end=6913
-  _GETTREEREQUEST._serialized_start=6916
-  _GETTREEREQUEST._serialized_end=7056
-  _GETTREERESPONSE._serialized_start=7058
-  _GETTREERESPONSE._serialized_end=7165
-  _GETCAPABILITIESREQUEST._serialized_start=7167
-  _GETCAPABILITIESREQUEST._serialized_end=7214
-  _SERVERCAPABILITIES._serialized_start=7217
-  _SERVERCAPABILITIES._serialized_end=7572
-  _DIGESTFUNCTION._serialized_start=7574
-  _DIGESTFUNCTION._serialized_end=7689
-  _DIGESTFUNCTION_VALUE._serialized_start=7592
-  _DIGESTFUNCTION_VALUE._serialized_end=7689
-  _ACTIONCACHEUPDATECAPABILITIES._serialized_start=7691
-  _ACTIONCACHEUPDATECAPABILITIES._serialized_end=7746
-  _PRIORITYCAPABILITIES._serialized_start=7749
-  _PRIORITYCAPABILITIES._serialized_end=7921
-  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_start=7862
-  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_end=7921
-  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_start=7923
-  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_end=8003
-  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_start=7954
-  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_end=8003
-  _COMPRESSOR._serialized_start=8005
-  _COMPRESSOR._serialized_end=8063
-  _COMPRESSOR_VALUE._serialized_start=8019
-  _COMPRESSOR_VALUE._serialized_end=8063
-  _CACHECAPABILITIES._serialized_start=8066
-  _CACHECAPABILITIES._serialized_end=8685
-  _EXECUTIONCAPABILITIES._serialized_start=8688
-  _EXECUTIONCAPABILITIES._serialized_end=8944
-  _TOOLDETAILS._serialized_start=8946
-  _TOOLDETAILS._serialized_end=9000
-  _REQUESTMETADATA._serialized_start=9003
-  _REQUESTMETADATA._serialized_end=9240
-  _EXECUTION._serialized_start=9243
-  _EXECUTION._serialized_end=9556
-  _ACTIONCACHE._serialized_start=9559
-  _ACTIONCACHE._serialized_end=10029
-  _CONTENTADDRESSABLESTORAGE._serialized_start=10032
-  _CONTENTADDRESSABLESTORAGE._serialized_end=10827
-  _CAPABILITIES._serialized_start=10830
-  _CAPABILITIES._serialized_end=11019
+  _COMMAND._serialized_end=1196
+  _COMMAND_ENVIRONMENTVARIABLE._serialized_start=1062
+  _COMMAND_ENVIRONMENTVARIABLE._serialized_end=1112
+  _COMMAND_OUTPUTDIRECTORYFORMAT._serialized_start=1114
+  _COMMAND_OUTPUTDIRECTORYFORMAT._serialized_end=1196
+  _PLATFORM._serialized_start=1198
+  _PLATFORM._serialized_end=1321
+  _PLATFORM_PROPERTY._serialized_start=1282
+  _PLATFORM_PROPERTY._serialized_end=1321
+  _DIRECTORY._serialized_start=1324
+  _DIRECTORY._serialized_end=1606
+  _NODEPROPERTY._serialized_start=1608
+  _NODEPROPERTY._serialized_end=1651
+  _NODEPROPERTIES._serialized_start=1654
+  _NODEPROPERTIES._serialized_end=1829
+  _FILENODE._serialized_start=1832
+  _FILENODE._serialized_end=2022
+  _DIRECTORYNODE._serialized_start=2024
+  _DIRECTORYNODE._serialized_end=2110
+  _SYMLINKNODE._serialized_start=2112
+  _SYMLINKNODE._serialized_end=2235
+  _DIGEST._serialized_start=2237
+  _DIGEST._serialized_end=2279
+  _EXECUTEDACTIONMETADATA._serialized_start=2282
+  _EXECUTEDACTIONMETADATA._serialized_end=3015
+  _ACTIONRESULT._serialized_start=3018
+  _ACTIONRESULT._serialized_end=3697
+  _OUTPUTFILE._serialized_start=3700
+  _OUTPUTFILE._serialized_end=3910
+  _TREE._serialized_start=3912
+  _TREE._serialized_end=4038
+  _OUTPUTDIRECTORY._serialized_start=4041
+  _OUTPUTDIRECTORY._serialized_end=4245
+  _OUTPUTSYMLINK._serialized_start=4247
+  _OUTPUTSYMLINK._serialized_end=4372
+  _EXECUTIONPOLICY._serialized_start=4374
+  _EXECUTIONPOLICY._serialized_end=4409
+  _RESULTSCACHEPOLICY._serialized_start=4411
+  _RESULTSCACHEPOLICY._serialized_end=4449
+  _EXECUTEREQUEST._serialized_start=4452
+  _EXECUTEREQUEST._serialized_end=4914
+  _LOGFILE._serialized_start=4916
+  _LOGFILE._serialized_end=5006
+  _EXECUTERESPONSE._serialized_start=5009
+  _EXECUTERESPONSE._serialized_end=5345
+  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_start=5254
+  _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_end=5345
+  _EXECUTIONSTAGE._serialized_start=5347
+  _EXECUTIONSTAGE._serialized_end=5444
+  _EXECUTIONSTAGE_VALUE._serialized_start=5365
+  _EXECUTIONSTAGE_VALUE._serialized_end=5444
+  _EXECUTEOPERATIONMETADATA._serialized_start=5447
+  _EXECUTEOPERATIONMETADATA._serialized_end=5756
+  _WAITEXECUTIONREQUEST._serialized_start=5758
+  _WAITEXECUTIONREQUEST._serialized_end=5794
+  _GETACTIONRESULTREQUEST._serialized_start=5797
+  _GETACTIONRESULTREQUEST._serialized_end=6063
+  _UPDATEACTIONRESULTREQUEST._serialized_start=6066
+  _UPDATEACTIONRESULTREQUEST._serialized_end=6413
+  _FINDMISSINGBLOBSREQUEST._serialized_start=6416
+  _FINDMISSINGBLOBSREQUEST._serialized_end=6607
+  _FINDMISSINGBLOBSRESPONSE._serialized_start=6609
+  _FINDMISSINGBLOBSRESPONSE._serialized_end=6706
+  _BATCHUPDATEBLOBSREQUEST._serialized_start=6709
+  _BATCHUPDATEBLOBSREQUEST._serialized_end=7075
+  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_start=6924
+  _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_end=7075
+  _BATCHUPDATEBLOBSRESPONSE._serialized_start=7078
+  _BATCHUPDATEBLOBSRESPONSE._serialized_end=7296
+  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_start=7193
+  _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_end=7296
+  _BATCHREADBLOBSREQUEST._serialized_start=7299
+  _BATCHREADBLOBSREQUEST._serialized_end=7566
+  _BATCHREADBLOBSRESPONSE._serialized_start=7569
+  _BATCHREADBLOBSRESPONSE._serialized_end=7869
+  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_start=7681
+  _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_end=7869
+  _GETTREEREQUEST._serialized_start=7872
+  _GETTREEREQUEST._serialized_end=8092
+  _GETTREERESPONSE._serialized_start=8094
+  _GETTREERESPONSE._serialized_end=8201
+  _GETCAPABILITIESREQUEST._serialized_start=8203
+  _GETCAPABILITIESREQUEST._serialized_end=8250
+  _SERVERCAPABILITIES._serialized_start=8253
+  _SERVERCAPABILITIES._serialized_end=8608
+  _DIGESTFUNCTION._serialized_start=8611
+  _DIGESTFUNCTION._serialized_end=8754
+  _DIGESTFUNCTION_VALUE._serialized_start=8629
+  _DIGESTFUNCTION_VALUE._serialized_end=8754
+  _ACTIONCACHEUPDATECAPABILITIES._serialized_start=8756
+  _ACTIONCACHEUPDATECAPABILITIES._serialized_end=8811
+  _PRIORITYCAPABILITIES._serialized_start=8814
+  _PRIORITYCAPABILITIES._serialized_end=8986
+  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_start=8927
+  _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_end=8986
+  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_start=8988
+  _SYMLINKABSOLUTEPATHSTRATEGY._serialized_end=9068
+  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_start=9019
+  _SYMLINKABSOLUTEPATHSTRATEGY_VALUE._serialized_end=9068
+  _COMPRESSOR._serialized_start=9070
+  _COMPRESSOR._serialized_end=9140
+  _COMPRESSOR_VALUE._serialized_start=9084
+  _COMPRESSOR_VALUE._serialized_end=9140
+  _CACHECAPABILITIES._serialized_start=9143
+  _CACHECAPABILITIES._serialized_end=9762
+  _EXECUTIONCAPABILITIES._serialized_start=9765
+  _EXECUTIONCAPABILITIES._serialized_end=10102
+  _TOOLDETAILS._serialized_start=10104
+  _TOOLDETAILS._serialized_end=10158
+  _REQUESTMETADATA._serialized_start=10161
+  _REQUESTMETADATA._serialized_end=10398
+  _EXECUTION._serialized_start=10401
+  _EXECUTION._serialized_end=10714
+  _ACTIONCACHE._serialized_start=10717
+  _ACTIONCACHE._serialized_end=11187
+  _CONTENTADDRESSABLESTORAGE._serialized_start=11190
+  _CONTENTADDRESSABLESTORAGE._serialized_end=11985
+  _CAPABILITIES._serialized_start=11988
+  _CAPABILITIES._serialized_end=12177
 # @@protoc_insertion_point(module_scope)
diff --git a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
index 5225958ae..1ed5dc9d9 100644
--- a/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
+++ b/src/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
@@ -109,7 +109,12 @@ class ExecutionServicer(object):
         send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
         where, for each requested blob not present in the CAS, there is a
         `Violation` with a `type` of `MISSING` and a `subject` of
-        `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
+        `"blobs/{digest_function/}{hash}/{size}"` indicating the digest of the
+        missing blob. The `subject` is formatted the same way as the
+        `resource_name` provided to
+        [ByteStream.Read][google.bytestream.ByteStream.Read], with the leading
+        instance name omitted. `digest_function` MUST thus be omitted if its value
+        is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, or VSO.
 
         The server does not need to guarantee that a call to this method leads to
         at most one execution of the action. The server MAY execute the action
@@ -127,6 +132,14 @@ class ExecutionServicer(object):
         operation completes, and then respond with the completed operation. The
         server MAY choose to stream additional updates as execution progresses,
         such as to provide an update as to the state of the execution.
+
+        In addition to the cases described for Execute, the WaitExecution method
+        may fail as follows:
+
+        * `NOT_FOUND`: The operation no longer exists due to any of a transient
+        condition, an unknown operation name, or if the server implements the
+        Operations API DeleteOperation method and it was called for the current
+        execution. The client should call `Execute` to retry.
         """
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
         context.set_details('Method not implemented!')
@@ -394,20 +407,27 @@ class ContentAddressableStorageStub(object):
     [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
 
     For uncompressed data, The `WriteRequest.resource_name` is of the following form:
-    `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+    `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`
 
     Where:
-    * `instance_name` is an identifier, possibly containing multiple path
-    segments, used to distinguish between the various instances on the server,
-    in a manner defined by the server. If it is the empty path, the leading
-    slash is omitted, so that  the `resource_name` becomes
-    `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
+    * `instance_name` is an identifier used to distinguish between the various
+    instances on the server. Syntax and semantics of this field are defined
+    by the server; Clients must not make any assumptions about it (e.g.,
+    whether it spans multiple path segments or not). If it is the empty path,
+    the leading slash is omitted, so that  the `resource_name` becomes
+    `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`.
     To simplify parsing, a path segment cannot equal any of the following
     keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
     `capabilities` or `compressed-blobs`.
     * `uuid` is a version 4 UUID generated by the client, used to avoid
     collisions between concurrent uploads of the same data. Clients MAY
     reuse the same `uuid` for uploading different blobs.
+    * `digest_function` is a lowercase string form of a `DigestFunction.Value`
+    enum, indicating which digest function was used to compute `hash`. If the
+    digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512,
+    or VSO, this component MUST be omitted. In that case the server SHOULD
+    infer the digest function using the length of the `hash` and the digest
+    functions announced in the server's capabilities.
     * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
     of the data being uploaded.
     * `optional_metadata` is implementation specific data, which clients MAY omit.
@@ -415,10 +435,11 @@ class ContentAddressableStorageStub(object):
 
     Data can alternatively be uploaded in compressed form, with the following
     `WriteRequest.resource_name` form:
-    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
 
     Where:
-    * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+    * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are
+    defined as above.
     * `compressor` is a lowercase string form of a `Compressor.Value` enum
     other than `identity`, which is supported by the server and advertised in
     [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
@@ -461,15 +482,17 @@ class ContentAddressableStorageStub(object):
     [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
 
     For uncompressed data, The `ReadRequest.resource_name` is of the following form:
-    `{instance_name}/blobs/{hash}/{size}`
-    Where `instance_name`, `hash` and `size` are defined as for uploads.
+    `{instance_name}/blobs/{digest_function/}{hash}/{size}`
+    Where `instance_name`, `digest_function`, `hash` and `size` are defined as
+    for uploads.
 
     Data can alternatively be downloaded in compressed form, with the following
     `ReadRequest.resource_name` form:
-    `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+    `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}`
 
     Where:
-    * `instance_name` and `compressor` are defined as for uploads.
+    * `instance_name`, `compressor` and `digest_function` are defined as for
+    uploads.
     * `uncompressed_hash` and `uncompressed_size` refer to the
     [Digest][build.bazel.remote.execution.v2.Digest] of the data being
     downloaded, once uncompressed. Clients MUST verify that these match
@@ -558,20 +581,27 @@ class ContentAddressableStorageServicer(object):
     [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
 
     For uncompressed data, The `WriteRequest.resource_name` is of the following form:
-    `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+    `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`
 
     Where:
-    * `instance_name` is an identifier, possibly containing multiple path
-    segments, used to distinguish between the various instances on the server,
-    in a manner defined by the server. If it is the empty path, the leading
-    slash is omitted, so that  the `resource_name` becomes
-    `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
+    * `instance_name` is an identifier used to distinguish between the various
+    instances on the server. Syntax and semantics of this field are defined
+    by the server; Clients must not make any assumptions about it (e.g.,
+    whether it spans multiple path segments or not). If it is the empty path,
+    the leading slash is omitted, so that the `resource_name` becomes
+    `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`.
     To simplify parsing, a path segment cannot equal any of the following
     keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
     `capabilities` or `compressed-blobs`.
     * `uuid` is a version 4 UUID generated by the client, used to avoid
     collisions between concurrent uploads of the same data. Clients MAY
     reuse the same `uuid` for uploading different blobs.
+    * `digest_function` is a lowercase string form of a `DigestFunction.Value`
+    enum, indicating which digest function was used to compute `hash`. If the
+    digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512,
+    or VSO, this component MUST be omitted. In that case the server SHOULD
+    infer the digest function using the length of the `hash` and the digest
+    functions announced in the server's capabilities.
     * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
     of the data being uploaded.
     * `optional_metadata` is implementation specific data, which clients MAY omit.
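
The "server SHOULD infer" rule above can be sketched roughly as follows. This is a hypothetical server-side helper, not part of this change; VSO is left out because its hash length is not listed here, and announced_functions is assumed to be a collection of lowercase function names taken from the server's capabilities:

    # Hex digest lengths for the fixed-length functions named above.
    HEX_LENGTHS = {
        "md5": 32,
        "murmur3": 32,
        "sha1": 40,
        "sha256": 64,
        "sha384": 96,
        "sha512": 128,
    }

    def infer_digest_function(hash_hex, announced_functions):
        # Match the hash length against the digest functions this server
        # announces in its capabilities. MD5 and MURMUR3 share a length, so a
        # server announcing both cannot disambiguate by length alone.
        candidates = [f for f in announced_functions
                      if HEX_LENGTHS.get(f) == len(hash_hex)]
        if len(candidates) != 1:
            raise ValueError(
                "cannot infer a unique digest function for a %d-character hash"
                % len(hash_hex))
        return candidates[0]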
@@ -579,10 +609,11 @@ class ContentAddressableStorageServicer(object):
 
     Data can alternatively be uploaded in compressed form, with the following
     `WriteRequest.resource_name` form:
-    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
 
     Where:
-    * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+    * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are
+    defined as above.
     * `compressor` is a lowercase string form of a `Compressor.Value` enum
     other than `identity`, which is supported by the server and advertised in
     [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
@@ -625,15 +656,17 @@ class ContentAddressableStorageServicer(object):
     [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
 
     For uncompressed data, the `ReadRequest.resource_name` is of the following form:
-    `{instance_name}/blobs/{hash}/{size}`
-    Where `instance_name`, `hash` and `size` are defined as for uploads.
+    `{instance_name}/blobs/{digest_function/}{hash}/{size}`
+    Where `instance_name`, `digest_function`, `hash` and `size` are defined as
+    for uploads.
 
     Data can alternatively be downloaded in compressed form, with the following
     `ReadRequest.resource_name` form:
-    `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+    `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}`
 
     Where:
-    * `instance_name` and `compressor` are defined as for uploads.
+    * `instance_name`, `compressor` and `digest_function` are defined as for
+    uploads.
     * `uncompressed_hash` and `uncompressed_size` refer to the
     [Digest][build.bazel.remote.execution.v2.Digest] of the data being
     downloaded, once uncompressed. Clients MUST verify that these match
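
For compressed reads, the "Clients MUST verify" requirement might look like the following sketch. It assumes DEFLATE payloads without zlib framing and a SHA-256 digest; a real client would dispatch on the negotiated compressor and digest function:

    import hashlib
    import zlib

    def verify_compressed_download(payload, uncompressed_hash, uncompressed_size):
        # Decompress (raw DEFLATE, wbits=-15), then compare the size and digest
        # of the uncompressed data against the values from the resource name.
        data = zlib.decompress(payload, -zlib.MAX_WBITS)
        if len(data) != uncompressed_size:
            raise ValueError("uncompressed size mismatch")
        if hashlib.sha256(data).hexdigest() != uncompressed_hash:
            raise ValueError("uncompressed digest mismatch")
        return data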
@@ -824,20 +857,27 @@ class ContentAddressableStorage(object):
     [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
 
     For uncompressed data, the `WriteRequest.resource_name` is of the following form:
-    `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`
+    `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`
 
     Where:
-    * `instance_name` is an identifier, possibly containing multiple path
-    segments, used to distinguish between the various instances on the server,
-    in a manner defined by the server. If it is the empty path, the leading
-    slash is omitted, so that  the `resource_name` becomes
-    `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`.
+    * `instance_name` is an identifier used to distinguish between the various
+    instances on the server. Syntax and semantics of this field are defined
+    by the server; clients must not make any assumptions about it (e.g.,
+    whether it spans multiple path segments or not). If it is the empty path,
+    the leading slash is omitted, so that the `resource_name` becomes
+    `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`.
     To simplify parsing, a path segment cannot equal any of the following
     keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
     `capabilities` or `compressed-blobs`.
     * `uuid` is a version 4 UUID generated by the client, used to avoid
     collisions between concurrent uploads of the same data. Clients MAY
     reuse the same `uuid` for uploading different blobs.
+    * `digest_function` is a lowercase string form of a `DigestFunction.Value`
+    enum, indicating which digest function was used to compute `hash`. If the
+    digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512,
+    or VSO, this component MUST be omitted. In that case the server SHOULD
+    infer the digest function using the length of the `hash` and the digest
+    functions announced in the server's capabilities.
     * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
     of the data being uploaded.
     * `optional_metadata` is implementation specific data, which clients MAY omit.
@@ -845,10 +885,11 @@ class ContentAddressableStorage(object):
 
     Data can alternatively be uploaded in compressed form, with the following
     `WriteRequest.resource_name` form:
-    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
+    `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
 
     Where:
-    * `instance_name`, `uuid` and `optional_metadata` are defined as above.
+    * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are
+    defined as above.
     * `compressor` is a lowercase string form of a `Compressor.Value` enum
     other than `identity`, which is supported by the server and advertised in
     [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
@@ -891,15 +932,17 @@ class ContentAddressableStorage(object):
     [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
 
     For uncompressed data, the `ReadRequest.resource_name` is of the following form:
-    `{instance_name}/blobs/{hash}/{size}`
-    Where `instance_name`, `hash` and `size` are defined as for uploads.
+    `{instance_name}/blobs/{digest_function/}{hash}/{size}`
+    Where `instance_name`, `digest_function`, `hash` and `size` are defined as
+    for uploads.
 
     Data can alternatively be downloaded in compressed form, with the following
     `ReadRequest.resource_name` form:
-    `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}`
+    `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}`
 
     Where:
-    * `instance_name` and `compressor` are defined as for uploads.
+    * `instance_name`, `compressor` and `digest_function` are defined as for
+    uploads.
     * `uncompressed_hash` and `uncompressed_size` refer to the
     [Digest][build.bazel.remote.execution.v2.Digest] of the data being
     downloaded, once uncompressed. Clients MUST verify that these match
@@ -1046,6 +1089,8 @@ class CapabilitiesServicer(object):
         CacheCapabilities and ExecutionCapabilities.
         * Execution only endpoints should return ExecutionCapabilities.
         * CAS + Action Cache only endpoints should return CacheCapabilities.
+
+        There are no method-specific errors.
         """
         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
         context.set_details('Method not implemented!')
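
To make the endpoint guidance above concrete, a CAS + Action Cache only server might override GetCapabilities roughly as follows. This is a sketch only; the field and enum names are taken from the generated remote_execution_pb2 module and should be checked against the deployed version:

    from buildstream._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2,
        remote_execution_pb2_grpc,
    )

    class CacheOnlyCapabilities(remote_execution_pb2_grpc.CapabilitiesServicer):

        def GetCapabilities(self, request, context):
            # Return only CacheCapabilities; execution_capabilities stays unset
            # because this endpoint does not offer remote execution.
            return remote_execution_pb2.ServerCapabilities(
                cache_capabilities=remote_execution_pb2.CacheCapabilities(
                    digest_functions=[remote_execution_pb2.DigestFunction.SHA256],
                ),
            )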
