Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package rekor for openSUSE:Factory checked in at 2022-12-05 18:01:40
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/rekor (Old)
 and      /work/SRC/openSUSE:Factory/.rekor.new.1835 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "rekor"

Mon Dec  5 18:01:40 2022 rev:13 rq:1040165 version:1.0.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/rekor/rekor.changes      2022-10-19 13:18:33.401331493 +0200
+++ /work/SRC/openSUSE:Factory/.rekor.new.1835/rekor.changes    2022-12-05 18:01:52.276800532 +0100
@@ -1,0 +2,6 @@
+Tue Nov 29 13:42:54 UTC 2022 - Marcus Meissner <meiss...@suse.com>
+
+- updated to rekor 1.0.1 (jsc#SLE-23476):
+  - stop inserting envelope hash for intoto:0.0.2 types into index
+
+-------------------------------------------------------------------
@@ -4 +10 @@
-- updated to rekor 1.0.0 (sc#SLE-23476):
+- updated to rekor 1.0.0 (jsc#SLE-23476):

Old:
----
  rekor-1.0.0.tar.gz

New:
----
  rekor-1.0.1.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ rekor.spec ++++++
--- /var/tmp/diff_new_pack.qMZFq0/_old  2022-12-05 18:01:53.436806849 +0100
+++ /var/tmp/diff_new_pack.qMZFq0/_new  2022-12-05 18:01:53.440806871 +0100
@@ -19,9 +19,9 @@
 %define apps cli server
 
 Name:           rekor
-Version:        1.0.0
+Version:        1.0.1
 Release:        0
-%define revision 7215f5c4782deef0b9c249d39ab6b9bc70d58a94
+%define revision d3162350e96098ca8a24adfdbee42057e43b5de6
 Summary:        Supply Chain Transparency Log
 License:        Apache-2.0
 URL:            https://github.com/sigstore/rekor

++++++ rekor-1.0.0.tar.gz -> rekor-1.0.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.github/workflows/build.yml new/rekor-1.0.1/.github/workflows/build.yml
--- old/rekor-1.0.0/.github/workflows/build.yml 2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.github/workflows/build.yml 2022-11-10 16:26:56.000000000 +0100
@@ -20,7 +20,7 @@
   push:
     branches:
       - main
-      - release-*
+      - 'release-**'
     tags:
       - '*'
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.github/workflows/codeql-analysis.yml new/rekor-1.0.1/.github/workflows/codeql-analysis.yml
--- old/rekor-1.0.0/.github/workflows/codeql-analysis.yml       2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.github/workflows/codeql-analysis.yml       2022-11-10 16:26:56.000000000 +0100
@@ -17,10 +17,13 @@
 name: CodeQL
 on:
   push:
-    branches: [ main ]
+    branches:
+      - main
+      - 'release-**'
   pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ main ]
+    branches:
+      - main
+      - 'release-**'
   schedule:
     - cron: '45 10 * * 1'
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.github/workflows/main.yml new/rekor-1.0.1/.github/workflows/main.yml
--- old/rekor-1.0.0/.github/workflows/main.yml  2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.github/workflows/main.yml  2022-11-10 16:26:56.000000000 +0100
@@ -17,9 +17,13 @@
 
 on:
   push:
-    branches: [ main ]
+    branches:
+      - main
+      - 'release-**'
   pull_request:
-    branches: [ main ]
+    branches:
+      - main
+      - 'release-**'
 
 permissions:
   contents: read
@@ -63,8 +67,10 @@
 
       - name: container
         run: |
-          make ko-local 2>&1 | tee output.txt
-          docker run --rm $(tail -1 output.txt) version
+          make ko-local
+          docker run --rm $(cat rekorImagerefs) version
+          docker run --rm $(cat cliImagerefs) version
+          docker run --rm $(cat redisImagerefs) --version
 
   e2e:
     runs-on: ubuntu-20.04
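
Note: the container step above no longer scrapes the last line of the ko output; ko-local now writes one image reference per file via --image-refs (see the Makefile diff further below), and the workflow smoke-tests each image by reading those files back. A minimal Go sketch of the same consumption pattern (the file name "rekorImagerefs" and the "version" subcommand are taken from the workflow; the program itself is illustrative, not part of the tree):

    package main

    import (
            "log"
            "os"
            "os/exec"
            "strings"
    )

    func main() {
            // ko writes the published image reference(s) into this file,
            // one per line; take the first and run its "version" command.
            refs, err := os.ReadFile("rekorImagerefs")
            if err != nil {
                    log.Fatal(err)
            }
            ref := strings.TrimSpace(strings.SplitN(string(refs), "\n", 2)[0])
            cmd := exec.Command("docker", "run", "--rm", ref, "version")
            cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
            if err := cmd.Run(); err != nil {
                    log.Fatal(err)
            }
    }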
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.github/workflows/milestone.yml new/rekor-1.0.1/.github/workflows/milestone.yml
--- old/rekor-1.0.0/.github/workflows/milestone.yml     2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.github/workflows/milestone.yml     2022-11-10 16:26:56.000000000 +0100
@@ -5,6 +5,7 @@
     types: [closed]
     branches:
       - main
+      - 'release-**'
 
 jobs:
   milestone:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.github/workflows/scorecard_action.yml new/rekor-1.0.1/.github/workflows/scorecard_action.yml
--- old/rekor-1.0.0/.github/workflows/scorecard_action.yml      2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.github/workflows/scorecard_action.yml      2022-11-10 16:26:56.000000000 +0100
@@ -6,7 +6,9 @@
     # Weekly on Saturdays.
     - cron: '30 1 * * 6'
   push:
-    branches: [ main ]
+    branches:
+      - main
+      - 'release-**'
 
 # Declare default permissions as read only.
 permissions: read-all
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.github/workflows/validate-release.yml new/rekor-1.0.1/.github/workflows/validate-release.yml
--- old/rekor-1.0.0/.github/workflows/validate-release.yml      2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.github/workflows/validate-release.yml      2022-11-10 16:26:56.000000000 +0100
@@ -19,7 +19,7 @@
   push:
     branches:
       - main
-      - release-*
+      - 'release-**'
   pull_request:
 
 jobs:
@@ -39,8 +39,8 @@
       statuses: none
 
     env:
-      CROSS_BUILDER_IMAGE: ghcr.io/gythialy/golang-cross:v1.19.2-1@sha256:d7e32c3e7d89356fb014ded4c1be7baabe3c454ca7753842334226fd3327d280
-      COSIGN_IMAGE: gcr.io/projectsigstore/cosign:v1.13.0@sha256:398f441c46e58906dc6d3aaaad22fe63f018dc30acbe13b326e5a016e711301c
+      CROSS_BUILDER_IMAGE: ghcr.io/gythialy/golang-cross:v1.19.3-0@sha256:1072190e76d68f455f1bedb7430a633916b6629a722c42246037ac518fdb0ff2
+      COSIGN_IMAGE: gcr.io/projectsigstore/cosign:v1.13.1@sha256:fd5b09be23ef1027e1bdd490ce78dcc65d2b15902e1f4ba8e04f3b4019cc1057
 
     steps:
      - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.0.2
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.gitignore new/rekor-1.0.1/.gitignore
--- old/rekor-1.0.0/.gitignore  2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.gitignore  2022-11-10 16:26:56.000000000 +0100
@@ -17,6 +17,9 @@
 rekorCliImagerefs
 trillianServerImagerefs
 trillianSignerImagerefs
+rekorImagerefs
+cliImagerefs
+redisImagerefs
 cosign.*
 signature
 rekor.pub
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/.ko.yaml new/rekor-1.0.1/.ko.yaml
--- old/rekor-1.0.0/.ko.yaml    2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/.ko.yaml    2022-11-10 16:26:56.000000000 +0100
@@ -46,3 +46,18 @@
   ldflags:
   - -extldflags "-static"
   - "{{ .Env.LDFLAGS }}"
+
+- id: backfill-redis
+  dir: .
+  main: ./cmd/backfill-redis
+  env:
+  - CGO_ENABLED=0
+  flags:
+  - -trimpath
+  - --tags
+  - "{{ .Env.GIT_HASH }}"
+  - --tags
+  - "{{ .Env.GIT_VERSION }}"
+  ldflags:
+  - -extldflags "-static"
+  - "{{ .Env.LDFLAGS }}"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/Makefile new/rekor-1.0.1/Makefile
--- old/rekor-1.0.0/Makefile    2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/Makefile    2022-11-10 16:26:56.000000000 +0100
@@ -81,6 +81,9 @@
 rekor-server: $(SRCS)
        CGO_ENABLED=0 go build -trimpath -ldflags "$(SERVER_LDFLAGS)" -o rekor-server ./cmd/rekor-server
 
+backfill-redis: $(SRCS)
+       CGO_ENABLED=0 go build -trimpath -ldflags "$(SERVER_LDFLAGS)" -o rekor-server ./cmd/backfill-redis
+
 test:
        go test ./...
 
@@ -122,6 +125,12 @@
                --platform=all --tags $(GIT_VERSION) --tags $(GIT_HASH) \
                --image-refs rekorCliImagerefs github.com/sigstore/rekor/cmd/rekor-cli
 
+       # backfill-redis
+       LDFLAGS="$(SERVER_LDFLAGS)" GIT_HASH=$(GIT_HASH) GIT_VERSION=$(GIT_VERSION) \
+       ko publish --base-import-paths \
+               --platform=all --tags $(GIT_VERSION) --tags $(GIT_HASH) \
+               --image-refs bRedisImagerefs github.com/sigstore/rekor/cmd/backfill-redis
+
 deploy:
        LDFLAGS="$(SERVER_LDFLAGS)" GIT_HASH=$(GIT_HASH) GIT_VERSION=$(GIT_VERSION) ko apply -f config/
 
@@ -138,14 +147,19 @@
 ko-local:
        LDFLAGS="$(SERVER_LDFLAGS)" GIT_HASH=$(GIT_HASH) 
GIT_VERSION=$(GIT_VERSION) \
        ko publish --base-import-paths \
-               --tags $(GIT_VERSION) --tags $(GIT_HASH) --local \
+               --tags $(GIT_VERSION) --tags $(GIT_HASH) --local --image-refs 
rekorImagerefs \
                github.com/sigstore/rekor/cmd/rekor-server
 
        LDFLAGS="$(CLI_LDFLAGS)" GIT_HASH=$(GIT_HASH) 
GIT_VERSION=$(GIT_VERSION) \
        ko publish --base-import-paths \
-               --tags $(GIT_VERSION) --tags $(GIT_HASH) --local \
+               --tags $(GIT_VERSION) --tags $(GIT_HASH) --local --image-refs 
cliImagerefs \
                github.com/sigstore/rekor/cmd/rekor-cli
 
+       LDFLAGS="$(SERVER_LDFLAGS)" GIT_HASH=$(GIT_HASH) 
GIT_VERSION=$(GIT_VERSION) \
+       ko publish --base-import-paths \
+               --tags $(GIT_VERSION) --tags $(GIT_HASH) --local --image-refs 
redisImagerefs \
+               github.com/sigstore/rekor/cmd/backfill-redis
+
 # This builds the trillian containers we rely on using ko for cross platform 
support
 .PHONY: ko-trillian
 ko-trillian:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/cmd/backfill-redis/main.go new/rekor-1.0.1/cmd/backfill-redis/main.go
--- old/rekor-1.0.0/cmd/backfill-redis/main.go  1970-01-01 01:00:00.000000000 +0100
+++ new/rekor-1.0.1/cmd/backfill-redis/main.go  2022-11-10 16:26:56.000000000 +0100
@@ -0,0 +1,181 @@
+// Copyright 2022 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+       backfill-redis is a script to populate the Redis index with entries
+       from Rekor. This is sometimes necessary because Redis caching is best
+       effort. If Redis returns an error, Rekor will not, and so sometimes
+       we need to backfill missing entries into Redis for the search API.
+
+       To run:
+       go run cmd/backfill-redis/main.go --rekor-address <address> \
+           --hostname <redis-hostname> --port <redis-port>
+               --start <first index to backfill> --end <last index to backfill>
+*/
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "encoding/base64"
+       "flag"
+       "fmt"
+       "log"
+       "os"
+
+       "github.com/go-openapi/runtime"
+       radix "github.com/mediocregopher/radix/v4"
+       "sigs.k8s.io/release-utils/version"
+
+       "github.com/sigstore/rekor/pkg/client"
+       "github.com/sigstore/rekor/pkg/generated/client/entries"
+       "github.com/sigstore/rekor/pkg/generated/models"
+       "github.com/sigstore/rekor/pkg/types"
+
+       // these imports are to call the packages' init methods
+       _ "github.com/sigstore/rekor/pkg/types/alpine/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/cose/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/helm/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/intoto/v0.0.1"
+
+       // remove 0.0.2 intoto type due to bugs
+       // _ "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2"
+       _ "github.com/sigstore/rekor/pkg/types/jar/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/rekord/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/rfc3161/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/rpm/v0.0.1"
+       _ "github.com/sigstore/rekor/pkg/types/tuf/v0.0.1"
+)
+
+var (
+       redisHostname = flag.String("hostname", "", "Hostname for Redis application")
+       redisPort     = flag.String("port", "", "Port to Redis application")
+       startIndex    = flag.Int("start", -1, "First index to backfill")
+       endIndex      = flag.Int("end", -1, "Last index to backfill")
+       rekorAddress  = flag.String("rekor-address", "", "Address for Rekor, e.g. https://rekor.sigstore.dev")
+       versionFlag   = flag.Bool("version", false, "Print the current version of Backfill Redis")
+)
+
+func main() {
+       flag.Parse()
+
+       versionInfo := version.GetVersionInfo()
+       if *versionFlag {
+               fmt.Println(versionInfo.String())
+               os.Exit(0)
+       }
+
+       if *redisHostname == "" {
+               log.Fatal("address must be set")
+       }
+       if *redisPort == "" {
+               log.Fatal("port must be set")
+       }
+       if *startIndex == -1 {
+               log.Fatal("start must be set to >=0")
+       }
+       if *endIndex == -1 {
+               log.Fatal("end must be set to >=0")
+       }
+       if *rekorAddress == "" {
+               log.Fatal("rekor-address must be set")
+       }
+
+       log.Printf("running backfill redis Version: %s GitCommit: %s BuildDate: %s", versionInfo.GitVersion, versionInfo.GitCommit, versionInfo.BuildDate)
+
+       cfg := radix.PoolConfig{}
+       redisClient, err := cfg.New(context.Background(), "tcp", fmt.Sprintf("%s:%s", *redisHostname, *redisPort))
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       rekorClient, err := client.GetRekorClient(*rekorAddress)
+       if err != nil {
+               log.Fatalf("creating rekor client: %v", err)
+       }
+
+       for i := *startIndex; i <= *endIndex; i++ {
+               params := entries.NewGetLogEntryByIndexParamsWithContext(context.Background())
+               params.SetLogIndex(int64(i))
+               resp, err := rekorClient.Entries.GetLogEntryByIndex(params)
+               if err != nil {
+                       log.Fatalf("retrieving log uuid by index: %v", err)
+               }
+               var insertErrs []error
+               for uuid, entry := range resp.Payload {
+                       // uuid is the global UUID - tree ID and entry UUID
+                       e, _, _, err := unmarshalEntryImpl(entry.Body.(string))
+                       if err != nil {
+                               insertErrs = append(insertErrs, fmt.Errorf("error unmarshalling entry for %s: %v", uuid, err))
+                               continue
+                       }
+                       keys, err := e.IndexKeys()
+                       if err != nil {
+                               insertErrs = append(insertErrs, fmt.Errorf("error building index keys for %s: %v", uuid, err))
+                               continue
+                       }
+                       for _, key := range keys {
+                               // remove the key-value pair from the index in case it already exists
+                               if err := removeFromIndex(context.Background(), redisClient, key, uuid); err != nil {
+                                       insertErrs = append(insertErrs, fmt.Errorf("error removing UUID %s with key %s: %v", uuid, key, err))
+                               }
+                               if err := addToIndex(context.Background(), redisClient, key, uuid); err != nil {
+                                       insertErrs = append(insertErrs, fmt.Errorf("error inserting UUID %s with key %s: %v", uuid, key, err))
+                               }
+                               fmt.Printf("Uploaded Redis entry %s, index %d, key %s\n", uuid, i, key)
+                       }
+               }
+               if len(insertErrs) != 0 {
+                       fmt.Printf("Errors with log index %d:\n", i)
+                       for _, e := range insertErrs {
+                               fmt.Println(e)
+                       }
+               } else {
+                       fmt.Printf("Completed log index %d\n", i)
+               }
+       }
+}
+
+// unmarshalEntryImpl decodes the base64-encoded entry to a specific entry type (types.EntryImpl).
+// Taken from Cosign
+func unmarshalEntryImpl(e string) (types.EntryImpl, string, string, error) {
+       b, err := base64.StdEncoding.DecodeString(e)
+       if err != nil {
+               return nil, "", "", err
+       }
+
+       pe, err := models.UnmarshalProposedEntry(bytes.NewReader(b), runtime.JSONConsumer())
+       if err != nil {
+               return nil, "", "", err
+       }
+
+       entry, err := types.UnmarshalEntry(pe)
+       if err != nil {
+               return nil, "", "", err
+       }
+       return entry, pe.Kind(), entry.APIVersion(), nil
+}
+
+// removeFromIndex removes all occurrences of a value from a given key. This guards against
+// multiple invocations of backfilling creating duplicates.
+func removeFromIndex(ctx context.Context, redisClient radix.Client, key, value string) error {
+       return redisClient.Do(ctx, radix.Cmd(nil, "LREM", key, "0", value))
+}
+
+// addToIndex pushes a value onto a key of type list.
+func addToIndex(ctx context.Context, redisClient radix.Client, key, value string) error {
+       return redisClient.Do(ctx, radix.Cmd(nil, "LPUSH", key, value))
+}
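
Note: each index key in Redis is a list, and the backfill stays idempotent by combining the two helpers above: LREM with count 0 first deletes every occurrence of the UUID under the key, then LPUSH re-adds exactly one, so re-running the tool over the same index range cannot create duplicates. A minimal sketch of that pattern with radix/v4 (the upsertIndex name is illustrative, not part of the tool):

    import (
            "context"

            radix "github.com/mediocregopher/radix/v4"
    )

    // upsertIndex leaves exactly one occurrence of value in the list at key:
    // LREM 0 removes all existing copies, LPUSH pushes a single fresh one.
    func upsertIndex(ctx context.Context, c radix.Client, key, value string) error {
            if err := c.Do(ctx, radix.Cmd(nil, "LREM", key, "0", value)); err != nil {
                    return err
            }
            return c.Do(ctx, radix.Cmd(nil, "LPUSH", key, value))
    }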
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/openapi.yaml new/rekor-1.0.1/openapi.yaml
--- old/rekor-1.0.0/openapi.yaml        2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/openapi.yaml        2022-11-10 16:26:56.000000000 +0100
@@ -236,6 +236,8 @@
               $ref: '#/definitions/LogEntry'
         400:
           $ref: '#/responses/BadContent'
+        422:
+          $ref: '#/responses/UnprocessableEntity'
         default:
           $ref: '#/responses/InternalServerError'
   
@@ -643,3 +645,7 @@
     description: There was an internal error in the server while processing the request
     schema:
       $ref: "#/definitions/Error"
+  UnprocessableEntity:
+    description: The server understood the request but is unable to process the contained instructions
+    schema:
+      $ref: "#/definitions/Error"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/api/entries.go new/rekor-1.0.1/pkg/api/entries.go
--- old/rekor-1.0.0/pkg/api/entries.go  2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/api/entries.go  2022-11-10 16:26:56.000000000 +0100
@@ -361,27 +361,29 @@
                g, _ := errgroup.WithContext(httpReqCtx)
 
                var searchHashes [][]byte
-               code := http.StatusBadRequest
                for _, entryID := range params.Entry.EntryUUIDs {
-                       if sharding.ValidateEntryID(entryID) == nil {
+                       // if we got this far, then entryID is either a 64 or 80 character hex string
+                       err := sharding.ValidateEntryID(entryID)
+                       if err == nil {
                                logEntry, err := retrieveLogEntry(httpReqCtx, entryID)
-                               if errors.Is(err, ErrNotFound) {
-                                       code = http.StatusNotFound
+                               if err != nil && !errors.Is(err, ErrNotFound) {
+                                       return handleRekorAPIError(params, http.StatusInternalServerError, err, fmt.Sprintf("error getting log entry for %s", entryID))
+                               } else if err == nil {
+                                       resultPayload = append(resultPayload, logEntry)
                                }
-                               if err != nil {
-                                       return handleRekorAPIError(params, code, err, fmt.Sprintf("error getting log entry for %s", entryID))
-                               }
-                               resultPayload = append(resultPayload, logEntry)
                                continue
+                       } else if len(entryID) == sharding.EntryIDHexStringLen {
+                               // if ValidateEntryID failed and this is a full length entryID, then we can't search for it
+                               return handleRekorAPIError(params, http.StatusBadRequest, err, fmt.Sprintf("invalid entryID %s", entryID))
                        }
                        // At this point, check if we got a uuid instead of an EntryID, so search for the hash later
                        uuid := entryID
                        if err := sharding.ValidateUUID(uuid); err != nil {
-                               return handleRekorAPIError(params, code, err, fmt.Sprintf("validating uuid %s", uuid))
+                               return handleRekorAPIError(params, http.StatusBadRequest, err, fmt.Sprintf("invalid uuid %s", uuid))
                        }
                        hash, err := hex.DecodeString(uuid)
                        if err != nil {
-                               return handleRekorAPIError(params, code, err, malformedUUID)
+                               return handleRekorAPIError(params, http.StatusBadRequest, err, malformedUUID)
                        }
                        searchHashes = append(searchHashes, hash)
                }
@@ -408,7 +410,7 @@
                }
 
                if err := g.Wait(); err != nil {
-                       return handleRekorAPIError(params, code, err, err.Error())
+                       return handleRekorAPIError(params, http.StatusBadRequest, err, err.Error())
                }
                close(searchHashesChan)
                for hash := range searchHashesChan {
@@ -424,31 +426,30 @@
                                for _, shard := range api.logRanges.AllShards() {
                                        tcs := NewTrillianClientFromTreeID(httpReqCtx, shard)
                                        resp := tcs.getLeafAndProofByHash(hash)
-                                       if resp.status != codes.OK {
-                                               continue
-                                       }
-                                       if resp.err != nil {
-                                               continue
-                                       }
-                                       leafResult := resp.getLeafAndProofResult
-                                       if leafResult != nil && leafResult.Leaf != nil {
-                                               if results == nil {
-                                                       results = map[int64]*trillian.GetEntryAndProofResponse{}
+                                       switch resp.status {
+                                       case codes.OK:
+                                               leafResult := resp.getLeafAndProofResult
+                                               if leafResult != nil && leafResult.Leaf != nil {
+                                                       if results == nil {
+                                                               results = map[int64]*trillian.GetEntryAndProofResponse{}
+                                                       }
+                                                       results[shard] = resp.getLeafAndProofResult
                                                }
-                                               results[shard] = resp.getLeafAndProofResult
+                                       case codes.NotFound:
+                                               // do nothing here, do not throw 404 error
+                                               continue
+                                       default:
+                                               log.ContextLogger(httpReqCtx).Errorf("error getLeafAndProofByHash(%s): code: %v, msg %v", hex.EncodeToString(hash), resp.status, resp.err)
+                                               return fmt.Errorf(trillianCommunicationError)
                                        }
                                }
-                               if results == nil {
-                                       code = http.StatusNotFound
-                                       return fmt.Errorf("no responses found")
-                               }
                                searchByHashResults[i] = results
                                return nil
                        })
                }

                if err := g.Wait(); err != nil {
-                       return handleRekorAPIError(params, code, err, err.Error())
+                       return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error())
                }
 
                for _, hashMap := range searchByHashResults {
@@ -459,8 +460,7 @@
                                tcs := NewTrillianClientFromTreeID(httpReqCtx, shard)
                                logEntry, err := logEntryFromLeaf(httpReqCtx, api.signer, tcs, leafResp.Leaf, leafResp.SignedLogRoot, leafResp.Proof, shard, api.logRanges)
                                if err != nil {
-                                       code = http.StatusInternalServerError
-                                       return handleRekorAPIError(params, code, err, err.Error())
+                                       return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error())
                                }
                                resultPayload = append(resultPayload, logEntry)
                        }
@@ -471,26 +471,21 @@
                g, _ := errgroup.WithContext(httpReqCtx)
                resultPayloadChan := make(chan models.LogEntry, len(params.Entry.LogIndexes))

-               code := http.StatusInternalServerError
                for _, logIndex := range params.Entry.LogIndexes {
                        logIndex := logIndex // https://golang.org/doc/faq#closures_and_goroutines
                        g.Go(func() error {
                                logEntry, err := retrieveLogEntryByIndex(httpReqCtx, int(swag.Int64Value(logIndex)))
-                               if err != nil {
-                                       switch {
-                                       case errors.Is(err, ErrNotFound):
-                                               code = http.StatusNotFound
-                                       default:
-                                       }
+                               if err != nil && !errors.Is(err, ErrNotFound) {
                                        return err
+                               } else if err == nil {
+                                       resultPayloadChan <- logEntry
                                }
-                               resultPayloadChan <- logEntry
                                return nil
                        })
                }

                if err := g.Wait(); err != nil {
-                       return handleRekorAPIError(params, code, err, err.Error())
+                       return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error())
                }
                close(resultPayloadChan)
                for result := range resultPayloadChan {
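
Note: the upshot of this refactor is that searchLogQuery no longer turns a missing entry into a 404; entries that cannot be found are skipped, bad input maps to 400, and only unexpected failures map to 500 (the e2e test changes further below assert the new 200-with-empty-array behavior). The recurring pattern, using the same names as the diff above:

    // missing is not an error: only unexpected failures abort the request
    logEntry, err := retrieveLogEntry(httpReqCtx, entryID)
    if err != nil && !errors.Is(err, ErrNotFound) {
            return handleRekorAPIError(params, http.StatusInternalServerError, err,
                    fmt.Sprintf("error getting log entry for %s", entryID))
    } else if err == nil {
            resultPayload = append(resultPayload, logEntry) // found: include it
    }
    // on ErrNotFound: fall through and return whatever entries were found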
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/api/error.go new/rekor-1.0.1/pkg/api/error.go
--- old/rekor-1.0.0/pkg/api/error.go    2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/api/error.go    2022-11-10 16:26:56.000000000 +0100
@@ -33,21 +33,21 @@
 )
 
 const (
-       trillianCommunicationError     = "Unexpected error communicating with transparency log"
-       trillianUnexpectedResult       = "Unexpected result from transparency log"
-       validationError                = "Error processing entry: %v"
-       failedToGenerateCanonicalEntry = "Error generating canonicalized entry"
-       entryAlreadyExists             = "An equivalent entry already exists in the transparency log with UUID %v"
+       trillianCommunicationError     = "unexpected error communicating with transparency log"
+       trillianUnexpectedResult       = "unexpected result from transparency log"
+       validationError                = "error processing entry: %v"
+       failedToGenerateCanonicalEntry = "error generating canonicalized entry"
+       entryAlreadyExists             = "an equivalent entry already exists in the transparency log with UUID %v"
        firstSizeLessThanLastSize      = "firstSize(%d) must be less than lastSize(%d)"
        malformedUUID                  = "UUID must be a 64-character hexadecimal string"
-       malformedPublicKey             = "Public key provided could not be parsed"
-       failedToGenerateCanonicalKey   = "Error generating canonicalized public key"
-       redisUnexpectedResult          = "Unexpected result from searching index"
-       lastSizeGreaterThanKnown       = "The tree size requested(%d) was greater than what is currently observable(%d)"
-       signingError                   = "Error signing"
-       sthGenerateError               = "Error generating signed tree head"
-       unsupportedPKIFormat           = "The PKI format requested is not supported by this server"
-       unexpectedInactiveShardError   = "Unexpected error communicating with inactive shard"
+       malformedPublicKey             = "public key provided could not be parsed"
+       failedToGenerateCanonicalKey   = "error generating canonicalized public key"
+       redisUnexpectedResult          = "unexpected result from searching index"
+       lastSizeGreaterThanKnown       = "the tree size requested(%d) was greater than what is currently observable(%d)"
+       signingError                   = "error signing"
+       sthGenerateError               = "error generating signed tree head"
+       unsupportedPKIFormat           = "the PKI format requested is not supported by this server"
+       unexpectedInactiveShardError   = "unexpected error communicating with inactive shard"
        maxSearchQueryLimit            = "more than max allowed %d entries in request"
 )
 
@@ -122,6 +122,8 @@
                switch code {
                case http.StatusBadRequest:
                        return entries.NewSearchLogQueryBadRequest().WithPayload(errorMsg(message, code))
+               case http.StatusUnprocessableEntity:
+                       return entries.NewSearchLogQueryUnprocessableEntity().WithPayload(errorMsg(message, code))
                default:
                        return entries.NewSearchLogQueryDefault(code).WithPayload(errorMsg(message, code))
                }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/generated/client/entries/search_log_query_responses.go new/rekor-1.0.1/pkg/generated/client/entries/search_log_query_responses.go
--- old/rekor-1.0.0/pkg/generated/client/entries/search_log_query_responses.go  2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/generated/client/entries/search_log_query_responses.go  2022-11-10 16:26:56.000000000 +0100
@@ -51,6 +51,12 @@
                        return nil, err
                }
                return nil, result
+       case 422:
+               result := NewSearchLogQueryUnprocessableEntity()
+               if err := result.readResponse(response, consumer, o.formats); err != nil {
+                       return nil, err
+               }
+               return nil, result
        default:
                result := NewSearchLogQueryDefault(response.Code())
                if err := result.readResponse(response, consumer, o.formats); err != nil {
@@ -179,6 +185,69 @@
 
        o.Payload = new(models.Error)
 
+       // response payload
+       if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+               return err
+       }
+
+       return nil
+}
+
+// NewSearchLogQueryUnprocessableEntity creates a SearchLogQueryUnprocessableEntity with default headers values
+func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity {
+       return &SearchLogQueryUnprocessableEntity{}
+}
+
+/*
+SearchLogQueryUnprocessableEntity describes a response with status code 422, with default header values.
+
+The server understood the request but is unable to process the contained instructions
+*/
+type SearchLogQueryUnprocessableEntity struct {
+       Payload *models.Error
+}
+
+// IsSuccess returns true when this search log query unprocessable entity response has a 2xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsSuccess() bool {
+       return false
+}
+
+// IsRedirect returns true when this search log query unprocessable entity response has a 3xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsRedirect() bool {
+       return false
+}
+
+// IsClientError returns true when this search log query unprocessable entity response has a 4xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsClientError() bool {
+       return true
+}
+
+// IsServerError returns true when this search log query unprocessable entity response has a 5xx status code
+func (o *SearchLogQueryUnprocessableEntity) IsServerError() bool {
+       return false
+}
+
+// IsCode returns true when this search log query unprocessable entity response a status code equal to that given
+func (o *SearchLogQueryUnprocessableEntity) IsCode(code int) bool {
+       return code == 422
+}
+
+func (o *SearchLogQueryUnprocessableEntity) Error() string {
+       return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity  %+v", 422, o.Payload)
+}
+
+func (o *SearchLogQueryUnprocessableEntity) String() string {
+       return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity  %+v", 422, o.Payload)
+}
+
+func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error {
+       return o.Payload
+}
+
+func (o *SearchLogQueryUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+       o.Payload = new(models.Error)
+
        // response payload
        if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
                return err
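
Note: the generated SearchLogQueryUnprocessableEntity type implements error (see its Error() method above), so client code can pick a 422 out of the returned error with errors.As. A sketch, assuming rekorClient and params are set up as usual for the generated client:

    resp, err := rekorClient.Entries.SearchLogQuery(params)
    if err != nil {
            var unprocessable *entries.SearchLogQueryUnprocessableEntity
            if errors.As(err, &unprocessable) {
                    // server parsed the request but rejected its contents (422)
                    log.Printf("query rejected: %+v", unprocessable.GetPayload())
                    return
            }
            log.Fatal(err)
    }
    for _, logEntry := range resp.GetPayload() {
            _ = logEntry // one models.LogEntry per match
    }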
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/generated/restapi/embedded_spec.go new/rekor-1.0.1/pkg/generated/restapi/embedded_spec.go
--- old/rekor-1.0.0/pkg/generated/restapi/embedded_spec.go      2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/generated/restapi/embedded_spec.go      2022-11-10 16:26:56.000000000 +0100
@@ -220,6 +220,9 @@
           "400": {
             "$ref": "#/responses/BadContent"
           },
+          "422": {
+            "$ref": "#/responses/UnprocessableEntity"
+          },
           "default": {
             "$ref": "#/responses/InternalServerError"
           }
@@ -911,6 +914,12 @@
     },
     "NotFound": {
       "description": "The content requested could not be found"
+    },
+    "UnprocessableEntity": {
+      "description": "The server understood the request but is unable to process the contained instructions",
+      "schema": {
+        "$ref": "#/definitions/Error"
+      }
     }
   }
 }`))
@@ -1132,6 +1141,12 @@
               "$ref": "#/definitions/Error"
             }
           },
+          "422": {
+            "description": "The server understood the request but is unable to process the contained instructions",
+            "schema": {
+              "$ref": "#/definitions/Error"
+            }
+          },
           "default": {
             "description": "There was an internal error in the server while 
processing the request",
             "schema": {
@@ -3892,6 +3907,12 @@
     },
     "NotFound": {
       "description": "The content requested could not be found"
+    },
+    "UnprocessableEntity": {
+      "description": "The server understood the request but is unable to process the contained instructions",
+      "schema": {
+        "$ref": "#/definitions/Error"
+      }
     }
   }
 }`))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/generated/restapi/operations/entries/search_log_query_responses.go new/rekor-1.0.1/pkg/generated/restapi/operations/entries/search_log_query_responses.go
--- old/rekor-1.0.0/pkg/generated/restapi/operations/entries/search_log_query_responses.go      2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/generated/restapi/operations/entries/search_log_query_responses.go      2022-11-10 16:26:56.000000000 +0100
@@ -122,6 +122,51 @@
        }
 }
 
+// SearchLogQueryUnprocessableEntityCode is the HTTP code returned for type SearchLogQueryUnprocessableEntity
+const SearchLogQueryUnprocessableEntityCode int = 422
+
+/*
+SearchLogQueryUnprocessableEntity The server understood the request but is unable to process the contained instructions
+
+swagger:response searchLogQueryUnprocessableEntity
+*/
+type SearchLogQueryUnprocessableEntity struct {
+
+       /*
+         In: Body
+       */
+       Payload *models.Error `json:"body,omitempty"`
+}
+
+// NewSearchLogQueryUnprocessableEntity creates SearchLogQueryUnprocessableEntity with default headers values
+func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity {
+
+       return &SearchLogQueryUnprocessableEntity{}
+}
+
+// WithPayload adds the payload to the search log query unprocessable entity response
+func (o *SearchLogQueryUnprocessableEntity) WithPayload(payload *models.Error) *SearchLogQueryUnprocessableEntity {
+       o.Payload = payload
+       return o
+}
+
+// SetPayload sets the payload to the search log query unprocessable entity response
+func (o *SearchLogQueryUnprocessableEntity) SetPayload(payload *models.Error) {
+       o.Payload = payload
+}
+
+// WriteResponse to the client
+func (o *SearchLogQueryUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
+
+       rw.WriteHeader(422)
+       if o.Payload != nil {
+               payload := o.Payload
+               if err := producer.Produce(rw, payload); err != nil {
+                       panic(err) // let the recovery middleware deal with this
+               }
+       }
+}
+
 /*
SearchLogQueryDefault There was an internal error in the server while processing the request
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/types/intoto/v0.0.2/entry.go new/rekor-1.0.1/pkg/types/intoto/v0.0.2/entry.go
--- old/rekor-1.0.0/pkg/types/intoto/v0.0.2/entry.go    2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/types/intoto/v0.0.2/entry.go    2022-11-10 16:26:56.000000000 +0100
@@ -96,8 +96,10 @@
        payloadKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value))
        result = append(result, payloadKey)

-       hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.Hash.Algorithm, *v.IntotoObj.Content.Hash.Value))
-       result = append(result, hashkey)
+       // since we can't deterministically calculate this server-side (due to public keys being added inline, and also canonicalization being potentially different),
+       // we'll just skip adding this index key
+       // hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.Hash.Algorithm, *v.IntotoObj.Content.Hash.Value))
+       // result = append(result, hashkey)
 
        switch *v.IntotoObj.Content.Envelope.PayloadType {
        case in_toto.PayloadType:
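
Note: the dropped key is the digest of the whole DSSE envelope, which the server cannot recompute deterministically once public keys are inlined and the envelope re-canonicalized (this is the changelog's "stop inserting envelope hash for intoto:0.0.2 types into index"); the payload hash key survives and remains the stable way to look these entries up. A sketch of computing that key the way the server does, lowercase "<alg>:<hex>", for an arbitrary payload (assuming the standard sha256/hex/strings/fmt imports; the payload bytes are an example):

    payload := []byte(`{"_type":"https://in-toto.io/Statement/v0.1"}`) // example bytes
    digest := sha256.Sum256(payload)
    key := strings.ToLower(fmt.Sprintf("%s:%s", "sha256", hex.EncodeToString(digest[:])))
    fmt.Println(key) // usable with the search index, e.g. rekor-cli search --sha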
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/pkg/types/intoto/v0.0.2/entry_test.go new/rekor-1.0.1/pkg/types/intoto/v0.0.2/entry_test.go
--- old/rekor-1.0.0/pkg/types/intoto/v0.0.2/entry_test.go       2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/pkg/types/intoto/v0.0.2/entry_test.go       2022-11-10 16:26:56.000000000 +0100
@@ -32,7 +32,6 @@
        "math/big"
        "reflect"
        "sort"
-       "strings"
        "testing"
 
        "github.com/go-openapi/runtime"
@@ -294,8 +293,6 @@
                                        t.Errorf("V002Entry.AttestationKey() = %v, want %v", v.AttestationKey(), "sha256:"+hex.EncodeToString(h[:]))
                                }
 
-                               hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *tt.it.Content.Hash.Algorithm, *tt.it.Content.Hash.Value))
-                               want = append(want, hashkey)
                                got, _ := v.IndexKeys()
                                sort.Strings(got)
                                sort.Strings(want)
@@ -456,8 +453,6 @@
 
                        want = append(want, "sha256:"+hex.EncodeToString(payloadHash[:]))
 
-                       hashkey := strings.ToLower("sha256:" + *v.IntotoObj.Content.Hash.Value)
-                       want = append(want, hashkey)
                        want = append(want, tt.want...)
                        got, _ := v.IndexKeys()
                        sort.Strings(got)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/release/cloudbuild.yaml new/rekor-1.0.1/release/cloudbuild.yaml
--- old/rekor-1.0.0/release/cloudbuild.yaml     2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/release/cloudbuild.yaml     2022-11-10 16:26:56.000000000 +0100
@@ -32,16 +32,16 @@
     echo "Checking out ${_GIT_TAG}"
     git checkout ${_GIT_TAG}
 
-- name: 'gcr.io/projectsigstore/cosign:v1.13.0@sha256:398f441c46e58906dc6d3aaaad22fe63f018dc30acbe13b326e5a016e711301c'
+- name: 'gcr.io/projectsigstore/cosign:v1.13.1@sha256:fd5b09be23ef1027e1bdd490ce78dcc65d2b15902e1f4ba8e04f3b4019cc1057'
   dir: "go/src/sigstore/rekor"
   env:
   - COSIGN_EXPERIMENTAL=true
   - TUF_ROOT=/tmp
   args:
   - 'verify'
-  - 'ghcr.io/gythialy/golang-cross:v1.19.2-1@sha256:d7e32c3e7d89356fb014ded4c1be7baabe3c454ca7753842334226fd3327d280'
+  - 'ghcr.io/gythialy/golang-cross:v1.19.3-0@sha256:1072190e76d68f455f1bedb7430a633916b6629a722c42246037ac518fdb0ff2'
 
-- name: ghcr.io/gythialy/golang-cross:v1.19.2-1@sha256:d7e32c3e7d89356fb014ded4c1be7baabe3c454ca7753842334226fd3327d280
+- name: ghcr.io/gythialy/golang-cross:v1.19.3-0@sha256:1072190e76d68f455f1bedb7430a633916b6629a722c42246037ac518fdb0ff2
   entrypoint: /bin/sh
   dir: "go/src/sigstore/rekor"
   env:
@@ -64,7 +64,7 @@
       gcloud auth configure-docker \
       && make release
 
-- name: ghcr.io/gythialy/golang-cross:v1.19.2-1@sha256:d7e32c3e7d89356fb014ded4c1be7baabe3c454ca7753842334226fd3327d280
+- name: ghcr.io/gythialy/golang-cross:v1.19.3-0@sha256:1072190e76d68f455f1bedb7430a633916b6629a722c42246037ac518fdb0ff2
   entrypoint: 'bash'
   dir: "go/src/sigstore/rekor"
   env:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/release/ko-sign-release-images.sh new/rekor-1.0.1/release/ko-sign-release-images.sh
--- old/rekor-1.0.0/release/ko-sign-release-images.sh   2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/release/ko-sign-release-images.sh   2022-11-10 16:26:56.000000000 +0100
@@ -36,6 +36,11 @@
     exit 1
 fi
 
+if [[ ! -f bRedisImagerefs ]]; then
+    echo "bRedisImagerefs not found"
+    exit 1
+fi
+
 if [[ ! -f trillianServerImagerefs ]]; then
     echo "trillianServerImagerefs not found"
     exit 1
@@ -49,11 +54,13 @@
 echo "Signing images with GCP KMS Key..."
 cosign sign --force --key "gcpkms://projects/$PROJECT_ID/locations/$KEY_LOCATION/keyRings/$KEY_RING/cryptoKeys/$KEY_NAME/versions/$KEY_VERSION" -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat rekorServerImagerefs)
 cosign sign --force --key "gcpkms://projects/$PROJECT_ID/locations/$KEY_LOCATION/keyRings/$KEY_RING/cryptoKeys/$KEY_NAME/versions/$KEY_VERSION" -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat rekorCliImagerefs)
+cosign sign --force --key "gcpkms://projects/$PROJECT_ID/locations/$KEY_LOCATION/keyRings/$KEY_RING/cryptoKeys/$KEY_NAME/versions/$KEY_VERSION" -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat bRedisImagerefs)
 cosign sign --force --key "gcpkms://projects/$PROJECT_ID/locations/$KEY_LOCATION/keyRings/$KEY_RING/cryptoKeys/$KEY_NAME/versions/$KEY_VERSION" -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat trillianServerImagerefs)
 cosign sign --force --key "gcpkms://projects/$PROJECT_ID/locations/$KEY_LOCATION/keyRings/$KEY_RING/cryptoKeys/$KEY_NAME/versions/$KEY_VERSION" -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat trillianSignerImagerefs)
 
 echo "Signing images with Keyless..."
 cosign sign --force -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat rekorServerImagerefs)
 cosign sign --force -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat rekorCliImagerefs)
+cosign sign --force -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat bRedisImagerefs)
 cosign sign --force -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat trillianServerImagerefs)
 cosign sign --force -a GIT_HASH="$GIT_HASH" -a GIT_VERSION="$GIT_VERSION" $(cat trillianSignerImagerefs)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/release/release.mk new/rekor-1.0.1/release/release.mk
--- old/rekor-1.0.0/release/release.mk  2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/release/release.mk  2022-11-10 16:26:56.000000000 +0100
@@ -5,12 +5,12 @@
 # used when releasing together with GCP CloudBuild
 .PHONY: release
 release:
-       CLI_LDFLAGS="$(CLI_LDFLAGS)" SERVER_LDFLAGS="$(SERVER_LDFLAGS)" goreleaser release --rm-dist --timeout 60m
+       CLI_LDFLAGS="$(CLI_LDFLAGS)" SERVER_LDFLAGS="$(SERVER_LDFLAGS)" goreleaser release --rm-dist --timeout 120m
 
 # used when need to validate the goreleaser
 .PHONY: snapshot
 snapshot:
-       CLI_LDFLAGS="$(CLI_LDFLAGS)" SERVER_LDFLAGS="$(SERVER_LDFLAGS)" goreleaser release --skip-sign --skip-publish --snapshot --rm-dist
+       CLI_LDFLAGS="$(CLI_LDFLAGS)" SERVER_LDFLAGS="$(SERVER_LDFLAGS)" goreleaser release --skip-sign --skip-publish --snapshot --rm-dist --timeout 120m
 
 ###########################
 # sign section
@@ -33,6 +33,10 @@
 copy-rekor-cli-signed-release-to-ghcr:
        cosign copy $(KO_PREFIX)/rekor-cli:$(GIT_VERSION) $(GHCR_PREFIX)/rekor-cli:$(GIT_VERSION)
 
+.PHONY: copy-backfill-redis-signed-release-to-ghcr
+copy-backfill-redis-signed-release-to-ghcr:
+       cosign copy $(KO_PREFIX)/backfill-redis:$(GIT_VERSION) $(GHCR_PREFIX)/backfill-redis:$(GIT_VERSION)
+
 .PHONY: copy-trillian-log-server-signed-release-to-ghcr
 copy-trillian-log-server-signed-release-to-ghcr:
        cosign copy $(KO_PREFIX)/trillian_log_server:$(GIT_VERSION) $(GHCR_PREFIX)/trillian_log_server:$(GIT_VERSION)
@@ -42,7 +46,7 @@
        cosign copy $(KO_PREFIX)/trillian_log_signer:$(GIT_VERSION) $(GHCR_PREFIX)/trillian_log_signer:$(GIT_VERSION)
 
 .PHONY: copy-signed-release-to-ghcr
-copy-signed-release-to-ghcr: copy-rekor-server-signed-release-to-ghcr copy-rekor-cli-signed-release-to-ghcr copy-trillian-log-signer-signed-release-to-ghcr copy-trillian-log-server-signed-release-to-ghcr
+copy-signed-release-to-ghcr: copy-rekor-server-signed-release-to-ghcr copy-rekor-cli-signed-release-to-ghcr copy-backfill-redis-signed-release-to-ghcr copy-trillian-log-signer-signed-release-to-ghcr copy-trillian-log-server-signed-release-to-ghcr
 
 ## --------------------------------------
 ## Dist / maybe we can deprecate
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/rekor-1.0.0/tests/e2e_test.go new/rekor-1.0.1/tests/e2e_test.go
--- old/rekor-1.0.0/tests/e2e_test.go   2022-10-17 19:35:23.000000000 +0200
+++ new/rekor-1.0.1/tests/e2e_test.go   2022-11-10 16:26:56.000000000 +0100
@@ -50,6 +50,7 @@
        "github.com/go-openapi/strfmt"
        "github.com/go-openapi/swag"
        "github.com/google/go-cmp/cmp"
+       "github.com/google/go-cmp/cmp/cmpopts"
        "github.com/in-toto/in-toto-golang/in_toto"
        slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
        "github.com/secure-systems-lab/go-securesystemslib/dsse"
@@ -145,7 +146,7 @@
 
        // Verify should fail initially
        out := runCliErr(t, "verify", "--artifact", artifactPath, "--signature", sigPath, "--public-key", pubPath)
-       outputContains(t, out, "404")
+       outputContains(t, out, "entry in log cannot be located")
 
        // It should upload successfully.
        out = runCli(t, "upload", "--artifact", artifactPath, "--signature", sigPath, "--public-key", pubPath)
@@ -981,7 +982,7 @@
 func TestVerifyNonExistantIndex(t *testing.T) {
        // this index is extremely likely to not exist
        out := runCliErr(t, "verify", "--log-index", "100000000")
-       outputContains(t, out, "404")
+       outputContains(t, out, "entry in log cannot be located")
 }
 
 func TestGetNonExistantUUID(t *testing.T) {
@@ -993,7 +994,7 @@
 func TestVerifyNonExistantUUID(t *testing.T) {
        // this uuid is extremely likely to not exist
        out := runCliErr(t, "verify", "--uuid", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
-       outputContains(t, out, "404")
+       outputContains(t, out, "entry in log cannot be located")
 
        // Check response code
        tid := getTreeID(t)
@@ -1011,9 +1012,11 @@
                t.Fatal(err)
        }
        c, _ := ioutil.ReadAll(resp.Body)
-       t.Log(string(c))
-       if resp.StatusCode != 404 {
-               t.Fatal("expected 404 status")
+       if resp.StatusCode != 200 {
+               t.Fatalf("expected status 200, got %d instead", resp.StatusCode)
+       }
+       if strings.TrimSpace(string(c)) != "[]" {
+               t.Fatalf("expected empty JSON array as response, got %s instead", string(c))
        }
 }
 
@@ -1224,10 +1227,8 @@
        if err != nil {
                t.Fatal(err)
        }
-       c, _ := ioutil.ReadAll(resp.Body)
-       t.Log(string(c))
        if resp.StatusCode != 400 {
-               t.Fatal("expected status 400")
+               t.Fatalf("expected status 400, got %d instead", resp.StatusCode)
        }
 }
 
@@ -1250,9 +1251,11 @@
                t.Fatal(err)
        }
        c, _ := ioutil.ReadAll(resp.Body)
-       t.Log(string(c))
-       if resp.StatusCode != 404 {
-               t.Fatal("expected 404 status")
+       if resp.StatusCode != 200 {
+               t.Fatalf("expected status 200, got %d instead", resp.StatusCode)
+       }
+       if strings.TrimSpace(string(c)) != "[]" {
+               t.Fatalf("expected empty JSON array as response, got %s instead", string(c))
        }
 }
 
@@ -1324,8 +1327,12 @@
                t.Fatal(err)
        }
        // Not Found because currently we don't detect that an unused random tree ID is invalid.
-       if resp.StatusCode != 404 {
-               t.Fatalf("expected 404 status code but got %d", resp.StatusCode)
+       c, _ := ioutil.ReadAll(resp.Body)
+       if resp.StatusCode != 200 {
+               t.Fatalf("expected status 200, got %d instead", resp.StatusCode)
+       }
+       if strings.TrimSpace(string(c)) != "[]" {
+               t.Fatalf("expected empty JSON array as response, got %s instead", string(c))
        }
 }
 
@@ -1400,3 +1407,470 @@
                t.Error("rekor_qps_by_api did not increment")
        }
 }
+
+// TestSearchLogQuerySingleShard provides coverage testing on the searchLogQuery endpoint within a single shard
+func TestSearchLogQuerySingleShard(t *testing.T) {
+
+       // Write the shared public key to a file
+       pubPath := filepath.Join(t.TempDir(), "logQuery_pubKey.asc")
+       pubKeyBytes := []byte(publicKey)
+       if err := ioutil.WriteFile(pubPath, pubKeyBytes, 0644); err != nil {
+               t.Fatal(err)
+       }
+
+       // Create two valid log entries to use for the test cases
+       firstArtifactPath := filepath.Join(t.TempDir(), "artifact1")
+       firstSigPath := filepath.Join(t.TempDir(), "signature1.asc")
+       createdPGPSignedArtifact(t, firstArtifactPath, firstSigPath)
+       firstArtifactBytes, _ := ioutil.ReadFile(firstArtifactPath)
+       firstSigBytes, _ := ioutil.ReadFile(firstSigPath)
+
+       firstRekord := rekord.V001Entry{
+               RekordObj: models.RekordV001Schema{
+                       Data: &models.RekordV001SchemaData{
+                               Content: strfmt.Base64(firstArtifactBytes),
+                       },
+                       Signature: &models.RekordV001SchemaSignature{
+                               Content: (*strfmt.Base64)(&firstSigBytes),
+                               Format:  
swag.String(models.RekordV001SchemaSignatureFormatPgp),
+                               Format:  swag.String(models.RekordV001SchemaSignatureFormatPgp),
+                               PublicKey: &models.RekordV001SchemaSignaturePublicKey{
+                       },
+               },
+       }
+       firstEntry := &models.Rekord{
+               APIVersion: swag.String(firstRekord.APIVersion()),
+               Spec:       firstRekord.RekordObj,
+       }
+
+       secondArtifactPath := filepath.Join(t.TempDir(), "artifact2")
+       secondSigPath := filepath.Join(t.TempDir(), "signature2.asc")
+       createdPGPSignedArtifact(t, secondArtifactPath, secondSigPath)
+       secondArtifactBytes, _ := ioutil.ReadFile(secondArtifactPath)
+       secondSigBytes, _ := ioutil.ReadFile(secondSigPath)
+
+       secondRekord := rekord.V001Entry{
+               RekordObj: models.RekordV001Schema{
+                       Data: &models.RekordV001SchemaData{
+                               Content: strfmt.Base64(secondArtifactBytes),
+                       },
+                       Signature: &models.RekordV001SchemaSignature{
+                               Content: (*strfmt.Base64)(&secondSigBytes),
+                               Format:  swag.String(models.RekordV001SchemaSignatureFormatPgp),
+                               PublicKey: &models.RekordV001SchemaSignaturePublicKey{
+                                       Content: (*strfmt.Base64)(&pubKeyBytes),
+                               },
+                       },
+               },
+       }
+       secondEntry := &models.Rekord{
+               APIVersion: swag.String(secondRekord.APIVersion()),
+               Spec:       secondRekord.RekordObj,
+       }
+
+       // Now upload them to rekor!
+       firstOut := runCli(t, "upload", "--artifact", firstArtifactPath, "--signature", firstSigPath, "--public-key", pubPath)
+       secondOut := runCli(t, "upload", "--artifact", secondArtifactPath, "--signature", secondSigPath, "--public-key", pubPath)
+
+       firstEntryID := getUUIDFromUploadOutput(t, firstOut)
+       firstUUID, _ := sharding.GetUUIDFromIDString(firstEntryID)
+       firstIndex := int64(getLogIndexFromUploadOutput(t, firstOut))
+       secondEntryID := getUUIDFromUploadOutput(t, secondOut)
+       secondUUID, _ := sharding.GetUUIDFromIDString(secondEntryID)
+       secondIndex := int64(getLogIndexFromUploadOutput(t, secondOut))
+
+       // this is invalid because treeID is > int64
+       invalidEntryID := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeeefff"
+       invalidIndex := int64(-1)
+       invalidEntry := &models.Rekord{
+               APIVersion: swag.String(secondRekord.APIVersion()),
+       }
+
+       nonexistentArtifactPath := filepath.Join(t.TempDir(), "artifact3")
+       nonexistentSigPath := filepath.Join(t.TempDir(), "signature3.asc")
+       createdPGPSignedArtifact(t, nonexistentArtifactPath, nonexistentSigPath)
+       nonexistentArtifactBytes, _ := ioutil.ReadFile(nonexistentArtifactPath)
+       nonexistentSigBytes, _ := ioutil.ReadFile(nonexistentSigPath)
+
+       nonexistentEntryID := "0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeeefff"
+       nonexistentUUID := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeeefff"
+       nonexistentIndex := int64(999999999) // assuming we don't put that many entries in the log
+       nonexistentRekord := rekord.V001Entry{
+               RekordObj: models.RekordV001Schema{
+                       Data: &models.RekordV001SchemaData{
+                               Content: strfmt.Base64(nonexistentArtifactBytes),
+                       },
+                       Signature: &models.RekordV001SchemaSignature{
+                               Content: (*strfmt.Base64)(&nonexistentSigBytes),
+                               Format:  swag.String(models.RekordV001SchemaSignatureFormatPgp),
+                               PublicKey: &models.RekordV001SchemaSignaturePublicKey{
+                                       Content: (*strfmt.Base64)(&pubKeyBytes),
+                               },
+                       },
+               },
+       }
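+       // a parseable proposed entry whose artifact was never uploaded, so searches should not find it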
+       nonexistentEntry := &models.Rekord{
+               APIVersion: swag.String("0.0.1"),
+               Spec:       nonexistentRekord.RekordObj,
+       }
+
+       type testCase struct {
+               name                      string
+               expectSuccess             bool
+               expectedErrorResponseCode int64
+               expectedEntryIDs          []string
+               entryUUIDs                []string
+               logIndexes                []*int64
+               entries                   []models.ProposedEntry
+       }
+
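+       // each case drives SearchLogQuery through one (or a mixture) of its three input types: entryUUIDs, logIndexes, and full entries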
+       testCases := []testCase{
+               {
+                       name:             "empty entryUUIDs",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       entryUUIDs:       []string{},
+               },
+               {
+                       name:             "first in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       entryUUIDs:       []string{firstEntryID},
+               },
+               {
+                       name:             "first in log (using UUID in entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       entryUUIDs:       []string{firstUUID},
+               },
+               {
+                       name:             "second in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{secondEntryID},
+                       entryUUIDs:       []string{secondEntryID},
+               },
+               {
+                       name:                      "invalid entryID (using entryUUIDs)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusBadRequest,
+                       entryUUIDs:                []string{invalidEntryID},
+               },
+               {
+                       name:             "valid entryID not in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       entryUUIDs:       []string{nonexistentEntryID},
+               },
+               {
+                       name:             "valid UUID not in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       entryUUIDs:       []string{nonexistentUUID},
+               },
+               {
+                       name:             "both valid entries in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{firstEntryID, secondEntryID},
+               },
+               {
+                       name:             "both valid entries in log (one with UUID, other with entryID) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{firstEntryID, secondUUID},
+               },
+               {
+                       name:                      "one valid entry in log, one malformed (using entryUUIDs)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusBadRequest,
+                       entryUUIDs:                []string{firstEntryID, invalidEntryID},
+               },
+               {
+                       name:             "one existing, one valid entryID but not in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       entryUUIDs:       []string{firstEntryID, nonexistentEntryID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{firstEntryID, secondEntryID, nonexistentEntryID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 1) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{firstEntryID, nonexistentEntryID, secondEntryID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 2) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{nonexistentEntryID, firstEntryID, secondEntryID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 3) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{nonexistentUUID, firstEntryID, secondEntryID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 4) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{nonexistentEntryID, firstUUID, secondEntryID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 5) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{nonexistentEntryID, firstEntryID, secondUUID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 6) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{nonexistentUUID, firstEntryID, secondUUID},
+               },
+               {
+                       name:             "two existing, one valid entryID but not in log (different ordering 7) (using entryUUIDs)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{nonexistentEntryID, firstUUID, secondUUID},
+               },
+               {
+                       name:                      "request more than 10 entries (using entryUUIDs)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       entryUUIDs:                []string{firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID},
+               },
+               {
+                       name:             "empty logIndexes",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       logIndexes:       []*int64{},
+               },
+               {
+                       name:             "first in log (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       logIndexes:       []*int64{&firstIndex},
+               },
+               {
+                       name:             "second in log (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{secondEntryID},
+                       logIndexes:       []*int64{&secondIndex},
+               },
+               {
+                       name:                      "invalid logIndex (using logIndexes)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       logIndexes:                []*int64{&invalidIndex},
+               },
+               {
+                       name:             "valid index not in log (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       logIndexes:       []*int64{&nonexistentIndex},
+               },
+               {
+                       name:             "both valid entries in log (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       logIndexes:       []*int64{&firstIndex, &secondIndex},
+               },
+               {
+                       name:                      "one valid entry in log, one malformed (using logIndexes)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       logIndexes:                []*int64{&firstIndex, &invalidIndex},
+               },
+               {
+                       name:             "one existing, one valid Index but not in log (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       logIndexes:       []*int64{&firstIndex, &nonexistentIndex},
+               },
+               {
+                       name:             "two existing, one valid Index but not in log (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       logIndexes:       []*int64{&firstIndex, &secondIndex, &nonexistentIndex},
+               },
+               {
+                       name:             "two existing, one valid Index but not in log (different ordering 1) (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       logIndexes:       []*int64{&firstIndex, &nonexistentIndex, &secondIndex},
+               },
+               {
+                       name:             "two existing, one valid Index but not in log (different ordering 2) (using logIndexes)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       logIndexes:       []*int64{&nonexistentIndex, &firstIndex, &secondIndex},
+               },
+               {
+                       name:                      "request more than 10 entries (using logIndexes)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       logIndexes:                []*int64{&firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex},
+               },
+               {
+                       name:             "empty entries",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       entries:          []models.ProposedEntry{},
+               },
+               {
+                       name:             "first in log (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       entries:          []models.ProposedEntry{firstEntry},
+               },
+               {
+                       name:             "second in log (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{secondEntryID},
+                       entries:          []models.ProposedEntry{secondEntry},
+               },
+               {
+                       name:                      "invalid entry (using entries)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       entries:                   []models.ProposedEntry{invalidEntry},
+               },
+               {
+                       name:             "valid entry not in log (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{},
+                       entries:          []models.ProposedEntry{nonexistentEntry},
+               },
+               {
+                       name:             "both valid entries in log (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entries:          []models.ProposedEntry{firstEntry, secondEntry},
+               },
+               {
+                       name:                      "one valid entry in log, one malformed (using entries)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       entries:                   []models.ProposedEntry{firstEntry, invalidEntry},
+               },
+               {
+                       name:             "one existing, one valid Index but not in log (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID},
+                       entries:          []models.ProposedEntry{firstEntry, nonexistentEntry},
+               },
+               {
+                       name:             "two existing, one valid Index but not in log (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entries:          []models.ProposedEntry{firstEntry, secondEntry, nonexistentEntry},
+               },
+               {
+                       name:             "two existing, one valid Index but not in log (different ordering 1) (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entries:          []models.ProposedEntry{firstEntry, nonexistentEntry, secondEntry},
+               },
+               {
+                       name:             "two existing, one valid Index but not in log (different ordering 2) (using entries)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID},
+                       entries:          []models.ProposedEntry{nonexistentEntry, firstEntry, secondEntry},
+               },
+               {
+                       name:                      "request more than 10 entries (using entries)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       entries:                   []models.ProposedEntry{firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry},
+               },
+               {
+                       name:                      "request more than 10 entries (using mixture)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       entryUUIDs:                []string{firstEntryID, firstEntryID, firstEntryID, firstEntryID},
+                       logIndexes:                []*int64{&firstIndex, &firstIndex, &firstIndex},
+                       entries:                   []models.ProposedEntry{firstEntry, firstEntry, firstEntry, firstEntry},
+               },
+               {
+                       name:                      "request valid and invalid (using mixture)",
+                       expectSuccess:             false,
+                       expectedErrorResponseCode: http.StatusUnprocessableEntity,
+                       entryUUIDs:                []string{firstEntryID, firstEntryID, firstEntryID, firstEntryID},
+                       logIndexes:                []*int64{&invalidIndex, &invalidIndex, &invalidIndex},
+                       entries:                   []models.ProposedEntry{firstEntry, firstEntry, firstEntry},
+               },
+               {
+                       name:             "request valid and nonexistent (using mixture)",
+                       expectSuccess:    true,
+                       expectedEntryIDs: []string{firstEntryID, secondEntryID, firstEntryID, secondEntryID, firstEntryID, secondEntryID},
+                       entryUUIDs:       []string{firstEntryID, secondEntryID, nonexistentEntryID},
+                       logIndexes:       []*int64{&firstIndex, &secondIndex, &nonexistentIndex},
+                       entries:          []models.ProposedEntry{firstEntry, secondEntry, nonexistentEntry},
+               },
+       }
+
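+       // a fresh client per case, with retries disabled so error responses are reported on the first attempt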
+       for _, test := range testCases {
+               rekorClient, err := client.GetRekorClient("http://localhost:3000", client.WithRetryCount(0))
+               if err != nil {
+                       t.Fatal(err)
+               }
+               t.Run(test.name, func(t *testing.T) {
+                       params := entries.NewSearchLogQueryParams()
+                       entry := &models.SearchLogQuery{}
+                       if len(test.entryUUIDs) > 0 {
+                               t.Log("trying with entryUUIDs: ", test.entryUUIDs)
+                               entry.EntryUUIDs = test.entryUUIDs
+                       }
+                       if len(test.logIndexes) > 0 {
+                               entry.LogIndexes = test.logIndexes
+                       }
+                       if len(test.entries) > 0 {
+                               entry.SetEntries(test.entries)
+                       }
+                       params.SetEntry(entry)
+
+                       resp, err := rekorClient.Entries.SearchLogQuery(params)
+                       if err != nil {
+                               if !test.expectSuccess {
+                                       if _, ok := err.(*entries.SearchLogQueryBadRequest); ok {
+                                               if test.expectedErrorResponseCode != http.StatusBadRequest {
+                                                       t.Fatalf("unexpected error code received: expected %d, got %d: %v", test.expectedErrorResponseCode, http.StatusBadRequest, err)
+                                               }
+                                       } else if _, ok := err.(*entries.SearchLogQueryUnprocessableEntity); ok {
+                                               if test.expectedErrorResponseCode != http.StatusUnprocessableEntity {
+                                                       t.Fatalf("unexpected error code received: expected %d, got %d: %v", test.expectedErrorResponseCode, http.StatusUnprocessableEntity, err)
+                                               }
+                                       } else if e, ok := err.(*entries.SearchLogQueryDefault); ok {
+                                               t.Fatalf("unexpected error: %v", e)
+                                       }
+                               } else {
+                                       t.Fatalf("unexpected error: %v", err)
+                               }
+                       } else {
+                               if len(resp.Payload) != len(test.expectedEntryIDs) {
+                                       t.Fatalf("unexpected number of responses received: expected %d, got %d", len(test.expectedEntryIDs), len(resp.Payload))
+                               }
+                               // walk responses, build up list of returned entry IDs
+                               returnedEntryIDs := []string{}
+                               for _, entry := range resp.Payload {
+                                       // each payload item is a map keyed by entry ID
+                                       for entryID := range entry {
+                                               t.Log("received entry: ", entryID)
+                                               returnedEntryIDs = append(returnedEntryIDs, entryID)
+                                       }
+                               }
+                               // we have the expected number of responses, let's ensure they're the ones we expected
+                               if out := cmp.Diff(returnedEntryIDs, test.expectedEntryIDs, cmpopts.SortSlices(func(a, b string) bool { return a < b })); out != "" {
+                                       t.Fatalf("unexpected responses: %v", out)
+                               }
+                       }
+               })
+       }
+}

++++++ vendor.tar.xz ++++++
