Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package rqlite for openSUSE:Factory checked 
in at 2026-03-10 17:53:31
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/rqlite (Old)
 and      /work/SRC/openSUSE:Factory/.rqlite.new.8177 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "rqlite"

Tue Mar 10 17:53:31 2026 rev:45 rq:1337825 version:9.4.3

Changes:
--------
--- /work/SRC/openSUSE:Factory/rqlite/rqlite.changes    2026-02-14 
21:39:19.625616813 +0100
+++ /work/SRC/openSUSE:Factory/.rqlite.new.8177/rqlite.changes  2026-03-10 
17:53:45.864666586 +0100
@@ -1,0 +2,8 @@
+Mon Mar 09 20:14:26 UTC 2026 - Andreas Stieger <[email protected]>
+
+- Update to version 9.4.3:
+  * Remove mustTruncate as it can result in a deadlock
+  * Fix: INSERT/UPDATE with RETURNING clause will not abort
+    transaction on error
+
+-------------------------------------------------------------------

Old:
----
  rqlite-9.4.1.tar.xz

New:
----
  rqlite-9.4.3.tar.xz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ rqlite.spec ++++++
--- /var/tmp/diff_new_pack.6dAZFx/_old  2026-03-10 17:53:46.688700609 +0100
+++ /var/tmp/diff_new_pack.6dAZFx/_new  2026-03-10 17:53:46.688700609 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           rqlite
-Version:        9.4.1
+Version:        9.4.3
 Release:        0
 Summary:        Distributed relational database built on SQLite
 License:        MIT

++++++ _service ++++++
--- /var/tmp/diff_new_pack.6dAZFx/_old  2026-03-10 17:53:46.736702590 +0100
+++ /var/tmp/diff_new_pack.6dAZFx/_new  2026-03-10 17:53:46.740702755 +0100
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/rqlite/rqlite.git</param>
     <param name="scm">git</param>
     <param name="exclude">.git</param>
-    <param name="revision">v9.4.1</param>
+    <param name="revision">v9.4.3</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="changesgenerate">enable</param>
     <param name="versionrewrite-pattern">v(.*)</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.6dAZFx/_old  2026-03-10 17:53:46.768703912 +0100
+++ /var/tmp/diff_new_pack.6dAZFx/_new  2026-03-10 17:53:46.772704077 +0100
@@ -1,7 +1,7 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/rqlite/rqlite.git</param>
-    <param 
name="changesrevision">df349344430a990f2acfda0c64b99a8e37f4837c</param>
+    <param 
name="changesrevision">379044842cbed8e9ff2b7f3aa69539f2a7494a9b</param>
   </service>
 </servicedata>
 (No newline at EOF)

++++++ rqlite-9.4.1.tar.xz -> rqlite-9.4.3.tar.xz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/rqlite-9.4.1/db/db.go new/rqlite-9.4.3/db/db.go
--- old/rqlite-9.4.1/db/db.go   2026-02-12 05:22:37.000000000 +0100
+++ new/rqlite-9.4.3/db/db.go   2026-03-09 15:12:14.000000000 +0100
@@ -1043,7 +1043,7 @@
                response.Result = &command.ExecuteQueryResponse_Error{
                        Error: err.Error(),
                }
-               return response, nil
+               return response, err
        }
 
        if timeout > 0 {
@@ -1059,7 +1059,7 @@
                        response.Result = &command.ExecuteQueryResponse_Error{
                                Error: err.Error(),
                        }
-                       return response, nil
+                       return response, err
                }
                response.Result = &command.ExecuteQueryResponse_Q{
                        Q: rows,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/rqlite-9.4.1/store/store.go 
new/rqlite-9.4.3/store/store.go
--- old/rqlite-9.4.1/store/store.go     2026-02-12 05:22:37.000000000 +0100
+++ new/rqlite-9.4.3/store/store.go     2026-03-09 15:12:14.000000000 +0100
@@ -158,6 +158,7 @@
        numSnapshotsIncremental           = "num_snapshots_incremental"
        numFullCheckpointFailed           = "num_full_checkpoint_failed"
        numWALCheckpointTruncateFailed    = "num_wal_checkpoint_truncate_failed"
+       numWALCheckpointAllMovedFailed    = 
"num_wal_checkpoint_all_moved_failed"
        numWALCheckpointIncomplete        = "num_wal_checkpoint_incomplete"
        numWALMustCheckpoint              = "num_wal_must_checkpoint"
        numAutoVacuums                    = "num_auto_vacuums"
@@ -227,6 +228,7 @@
        stats.Add(numSnapshotsIncremental, 0)
        stats.Add(numFullCheckpointFailed, 0)
        stats.Add(numWALCheckpointTruncateFailed, 0)
+       stats.Add(numWALCheckpointAllMovedFailed, 0)
        stats.Add(numWALCheckpointIncomplete, 0)
        stats.Add(numWALMustCheckpoint, 0)
        stats.Add(numAutoVacuums, 0)
@@ -347,9 +349,8 @@
        snapshotWDone  chan struct{}
 
        // Snapshotting synchronization and and management
-       snapshotSync            *rsync.SyncChannels
-       snapshotCAS             *rsync.CheckAndSet
-       numFailedSnapshotsInRow int
+       snapshotSync *rsync.SyncChannels
+       snapshotCAS  *rsync.CheckAndSet
 
        // Latest log entry index actually reflected by the FSM. Due to Raft 
code
        // these values are not updated automatically after a Snapshot-restore.
@@ -2642,6 +2643,8 @@
                        }
                        chkTStartTime := time.Now()
                        if err := s.checkpointWAL(); err != nil {
+                               walTmpFD.Close()
+                               os.Remove(walTmpFD.Name())
                                stats.Add(numWALCheckpointTruncateFailed, 1)
                                return nil, fmt.Errorf("incremental snapshot 
can't complete due to WAL checkpoint error (will retry): %s",
                                        err.Error())
@@ -2949,38 +2952,17 @@
 }
 
 // checkpointWAL performs a checkpoint of the WAL, truncating it. If it 
returns an error
-// the checkpoint operation can be retried at the caller's discretion. If this 
function
-// encounters an error such that the checkpoint must be retried, it will 
automatically do
-// that until it is successful (or a timeout fires).
-//
-// This function also implements the policy that if a certain number of 
checkpoint attempts
-// fail in a row, it will loop until is successful.
+// the checkpoint operation can be retried at the caller's discretion.
 func (s *Store) checkpointWAL() (retErr error) {
-       defer func() {
-               if retErr != nil {
-                       s.numFailedSnapshotsInRow++
-                       if s.numFailedSnapshotsInRow == maxFailedSnapshotsInRow 
{
-                               s.logger.Printf("too many failed snapshots in a 
row (%d), forcing WAL checkpoint truncate",
-                                       s.numFailedSnapshotsInRow)
-                               s.mustTruncateCheckpoint()
-                               s.numFailedSnapshotsInRow = 0
-                               retErr = nil
-                       }
-               } else {
-                       s.numFailedSnapshotsInRow = 0
-               }
-       }()
-
        meta, err := s.db.Checkpoint(sql.CheckpointTruncate)
        if err != nil {
                return err
        }
        if !meta.Success() {
                if meta.Pages == meta.Moved {
-                       s.logger.Printf("checkpoint moved %d/%d pages, but did 
not truncate WAL, forcing truncate",
+                       s.logger.Printf("checkpoint moved all pages (%d/%d), 
but failed to truncate WAL",
                                meta.Moved, meta.Pages)
-                       s.mustTruncateCheckpoint()
-                       return nil
+                       stats.Add(numWALCheckpointAllMovedFailed, 1)
                }
                stats.Add(numWALCheckpointIncomplete, 1)
                return fmt.Errorf("checkpoint incomplete: %s", meta.String())
@@ -2988,45 +2970,6 @@
        return nil
 }
 
-// mustTruncateCheckpoint truncates the checkpointed WAL, retrying until 
successful or
-// timing out.
-//
-// This should be called if we hit a specifc edge case where all pages were 
moved but some
-// reader blocked truncation. The next write could start overwriting WAL 
frames at the start
-// of the WAL which would mean we would lose WAL data, so we need to forcibly 
truncate here.
-// We do this by blocking all readers (writes are already blocked). This 
handling is due to
-// research into SQLite and not seen as of yet.
-//
-// Finally, we could still timeout here while trying to truncate. This could 
happen if a
-// reader external to rqlite just won't let go.
-func (s *Store) mustTruncateCheckpoint() {
-       startT := time.Now()
-       defer func() {
-               s.logger.Printf("forced WAL truncate checkpoint took %s", 
time.Since(startT))
-       }()
-
-       stats.Add(numWALMustCheckpoint, 1)
-       s.readerMu.Lock()
-       defer s.readerMu.Unlock()
-
-       ticker := time.NewTicker(mustWALCheckpointDelay)
-       defer ticker.Stop()
-       for {
-               select {
-               case <-ticker.C:
-                       meta, err := s.db.Checkpoint(sql.CheckpointTruncate)
-                       if err == nil && meta.Success() {
-                               return
-                       }
-               case <-time.After(mustWALCheckpointTimeout):
-                       msg := fmt.Sprintf("timed out trying to truncate 
checkpoint WAL after %s,"+
-                               " probably due to external long-running read - 
aborting",
-                               mustWALCheckpointTimeout)
-                       s.logger.Fatal(msg)
-               }
-       }
-}
-
 // selfLeaderChange is called when this node detects that its leadership
 // status has changed.
 func (s *Store) selfLeaderChange(leader bool) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/rqlite-9.4.1/store/store_snapshot_test.go 
new/rqlite-9.4.3/store/store_snapshot_test.go
--- old/rqlite-9.4.1/store/store_snapshot_test.go       2026-02-12 
05:22:37.000000000 +0100
+++ new/rqlite-9.4.3/store/store_snapshot_test.go       2026-03-09 
15:12:14.000000000 +0100
@@ -291,115 +291,6 @@
        <-ctx.Done()
 }
 
-// Test_SingleNode_SnapshotFail_Blocked_Retry tests that a snapshot operation
-// that requires a forced checkpoint and truncation does succeed once the
-// blocking query unblocks.
-func Test_SingleNode_SnapshotFail_Blocked_Retry(t *testing.T) {
-       s, ln := mustNewStore(t)
-       defer ln.Close()
-
-       s.SnapshotThreshold = 8192
-       s.SnapshotInterval = time.Hour
-       s.NoSnapshotOnClose = true
-       if err := s.Open(); err != nil {
-               t.Fatalf("failed to open single-node store: %s", err.Error())
-       }
-       defer s.Close(true)
-       if err := s.Bootstrap(NewServer(s.ID(), s.Addr(), true)); err != nil {
-               t.Fatalf("failed to bootstrap single-node store: %s", 
err.Error())
-       }
-       if _, err := s.WaitForLeader(10 * time.Second); err != nil {
-               t.Fatalf("Error waiting for leader: %s", err)
-       }
-       er := executeRequestFromString(`CREATE TABLE foo (id INTEGER NOT NULL 
PRIMARY KEY, name TEXT)`,
-               false, false)
-       _, _, err := s.Execute(context.Background(), er)
-       if err != nil {
-               t.Fatalf("failed to execute on single node: %s", err.Error())
-       }
-
-       er = executeRequestFromString(`INSERT INTO foo(name) VALUES("fiona")`, 
false, false)
-       _, _, err = s.Execute(context.Background(), er)
-       if err != nil {
-               t.Fatalf("failed to execute on single node: %s", err.Error())
-       }
-
-       ctx, cancelFunc := context.WithCancel(context.Background())
-       go func() {
-               qr := queryRequestFromString("SELECT * FROM foo", false, false)
-               qr.GetRequest().Statements[0].ForceStall = true
-
-               blockingDB, err := db.Open(s.dbPath, false, true)
-               if err != nil {
-                       t.Errorf("failed to open blocking DB connection: %s", 
err.Error())
-               }
-               defer blockingDB.Close()
-
-               _, err = blockingDB.QueryWithContext(ctx, qr.GetRequest(), 
false)
-               if err != nil {
-                       t.Errorf("failed to execute stalled query on blocking 
DB connection: %s", err.Error())
-               }
-       }()
-       time.Sleep(1 * time.Second)
-
-       success := false
-       var wg sync.WaitGroup
-       wg.Go(func() {
-               if err := s.Snapshot(0); err != nil {
-                       t.Errorf("failed to snapshot single-node store with 
released stalled query: %s", err.Error())
-               } else {
-                       success = true
-               }
-       })
-       time.Sleep(1 * time.Second)
-       cancelFunc()
-       wg.Wait()
-       if !success {
-               t.Fatalf("expected snapshot to succeed after blocking query 
released")
-       }
-
-       // Again, this time with a persistent snapshot.
-       er = executeRequestFromString(`INSERT INTO foo(name) VALUES("fiona")`, 
false, false)
-       _, _, err = s.Execute(context.Background(), er)
-       if err != nil {
-               t.Fatalf("failed to execute on single node: %s", err.Error())
-       }
-
-       ctx, cancelFunc = context.WithCancel(context.Background())
-       go func() {
-               qr := queryRequestFromString("SELECT * FROM foo", false, false)
-               qr.GetRequest().Statements[0].ForceStall = true
-
-               blockingDB, err := db.Open(s.dbPath, false, true)
-               if err != nil {
-                       t.Errorf("failed to open blocking DB connection: %s", 
err.Error())
-               }
-               defer blockingDB.Close()
-
-               _, err = blockingDB.QueryWithContext(ctx, qr.GetRequest(), 
false)
-               if err != nil {
-                       t.Errorf("failed to execute stalled query on blocking 
DB connection: %s", err.Error())
-               }
-       }()
-       time.Sleep(1 * time.Second)
-
-       success = false
-       var wg2 sync.WaitGroup
-       wg2.Go(func() {
-               if err := s.Snapshot(0); err != nil {
-                       t.Errorf("failed to snapshot single-node store with 
second released stalled query: %s", err.Error())
-               } else {
-                       success = true
-               }
-       })
-       time.Sleep(1 * time.Second)
-       cancelFunc()
-       wg2.Wait()
-       if !success {
-               t.Fatalf("expected snapshot to succeed after blocking query 
released")
-       }
-}
-
 func Test_SingleNode_SnapshotWithAutoOptimize_Stress(t *testing.T) {
        s, ln := mustNewStore(t)
        defer ln.Close()

++++++ vendor.tar.xz ++++++

Reply via email to