Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package forgejo-runner for openSUSE:Factory 
checked in at 2025-10-07 18:28:53
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/forgejo-runner (Old)
 and      /work/SRC/openSUSE:Factory/.forgejo-runner.new.11973 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "forgejo-runner"

Tue Oct  7 18:28:53 2025 rev:28 rq:1309604 version:11.1.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/forgejo-runner/forgejo-runner.changes    
2025-09-20 22:05:38.998621077 +0200
+++ /work/SRC/openSUSE:Factory/.forgejo-runner.new.11973/forgejo-runner.changes 
2025-10-07 18:31:11.416614674 +0200
@@ -1,0 +2,17 @@
+Tue Oct  7 11:54:22 UTC 2025 - Richard Rahl <[email protected]>
+
+- update to version 11.1.2:
+  * feat: support evaluating workflow-level concurrency blocks in jobparser
+  * fix(security): a multiline secret may be found in a single log entry
+  * fix: improve logging to diagnose mystery job terminations
+  * fix: modifying a cache secret does not invalidate cached entries
+  * fix: allow GC & cache operations to operate concurrently
+  * fix: do not attempt to run the LXC stop script with self-hosted
+  * fix: event.pull_request.action == closed can use the cache of the base
+    repository
+  * fix: Correctly override the value of Forgejo-Cache-Host when
+    ACTIONS_CACHE_URL is overridden
+  * fix: a composite action must not change the result of the calling step
+    before it completes
+
+-------------------------------------------------------------------

Old:
----
  forgejo-runner-11.1.1.obscpio

New:
----
  forgejo-runner-11.1.2.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ forgejo-runner.spec ++++++
--- /var/tmp/diff_new_pack.9kXHYp/_old  2025-10-07 18:31:12.268650705 +0200
+++ /var/tmp/diff_new_pack.9kXHYp/_new  2025-10-07 18:31:12.272650875 +0200
@@ -19,7 +19,7 @@
 %define services %{name}.service
 
 Name:           forgejo-runner
-Version:        11.1.1
+Version:        11.1.2
 Release:        0
 Summary:        Daemon that connects to a Forgejo instance and runs CI jobs
 License:        GPL-3.0-or-later

++++++ _service ++++++
--- /var/tmp/diff_new_pack.9kXHYp/_old  2025-10-07 18:31:12.312652566 +0200
+++ /var/tmp/diff_new_pack.9kXHYp/_new  2025-10-07 18:31:12.316652735 +0200
@@ -2,7 +2,7 @@
   <service name="obs_scm" mode="manual">
     <param name="url">https://code.forgejo.org/forgejo/runner</param>
     <param name="scm">git</param>
-    <param name="revision">refs/tags/v11.1.1</param>
+    <param name="revision">refs/tags/v11.1.2</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="changesgenerate">disable</param>
     <param name="versionrewrite-pattern">v(.*)</param>

++++++ forgejo-runner-11.1.1.obscpio -> forgejo-runner-11.1.2.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/.forgejo/workflows/build-release-integration.yml 
new/forgejo-runner-11.1.2/.forgejo/workflows/build-release-integration.yml
--- old/forgejo-runner-11.1.1/.forgejo/workflows/build-release-integration.yml  
2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/.forgejo/workflows/build-release-integration.yml  
2025-10-03 10:22:06.000000000 +0200
@@ -19,7 +19,7 @@
 enable-email-notifications: true
 
 env:
-  FORGEJO_VERSION: 11.0.5 # renovate: datasource=docker 
depName=code.forgejo.org/forgejo/forgejo
+  FORGEJO_VERSION: 11.0.6 # renovate: datasource=docker 
depName=code.forgejo.org/forgejo/forgejo
 
 jobs:
   release-simulation:
@@ -29,7 +29,7 @@
       - uses:  https://data.forgejo.org/actions/checkout@v4
 
       - id: forgejo
-        uses: https://data.forgejo.org/actions/[email protected]
+        uses: https://data.forgejo.org/actions/[email protected]
         with:
           user: root
           password: admin1234
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/.forgejo/workflows/docker-build-push-action-in-lxc.yml
 
new/forgejo-runner-11.1.2/.forgejo/workflows/docker-build-push-action-in-lxc.yml
--- 
old/forgejo-runner-11.1.1/.forgejo/workflows/docker-build-push-action-in-lxc.yml
    2025-09-18 18:17:56.000000000 +0200
+++ 
new/forgejo-runner-11.1.2/.forgejo/workflows/docker-build-push-action-in-lxc.yml
    2025-10-03 10:22:06.000000000 +0200
@@ -21,7 +21,7 @@
 enable-email-notifications: true
 
 env:
-  FORGEJO_VERSION: 11.0.5 # renovate: datasource=docker 
depName=code.forgejo.org/forgejo/forgejo
+  FORGEJO_VERSION: 11.0.6 # renovate: datasource=docker 
depName=code.forgejo.org/forgejo/forgejo
   FORGEJO_USER: root
   FORGEJO_PASSWORD: admin1234
 
@@ -34,7 +34,7 @@
 
       - name: install Forgejo so it can be used as a container registry
         id: registry
-        uses: https://data.forgejo.org/actions/[email protected]
+        uses: https://data.forgejo.org/actions/[email protected]
         with:
           user: ${{ env.FORGEJO_USER }}
           password: ${{ env.FORGEJO_PASSWORD }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/.forgejo/workflows/example-lxc-systemd.yml 
new/forgejo-runner-11.1.2/.forgejo/workflows/example-lxc-systemd.yml
--- old/forgejo-runner-11.1.1/.forgejo/workflows/example-lxc-systemd.yml        
2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/.forgejo/workflows/example-lxc-systemd.yml        
2025-10-03 10:22:06.000000000 +0200
@@ -18,7 +18,7 @@
 jobs:
   example-lxc-systemd:
     if: vars.ROLE == 'forgejo-coding'
-    runs-on: lxc-bookworm
+    runs-on: lxc-trixie
     steps:
        - uses: https://data.forgejo.org/actions/checkout@v4
 
@@ -53,7 +53,7 @@
            done
 
        - id: forgejo
-         uses: https://data.forgejo.org/actions/[email protected]
+         uses: https://data.forgejo.org/actions/[email protected]
          with:
            user: root
            password: admin1234
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/Makefile 
new/forgejo-runner-11.1.2/Makefile
--- old/forgejo-runner-11.1.1/Makefile  2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/Makefile  2025-10-03 10:22:06.000000000 +0200
@@ -14,7 +14,7 @@
 GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name 
"generated.*")
 
 MOCKERY_PACKAGE ?= github.com/vektra/mockery/[email protected] # renovate: 
datasource=go
-GOLANGCI_LINT_PACKAGE ?= 
github.com/golangci/golangci-lint/v2/cmd/[email protected] # renovate: 
datasource=go
+GOLANGCI_LINT_PACKAGE ?= 
github.com/golangci/golangci-lint/v2/cmd/[email protected] # renovate: 
datasource=go
 
 DOCKER_IMAGE ?= gitea/act_runner
 DOCKER_TAG ?= nightly
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/artifactcache/caches.go 
new/forgejo-runner-11.1.2/act/artifactcache/caches.go
--- old/forgejo-runner-11.1.1/act/artifactcache/caches.go       2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/artifactcache/caches.go       2025-10-03 
10:22:06.000000000 +0200
@@ -19,12 +19,13 @@
 
 //go:generate mockery --inpackage --name caches
 type caches interface {
-       openDB() (*bolthold.Store, error)
+       getDB() *bolthold.Store
        validateMac(rundata RunData) (string, error)
        readCache(id uint64, repo string) (*Cache, error)
        useCache(id uint64) error
        setgcAt(at time.Time)
        gcCache()
+       close()
 
        serve(w http.ResponseWriter, r *http.Request, id uint64)
        commit(id uint64, size int64) (int64, error)
@@ -38,6 +39,8 @@
        logger  logrus.FieldLogger
        secret  string
 
+       db *bolthold.Store
+
        gcing atomic.Bool
        gcAt  time.Time
 }
@@ -68,12 +71,6 @@
        }
        c.storage = storage
 
-       c.gcCache()
-
-       return c, nil
-}
-
-func (c *cachesImpl) openDB() (*bolthold.Store, error) {
        file := filepath.Join(c.dir, "bolt.db")
        db, err := bolthold.Open(file, 0o644, &bolthold.Options{
                Encoder: json.Marshal,
@@ -87,7 +84,22 @@
        if err != nil {
                return nil, fmt.Errorf("Open(%s): %w", file, err)
        }
-       return db, nil
+       c.db = db
+
+       c.gcCache()
+
+       return c, nil
+}
+
+func (c *cachesImpl) close() {
+       if c.db != nil {
+               c.db.Close()
+               c.db = nil
+       }
+}
+
+func (c *cachesImpl) getDB() *bolthold.Store {
+       return c.db
 }
 
 var findCacheWithIsolationKeyFallback = func(db *bolthold.Store, repo string, 
keys []string, version, writeIsolationKey string) (*Cache, error) {
@@ -156,11 +168,7 @@
 }
 
 func (c *cachesImpl) readCache(id uint64, repo string) (*Cache, error) {
-       db, err := c.openDB()
-       if err != nil {
-               return nil, err
-       }
-       defer db.Close()
+       db := c.getDB()
        cache := &Cache{}
        if err := db.Get(id, cache); err != nil {
                return nil, fmt.Errorf("readCache: Get(%v): %w", id, err)
@@ -173,11 +181,7 @@
 }
 
 func (c *cachesImpl) useCache(id uint64) error {
-       db, err := c.openDB()
-       if err != nil {
-               return err
-       }
-       defer db.Close()
+       db := c.getDB()
        cache := &Cache{}
        if err := db.Get(id, cache); err != nil {
                return fmt.Errorf("useCache: Get(%v): %w", id, err)
@@ -232,12 +236,7 @@
        c.gcAt = time.Now()
        c.logger.Debugf("gc: %v", c.gcAt.String())
 
-       db, err := c.openDB()
-       if err != nil {
-               fatal(c.logger, err)
-               return
-       }
-       defer db.Close()
+       db := c.getDB()
 
        // Remove the caches which are not completed for a while, they are most 
likely to be broken.
        var caches []*Cache
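
For orientation, the caches.go change above replaces the per-call openDB()/Close() pattern with a single bolthold store that is opened once in newCaches(), shared through getDB() and released by close(). A minimal sketch of the new lifecycle, assuming the code sits inside the artifactcache package (dir, id and repo are placeholder values):

package artifactcache

import "github.com/sirupsen/logrus"

// Sketch of the 11.1.2 lifecycle: the store is opened once and shared.
func exampleCachesLifecycle(dir string, id uint64, repo string) error {
    caches, err := newCaches(dir, "secret", logrus.New()) // opens bolt.db once
    if err != nil {
        return err
    }
    defer caches.close() // releases the shared *bolthold.Store

    db := caches.getDB() // no error return, no per-call db.Close()
    _ = db               // readCache/useCache/gcCache reuse this same store
    _, err = caches.readCache(id, repo)
    return err
}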
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/act/artifactcache/caches_test.go 
new/forgejo-runner-11.1.2/act/artifactcache/caches_test.go
--- old/forgejo-runner-11.1.1/act/artifactcache/caches_test.go  2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/artifactcache/caches_test.go  2025-10-03 
10:22:06.000000000 +0200
@@ -14,6 +14,7 @@
 func TestCacheReadWrite(t *testing.T) {
        caches, err := newCaches(t.TempDir(), "secret", logrus.New())
        require.NoError(t, err)
+       defer caches.close()
        t.Run("NotFound", func(t *testing.T) {
                found, err := caches.readCache(456, "repo")
                assert.Nil(t, found)
@@ -33,9 +34,7 @@
        cache.Repo = repo
 
        t.Run("Insert", func(t *testing.T) {
-               db, err := caches.openDB()
-               require.NoError(t, err)
-               defer db.Close()
+               db := caches.getDB()
                assert.NoError(t, insertCache(db, cache))
        })
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/artifactcache/handler.go 
new/forgejo-runner-11.1.2/act/artifactcache/handler.go
--- old/forgejo-runner-11.1.1/act/artifactcache/handler.go      2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/artifactcache/handler.go      2025-10-03 
10:22:06.000000000 +0200
@@ -122,6 +122,10 @@
                return nil
        }
        var retErr error
+       if h.caches != nil {
+               h.caches.close()
+               h.caches = nil
+       }
        if h.server != nil {
                err := h.server.Close()
                if err != nil {
@@ -151,6 +155,9 @@
 }
 
 func (h *handler) setCaches(caches caches) {
+       if h.caches != nil {
+               h.caches.close()
+       }
        h.caches = caches
 }
 
@@ -170,12 +177,7 @@
        }
        version := r.URL.Query().Get("version")
 
-       db, err := h.caches.openDB()
-       if err != nil {
-               h.responseFatalJSON(w, r, err)
-               return
-       }
-       defer db.Close()
+       db := h.caches.getDB()
 
        cache, err := findCacheWithIsolationKeyFallback(db, repo, keys, 
version, rundata.WriteIsolationKey)
        if err != nil {
@@ -221,12 +223,7 @@
        api.Key = strings.ToLower(api.Key)
 
        cache := api.ToCache()
-       db, err := h.caches.openDB()
-       if err != nil {
-               h.responseFatalJSON(w, r, err)
-               return
-       }
-       defer db.Close()
+       db := h.caches.getDB()
 
        now := time.Now().Unix()
        cache.CreatedAt = now
@@ -335,12 +332,7 @@
        // write real size back to cache, it may be different from the current 
value when the request doesn't specify it.
        cache.Size = size
 
-       db, err := h.caches.openDB()
-       if err != nil {
-               h.responseFatalJSON(w, r, err)
-               return
-       }
-       defer db.Close()
+       db := h.caches.getDB()
 
        cache.Complete = true
        if err := db.Update(cache.ID, cache); err != nil {
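
A related note on handler.go: the handler now owns the store for its whole lifetime. Close() releases it, and setCaches() closes any previously attached instance before swapping in a new one. A minimal sketch mirroring how the tests below start and stop a handler (dir is a placeholder):

package artifactcache

// Sketch only: assumes it lives in the artifactcache package.
func exampleHandlerLifecycle(dir string) error {
    handler, err := StartHandler(dir, "", 0, "secret", nil)
    if err != nil {
        return err
    }
    defer handler.Close() // since 11.1.2 this also closes the caches' database
    // ... serve cache requests ...
    return nil
}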
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/act/artifactcache/handler_test.go 
new/forgejo-runner-11.1.2/act/artifactcache/handler_test.go
--- old/forgejo-runner-11.1.1/act/artifactcache/handler_test.go 2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/artifactcache/handler_test.go 2025-10-03 
10:22:06.000000000 +0200
@@ -78,9 +78,7 @@
 
        defer func() {
                t.Run("inspect db", func(t *testing.T) {
-                       db, err := handler.getCaches().openDB()
-                       require.NoError(t, err)
-                       defer db.Close()
+                       db := handler.getCaches().getDB()
                        require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) 
error {
                                return 
tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
                                        t.Logf("%s: %s", k, v)
@@ -938,39 +936,10 @@
                        },
                },
                {
-                       name: "find open",
-                       caches: func(t *testing.T, message string) caches {
-                               caches := newMockCaches(t)
-                               caches.On("validateMac", 
RunData{}).Return(cacheRepo, nil)
-                               caches.On("openDB", mock.Anything, 
mock.Anything).Return(nil, errors.New(message))
-                               return caches
-                       },
-                       call: func(t *testing.T, handler Handler, w 
http.ResponseWriter) {
-                               req, err := http.NewRequest("GET", 
"example.com/cache", nil)
-                               require.NoError(t, err)
-                               handler.find(w, req, nil)
-                       },
-               },
-               {
-                       name: "reserve",
-                       caches: func(t *testing.T, message string) caches {
-                               caches := newMockCaches(t)
-                               caches.On("validateMac", 
RunData{}).Return(cacheRepo, nil)
-                               caches.On("openDB", mock.Anything, 
mock.Anything).Return(nil, errors.New(message))
-                               return caches
-                       },
-                       call: func(t *testing.T, handler Handler, w 
http.ResponseWriter) {
-                               body, err := json.Marshal(&Request{})
-                               require.NoError(t, err)
-                               req, err := http.NewRequest("POST", 
"example.com/caches", bytes.NewReader(body))
-                               require.NoError(t, err)
-                               handler.reserve(w, req, nil)
-                       },
-               },
-               {
                        name: "upload",
                        caches: func(t *testing.T, message string) caches {
                                caches := newMockCaches(t)
+                               caches.On("close").Return()
                                caches.On("validateMac", 
RunData{}).Return(cacheRepo, nil)
                                caches.On("readCache", mock.Anything, 
mock.Anything).Return(nil, errors.New(message))
                                return caches
@@ -988,6 +957,7 @@
                        name: "commit",
                        caches: func(t *testing.T, message string) caches {
                                caches := newMockCaches(t)
+                               caches.On("close").Return()
                                caches.On("validateMac", 
RunData{}).Return(cacheRepo, nil)
                                caches.On("readCache", mock.Anything, 
mock.Anything).Return(nil, errors.New(message))
                                return caches
@@ -1005,6 +975,7 @@
                        name: "get",
                        caches: func(t *testing.T, message string) caches {
                                caches := newMockCaches(t)
+                               caches.On("close").Return()
                                caches.On("validateMac", 
RunData{}).Return(cacheRepo, nil)
                                caches.On("readCache", mock.Anything, 
mock.Anything).Return(nil, errors.New(message))
                                return caches
@@ -1042,10 +1013,12 @@
                        dir := filepath.Join(t.TempDir(), "artifactcache")
                        handler, err := StartHandler(dir, "", 0, "secret", nil)
                        require.NoError(t, err)
+                       defer handler.Close()
 
                        fatalMessage = "<unset>"
 
-                       handler.setCaches(testCase.caches(t, message))
+                       caches := testCase.caches(t, message) // doesn't need 
to be closed because it will be given to handler
+                       handler.setCaches(caches)
 
                        w := httptest.NewRecorder()
                        testCase.call(t, handler, w)
@@ -1138,18 +1111,15 @@
                },
        }
 
-       db, err := handler.getCaches().openDB()
-       require.NoError(t, err)
+       db := handler.getCaches().getDB()
        for _, c := range cases {
                require.NoError(t, insertCache(db, c.Cache))
        }
-       require.NoError(t, db.Close())
 
        handler.getCaches().setgcAt(time.Time{}) // ensure gcCache will not skip
        handler.getCaches().gcCache()
 
-       db, err = handler.getCaches().openDB()
-       require.NoError(t, err)
+       db = handler.getCaches().getDB()
        for i, v := range cases {
                t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) {
                        cache := &Cache{}
@@ -1161,7 +1131,6 @@
                        }
                })
        }
-       require.NoError(t, db.Close())
 }
 
 func TestHandler_ExternalURL(t *testing.T) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/act/artifactcache/mock_caches.go 
new/forgejo-runner-11.1.2/act/artifactcache/mock_caches.go
--- old/forgejo-runner-11.1.1/act/artifactcache/mock_caches.go  2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/artifactcache/mock_caches.go  2025-10-03 
10:22:06.000000000 +0200
@@ -19,6 +19,11 @@
        mock.Mock
 }
 
+// close provides a mock function with no fields
+func (_m *mockCaches) close() {
+       _m.Called()
+}
+
 // commit provides a mock function with given fields: id, size
 func (_m *mockCaches) commit(id uint64, size int64) (int64, error) {
        ret := _m.Called(id, size)
@@ -80,19 +85,15 @@
        _m.Called()
 }
 
-// openDB provides a mock function with no fields
-func (_m *mockCaches) openDB() (*bolthold.Store, error) {
+// getDB provides a mock function with no fields
+func (_m *mockCaches) getDB() *bolthold.Store {
        ret := _m.Called()
 
        if len(ret) == 0 {
-               panic("no return value specified for openDB")
+               panic("no return value specified for getDB")
        }
 
        var r0 *bolthold.Store
-       var r1 error
-       if rf, ok := ret.Get(0).(func() (*bolthold.Store, error)); ok {
-               return rf()
-       }
        if rf, ok := ret.Get(0).(func() *bolthold.Store); ok {
                r0 = rf()
        } else {
@@ -101,13 +102,7 @@
                }
        }
 
-       if rf, ok := ret.Get(1).(func() error); ok {
-               r1 = rf()
-       } else {
-               r1 = ret.Error(1)
-       }
-
-       return r0, r1
+       return r0
 }
 
 // readCache provides a mock function with given fields: id, repo
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/cacheproxy/handler.go 
new/forgejo-runner-11.1.2/act/cacheproxy/handler.go
--- old/forgejo-runner-11.1.1/act/cacheproxy/handler.go 2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/cacheproxy/handler.go 2025-10-03 
10:22:06.000000000 +0200
@@ -37,7 +37,8 @@
 
        outboundIP string
 
-       cacheServerHost string
+       cacheServerHost        string
+       cacheProxyHostOverride string
 
        cacheSecret string
 
@@ -55,7 +56,7 @@
        }
 }
 
-func StartHandler(targetHost, outboundIP string, port uint16, cacheSecret 
string, logger logrus.FieldLogger) (*Handler, error) {
+func StartHandler(targetHost, outboundIP string, port uint16, 
cacheProxyHostOverride, cacheSecret string, logger logrus.FieldLogger) 
(*Handler, error) {
        h := &Handler{}
 
        if logger == nil {
@@ -77,6 +78,7 @@
        }
 
        h.cacheServerHost = targetHost
+       h.cacheProxyHostOverride = cacheProxyHostOverride
 
        proxy, err := h.newReverseProxy(targetHost)
        if err != nil {
@@ -153,7 +155,9 @@
 }
 
 func (h *Handler) ExternalURL() string {
-       // TODO: make the external url configurable if necessary
+       if h.cacheProxyHostOverride != "" {
+               return h.cacheProxyHostOverride
+       }
        return fmt.Sprintf("http://%s", net.JoinHostPort(h.outboundIP, 
strconv.Itoa(h.listener.Addr().(*net.TCPAddr).Port)))
 }
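
For reference, the override introduced above is now handed to the proxy at construction time and returned verbatim by ExternalURL(), instead of being patched into ACTIONS_CACHE_URL afterwards (see the runner.go hunk further down). A minimal sketch with placeholder hosts, assuming it sits in the cacheproxy package:

package cacheproxy

import "github.com/sirupsen/logrus"

func exampleExternalURL() (string, error) {
    h, err := StartHandler(
        "http://127.0.0.1:41919",    // targetHost: the cache server
        "192.0.2.10",                // outboundIP
        0,                           // port: 0 picks a free one
        "https://cache.example.com", // cacheProxyHostOverride
        "secret",                    // cacheSecret
        logrus.StandardLogger().WithField("module", "cache_proxy"),
    )
    if err != nil {
        return "", err
    }
    // Non-empty override: returned as-is. Empty override: falls back to
    // http://<outboundIP>:<listener port> as before.
    return h.ExternalURL(), nil
}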
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/act/container/host_environment.go 
new/forgejo-runner-11.1.2/act/container/host_environment.go
--- old/forgejo-runner-11.1.1/act/container/host_environment.go 2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/container/host_environment.go 2025-10-03 
10:22:06.000000000 +0200
@@ -389,7 +389,7 @@
                if err := e.exec(ctx, command, cmdline, env, user, workdir); 
err != nil {
                        select {
                        case <-ctx.Done():
-                               return fmt.Errorf("this step has been 
cancelled: %w", err)
+                               return fmt.Errorf("this step has been 
cancelled: ctx: %w, exec: %w", ctx.Err(), err)
                        default:
                                return err
                        }
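
Small aside on the host_environment.go change: since Go 1.20, fmt.Errorf accepts multiple %w verbs, so the new message keeps both the cancellation cause and the underlying exec error matchable with errors.Is. A self-contained illustration (execErr stands in for the real exec failure):

package main

import (
    "context"
    "errors"
    "fmt"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    execErr := errors.New("signal: killed")

    err := fmt.Errorf("this step has been cancelled: ctx: %w, exec: %w", ctx.Err(), execErr)
    fmt.Println(errors.Is(err, context.Canceled)) // true
    fmt.Println(errors.Is(err, execErr))          // true
}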
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/jobparser/interpeter.go 
new/forgejo-runner-11.1.2/act/jobparser/interpeter.go
--- old/forgejo-runner-11.1.1/act/jobparser/interpeter.go       2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/jobparser/interpeter.go       2025-10-03 
10:22:06.000000000 +0200
@@ -76,6 +76,36 @@
        return exprparser.NewInterpeter(ee, config)
 }
 
+// Returns an interpeter used in the server in the context of workflow-level 
templates. Needs github, inputs, and vars
+// context only.
+func NewWorkflowInterpeter(
+       gitCtx *model.GithubContext,
+       vars map[string]string,
+       inputs map[string]any,
+) exprparser.Interpreter {
+       ee := &exprparser.EvaluationEnvironment{
+               Github:   gitCtx,
+               Env:      nil, // no need
+               Job:      nil, // no need
+               Steps:    nil, // no need
+               Runner:   nil, // no need
+               Secrets:  nil, // no need
+               Strategy: nil, // no need
+               Matrix:   nil, // no need
+               Needs:    nil, // no need
+               Inputs:   inputs,
+               Vars:     vars,
+       }
+
+       config := exprparser.Config{
+               Run:        nil,
+               WorkingDir: "", // WorkingDir is used for the function 
hashFiles, but it's not needed in the server
+               Context:    "workflow",
+       }
+
+       return exprparser.NewInterpeter(ee, config)
+}
+
 // JobResult is the minimum requirement of job results for Interpeter
 type JobResult struct {
        Needs   []string
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/jobparser/model.go 
new/forgejo-runner-11.1.2/act/jobparser/model.go
--- old/forgejo-runner-11.1.1/act/jobparser/model.go    2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/jobparser/model.go    2025-10-03 
10:22:06.000000000 +0200
@@ -1,7 +1,6 @@
 package jobparser
 
 import (
-       "bytes"
        "fmt"
 
        "code.forgejo.org/forgejo/runner/v11/act/model"
@@ -193,83 +192,32 @@
        return evt.schedules
 }
 
-func ReadWorkflowRawConcurrency(content []byte) (*model.RawConcurrency, error) 
{
-       w := new(model.Workflow)
-       err := yaml.NewDecoder(bytes.NewReader(content)).Decode(w)
-       return w.RawConcurrency, err
-}
-
-func EvaluateConcurrency(rc *model.RawConcurrency, jobID string, job *Job, 
gitCtx map[string]any, results map[string]*JobResult, vars map[string]string, 
inputs map[string]any) (string, bool, error) {
-       actJob := &model.Job{}
-       if job != nil {
-               actJob.Strategy = &model.Strategy{
-                       FailFastString:    job.Strategy.FailFastString,
-                       MaxParallelString: job.Strategy.MaxParallelString,
-                       RawMatrix:         job.Strategy.RawMatrix,
-               }
-               actJob.Strategy.FailFast = actJob.Strategy.GetFailFast()
-               actJob.Strategy.MaxParallel = actJob.Strategy.GetMaxParallel()
-       }
-
-       matrix := make(map[string]any)
-       matrixes, err := actJob.GetMatrixes()
-       if err != nil {
-               return "", false, err
-       }
-       if len(matrixes) > 0 {
-               matrix = matrixes[0]
-       }
-
-       evaluator := NewExpressionEvaluator(NewInterpeter(jobID, actJob, 
matrix, toGitContext(gitCtx), results, vars, inputs))
+// Convert the raw YAML from the `concurrency` block on a workflow into the 
evaluated concurrency group and
+// cancel-in-progress value. This implementation only supports workflow-level 
concurrency definition, where we expect
+// expressions to be able to access only the github, inputs and vars contexts. 
If RawConcurrency is empty, then the
+// returned concurrency group will be "" and cancel-in-progress will be nil -- 
this can be used to distinguish from an
+// explicit cancel-in-progress choice even if a group isn't specified.
+func EvaluateWorkflowConcurrency(rc *model.RawConcurrency, gitCtx 
*model.GithubContext, vars map[string]string, inputs map[string]any) (string, 
*bool, error) {
+       evaluator := NewExpressionEvaluator(NewWorkflowInterpeter(gitCtx, vars, 
inputs))
        var node yaml.Node
        if err := node.Encode(rc); err != nil {
-               return "", false, fmt.Errorf("failed to encode concurrency: 
%w", err)
+               return "", nil, fmt.Errorf("failed to encode concurrency: %w", 
err)
        }
        if err := evaluator.EvaluateYamlNode(&node); err != nil {
-               return "", false, fmt.Errorf("failed to evaluate concurrency: 
%w", err)
+               return "", nil, fmt.Errorf("failed to evaluate concurrency: 
%w", err)
        }
        var evaluated model.RawConcurrency
        if err := node.Decode(&evaluated); err != nil {
-               return "", false, fmt.Errorf("failed to unmarshal evaluated 
concurrency: %w", err)
+               return "", nil, fmt.Errorf("failed to unmarshal evaluated 
concurrency: %w", err)
        }
        if evaluated.RawExpression != "" {
-               return evaluated.RawExpression, false, nil
+               return evaluated.RawExpression, nil, nil
        }
-       return evaluated.Group, evaluated.CancelInProgress == "true", nil
-}
-
-func toGitContext(input map[string]any) *model.GithubContext {
-       gitContext := &model.GithubContext{
-               EventPath:        asString(input["event_path"]),
-               Workflow:         asString(input["workflow"]),
-               RunID:            asString(input["run_id"]),
-               RunNumber:        asString(input["run_number"]),
-               Actor:            asString(input["actor"]),
-               Repository:       asString(input["repository"]),
-               EventName:        asString(input["event_name"]),
-               Sha:              asString(input["sha"]),
-               Ref:              asString(input["ref"]),
-               RefName:          asString(input["ref_name"]),
-               RefType:          asString(input["ref_type"]),
-               HeadRef:          asString(input["head_ref"]),
-               BaseRef:          asString(input["base_ref"]),
-               Token:            asString(input["token"]),
-               Workspace:        asString(input["workspace"]),
-               Action:           asString(input["action"]),
-               ActionPath:       asString(input["action_path"]),
-               ActionRef:        asString(input["action_ref"]),
-               ActionRepository: asString(input["action_repository"]),
-               Job:              asString(input["job"]),
-               RepositoryOwner:  asString(input["repository_owner"]),
-               RetentionDays:    asString(input["retention_days"]),
-       }
-
-       event, ok := input["event"].(map[string]any)
-       if ok {
-               gitContext.Event = event
+       if evaluated.CancelInProgress == "" {
+               return evaluated.Group, nil, nil
        }
-
-       return gitContext
+       cancelInProgress := evaluated.CancelInProgress == "true"
+       return evaluated.Group, &cancelInProgress, nil
 }
 
 func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
@@ -430,12 +378,3 @@
 
        return scalars, datas, nil
 }
-
-func asString(v any) string {
-       if v == nil {
-               return ""
-       } else if s, ok := v.(string); ok {
-               return s
-       }
-       return ""
-}
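
To make the new jobparser entry point concrete, here is a minimal sketch of EvaluateWorkflowConcurrency as introduced above: it only sees the github, vars and inputs contexts, and the *bool return distinguishes an unset cancel-in-progress (nil) from an explicit choice. Values are illustrative and the code assumes it sits in the jobparser package:

package jobparser

import "code.forgejo.org/forgejo/runner/v11/act/model"

func exampleWorkflowConcurrency() (string, *bool, error) {
    rc := &model.RawConcurrency{
        Group:            "${{ github.workflow }}-${{ github.ref }}",
        CancelInProgress: "true",
    }
    gitCtx := &model.GithubContext{Workflow: "build", Ref: "refs/heads/main"}

    // group evaluates to "build-refs/heads/main"; cancelInProgress points at true.
    // With an empty RawConcurrency the group is "" and the pointer is nil.
    return EvaluateWorkflowConcurrency(rc, gitCtx,
        map[string]string{}, // vars
        map[string]any{},    // inputs
    )
}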
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/jobparser/model_test.go 
new/forgejo-runner-11.1.2/act/jobparser/model_test.go
--- old/forgejo-runner-11.1.1/act/jobparser/model_test.go       2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/jobparser/model_test.go       2025-10-03 
10:22:06.000000000 +0200
@@ -342,10 +342,11 @@
 
 func TestEvaluateConcurrency(t *testing.T) {
        tests := []struct {
-               name             string
-               input            model.RawConcurrency
-               group            string
-               cancelInProgress bool
+               name                string
+               input               model.RawConcurrency
+               group               string
+               cancelInProgressNil bool
+               cancelInProgress    bool
        }{
                {
                        name: "basic",
@@ -357,18 +358,18 @@
                        cancelInProgress: true,
                },
                {
-                       name:             "undefined",
-                       input:            model.RawConcurrency{},
-                       group:            "",
-                       cancelInProgress: false,
+                       name:                "undefined",
+                       input:               model.RawConcurrency{},
+                       group:               "",
+                       cancelInProgressNil: true,
                },
                {
                        name: "group-evaluation",
                        input: model.RawConcurrency{
                                Group: "${{ github.workflow }}-${{ github.ref 
}}",
                        },
-                       group:            "test_workflow-main",
-                       cancelInProgress: false,
+                       group:               "test_workflow-main",
+                       cancelInProgressNil: true,
                },
                {
                        name: "cancel-evaluation-true",
@@ -393,37 +394,44 @@
                        input: model.RawConcurrency{
                                Group: "user-${{ 
github.event.commits[0].author.username }}",
                        },
-                       group:            "user-someone",
-                       cancelInProgress: false,
+                       group:               "user-someone",
+                       cancelInProgressNil: true,
                },
                {
                        name: "arbitrary-var",
                        input: model.RawConcurrency{
                                Group: "${{ vars.eval_arbitrary_var }}",
                        },
-                       group:            "123",
-                       cancelInProgress: false,
+                       group:               "123",
+                       cancelInProgressNil: true,
                },
                {
                        name: "arbitrary-input",
                        input: model.RawConcurrency{
                                Group: "${{ inputs.eval_arbitrary_input }}",
                        },
-                       group:            "456",
-                       cancelInProgress: false,
+                       group:               "456",
+                       cancelInProgressNil: true,
+               },
+               {
+                       name: "cancel-in-progress-only",
+                       input: model.RawConcurrency{
+                               CancelInProgress: "true",
+                       },
+                       group:            "",
+                       cancelInProgress: true,
                },
        }
 
        for _, test := range tests {
                t.Run(test.name, func(t *testing.T) {
-                       group, cancelInProgress, err := EvaluateConcurrency(
+                       group, cancelInProgress, err := 
EvaluateWorkflowConcurrency(
                                &test.input,
-                               "job-id",
-                               nil, // job
-                               map[string]any{
-                                       "workflow": "test_workflow",
-                                       "ref":      "main",
-                                       "event": map[string]any{
+                               // gitCtx
+                               &model.GithubContext{
+                                       Workflow: "test_workflow",
+                                       Ref:      "main",
+                                       Event: map[string]any{
                                                "commits": []any{
                                                        map[string]any{
                                                                "author": 
map[string]any{
@@ -437,20 +445,24 @@
                                                        },
                                                },
                                        },
-                               }, // gitCtx
-                               map[string]*JobResult{
-                                       "job-id": {},
-                               }, // results
+                               },
+                               // vars
                                map[string]string{
                                        "eval_arbitrary_var": "123",
-                               }, // vars
+                               },
+                               // inputs
                                map[string]any{
                                        "eval_arbitrary_input": "456",
-                               }, // inputs
+                               },
                        )
                        assert.NoError(t, err)
                        assert.EqualValues(t, test.group, group)
-                       assert.EqualValues(t, test.cancelInProgress, 
cancelInProgress)
+                       if test.cancelInProgressNil {
+                               assert.Nil(t, cancelInProgress)
+                       } else {
+                               require.NotNil(t, cancelInProgress)
+                               assert.EqualValues(t, test.cancelInProgress, 
*cancelInProgress)
+                       }
                })
        }
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/runner/logger.go 
new/forgejo-runner-11.1.2/act/runner/logger.go
--- old/forgejo-runner-11.1.1/act/runner/logger.go      2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/runner/logger.go      2025-10-03 
10:22:06.000000000 +0200
@@ -146,6 +146,26 @@
        }).WithContext(ctx))
 }
 
+func GetOuterStepResult(entry *logrus.Entry) any {
+       r, ok := entry.Data["stepResult"]
+       if !ok {
+               return nil
+       }
+
+       // composite actions steps log with a list of stepID
+       if s, ok := entry.Data["stepID"]; ok {
+               if stepIDs, ok := s.([]string); ok {
+                       if len(stepIDs) > 1 {
+                               return nil
+                       }
+               }
+       } else {
+               return nil
+       }
+
+       return r
+}
+
 func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, 
stageName string) context.Context {
        rtn := common.Logger(ctx).WithFields(logrus.Fields{
                "stepNumber": stepNumber,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/runner/logger_test.go 
new/forgejo-runner-11.1.2/act/runner/logger_test.go
--- old/forgejo-runner-11.1.1/act/runner/logger_test.go 1970-01-01 
01:00:00.000000000 +0100
+++ new/forgejo-runner-11.1.2/act/runner/logger_test.go 2025-10-03 
10:22:06.000000000 +0200
@@ -0,0 +1,63 @@
+package runner
+
+import (
+       "testing"
+
+       "code.forgejo.org/forgejo/runner/v11/act/common"
+
+       "github.com/sirupsen/logrus/hooks/test"
+       "github.com/stretchr/testify/assert"
+       "github.com/stretchr/testify/require"
+)
+
+func TestRunner_GetOuterStepResult(t *testing.T) {
+       nullLogger, hook := test.NewNullLogger()
+       ctx := common.WithLogger(t.Context(), nullLogger)
+
+       t.Run("no stepResult", func(t *testing.T) {
+               hook.Reset()
+               common.Logger(ctx).Info("✅ Success")
+               entry := hook.LastEntry()
+               require.NotNil(t, entry)
+               assert.Nil(t, GetOuterStepResult(entry))
+       })
+
+       t.Run("stepResult and no stepID", func(t *testing.T) {
+               hook.Reset()
+               common.Logger(ctx).WithField("stepResult", "success").Info("✅ 
Success")
+               entry := hook.LastEntry()
+               require.NotNil(t, entry)
+               assert.Nil(t, GetOuterStepResult(entry))
+       })
+
+       stepNumber := 123
+       stepID := "step id"
+       stepName := "readable name"
+       stageName := "Main"
+       ctx = withStepLogger(ctx, stepNumber, stepID, stepName, stageName)
+
+       t.Run("stepResult and stepID", func(t *testing.T) {
+               hook.Reset()
+               common.Logger(ctx).WithField("stepResult", "success").Info("✅ 
Success")
+               entry := hook.LastEntry()
+               actualStepIDs, ok := entry.Data["stepID"]
+               require.True(t, ok)
+               require.Equal(t, []string{stepID}, actualStepIDs)
+               require.NotNil(t, entry)
+               assert.Equal(t, "success", GetOuterStepResult(entry))
+       })
+
+       compositeStepID := "composite step id"
+       ctx = WithCompositeStepLogger(ctx, compositeStepID)
+
+       t.Run("stepResult and composite stepID", func(t *testing.T) {
+               hook.Reset()
+               common.Logger(ctx).WithField("stepResult", "success").Info("✅ 
Success")
+               entry := hook.LastEntry()
+               actualStepIDs, ok := entry.Data["stepID"]
+               require.True(t, ok)
+               require.Equal(t, []string{stepID, compositeStepID}, 
actualStepIDs)
+               require.NotNil(t, entry)
+               assert.Nil(t, GetOuterStepResult(entry))
+       })
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/runner/run_context.go 
new/forgejo-runner-11.1.2/act/runner/run_context.go
--- old/forgejo-runner-11.1.1/act/runner/run_context.go 2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/runner/run_context.go 2025-10-03 
10:22:06.000000000 +0200
@@ -254,6 +254,10 @@
        logger := common.Logger(ctx)
        logger.Debugf("stopHostEnvironment")
 
+       if !rc.IsLXCHostEnv(ctx) {
+               return nil
+       }
+
        var stopScript bytes.Buffer
        if err := stopTemplate.Execute(&stopScript, struct {
                Name string
@@ -945,7 +949,7 @@
                        return err
                }
                if res {
-                       timeoutctx, cancelTimeOut := evaluateTimeout(ctx, 
rc.ExprEval, rc.Run.Job().TimeoutMinutes)
+                       timeoutctx, cancelTimeOut := evaluateTimeout(ctx, 
"job", rc.ExprEval, rc.Run.Job().TimeoutMinutes)
                        defer cancelTimeOut()
 
                        return executor(timeoutctx)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/runner/step.go 
new/forgejo-runner-11.1.2/act/runner/step.go
--- old/forgejo-runner-11.1.1/act/runner/step.go        2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/runner/step.go        2025-10-03 
10:22:06.000000000 +0200
@@ -177,7 +177,7 @@
                        Mode: 0o666,
                })(ctx)
 
-               timeoutctx, cancelTimeOut := evaluateTimeout(ctx, rc.ExprEval, 
stepModel.TimeoutMinutes)
+               timeoutctx, cancelTimeOut := evaluateTimeout(ctx, "step", 
rc.ExprEval, stepModel.TimeoutMinutes)
                defer cancelTimeOut()
                err = executor(timeoutctx)
 
@@ -213,12 +213,12 @@
        }
 }
 
-func evaluateTimeout(ctx context.Context, exprEval ExpressionEvaluator, 
timeoutMinutes string) (context.Context, context.CancelFunc) {
+func evaluateTimeout(ctx context.Context, contextType string, exprEval 
ExpressionEvaluator, timeoutMinutes string) (context.Context, 
context.CancelFunc) {
        timeout := exprEval.Interpolate(ctx, timeoutMinutes)
        if timeout != "" {
                timeOutMinutes, err := strconv.ParseInt(timeout, 10, 64)
                if err == nil {
-                       common.Logger(ctx).Debugf("the step will stop in 
timeout-minutes %s", timeout)
+                       common.Logger(ctx).Debugf("the %s will stop in 
timeout-minutes %s", contextType, timeout)
                        return context.WithTimeout(ctx, 
time.Duration(timeOutMinutes)*time.Minute)
                }
                common.Logger(ctx).Errorf("timeout-minutes %s cannot be parsed 
and will be ignored: %w", timeout, err)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/act/schema/schema_test.go 
new/forgejo-runner-11.1.2/act/schema/schema_test.go
--- old/forgejo-runner-11.1.1/act/schema/schema_test.go 2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/act/schema/schema_test.go 2025-10-03 
10:22:06.000000000 +0200
@@ -30,6 +30,84 @@
        assert.NoError(t, err)
 }
 
+func TestContextsInWorkflowMatrix(t *testing.T) {
+       t.Run("KnownContexts", func(t *testing.T) {
+               // Parse raw YAML snippet.
+               var node yaml.Node
+               err := yaml.Unmarshal([]byte(`
+on: push
+
+jobs:
+  job:
+    uses: ./.forgejo/workflow/test.yaml
+    strategy:
+      matrix:
+        input1:
+          - ${{ forge.KEY }}
+          - ${{ forgejo.KEY }}
+          - ${{ github.KEY }}
+          - ${{ inputs.KEY }}
+          - ${{ vars.KEY }}
+          - ${{ needs.KEY }}
+        include:
+         - forge: ${{ forge.KEY }}
+         - forgejo: ${{ forgejo.KEY }}
+         - github: ${{ github.KEY }}
+         - inputs: ${{ inputs.KEY }}
+         - vars: ${{ vars.KEY }}
+         - needs: ${{ needs.KEY }}
+        exclude:
+         - forge: ${{ forge.KEY }}
+         - forgejo: ${{ forgejo.KEY }}
+         - github: ${{ github.KEY }}
+         - inputs: ${{ inputs.KEY }}
+         - vars: ${{ vars.KEY }}
+         - needs: ${{ needs.KEY }}
+`), &node)
+               if !assert.NoError(t, err) {
+                       return
+               }
+
+               // Parse YAML node as a validated workflow.
+               err = (&Node{
+                       Definition: "workflow-root",
+                       Schema:     GetWorkflowSchema(),
+               }).UnmarshalYAML(&node)
+               assert.NoError(t, err)
+       })
+
+       t.Run("UnknownContext", func(t *testing.T) {
+               for _, property := range []string{"include", "exclude", 
"input1"} {
+                       t.Run(property, func(t *testing.T) {
+                               for _, context := range []string{"secrets", 
"job", "steps", "runner", "matrix", "strategy"} {
+                                       t.Run(context, func(t *testing.T) {
+                                               var node yaml.Node
+                                               err := 
yaml.Unmarshal([]byte(fmt.Sprintf(`
+on: push
+
+jobs:
+  job:
+    uses: ./.forgejo/workflow/test.yaml
+    strategy:
+      matrix:
+        %[1]s:
+          - input1: ${{ %[2]s.KEY }}
+`, property, context)), &node)
+                                               if !assert.NoError(t, err) {
+                                                       return
+                                               }
+                                               err = (&Node{
+                                                       Definition: 
"workflow-root",
+                                                       Schema:     
GetWorkflowSchema(),
+                                               }).UnmarshalYAML(&node)
+                                               assert.ErrorContains(t, err, 
"Unknown Variable Access "+context)
+                                       })
+                               }
+                       })
+               }
+       })
+}
+
 func TestReusableWorkflow(t *testing.T) {
        t.Run("KnownContexts", func(t *testing.T) {
                var node yaml.Node
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/examples/docker-compose/compose-forgejo-and-runner.yml
 
new/forgejo-runner-11.1.2/examples/docker-compose/compose-forgejo-and-runner.yml
--- 
old/forgejo-runner-11.1.1/examples/docker-compose/compose-forgejo-and-runner.yml
    2025-09-18 18:17:56.000000000 +0200
+++ 
new/forgejo-runner-11.1.2/examples/docker-compose/compose-forgejo-and-runner.yml
    2025-10-03 10:22:06.000000000 +0200
@@ -51,7 +51,7 @@
       - 8080:3000
 
   runner-register:
-    image: code.forgejo.org/forgejo/runner:11.0.0
+    image: code.forgejo.org/forgejo/runner:11.1.1
     links:
       - docker-in-docker
       - forgejo
@@ -77,7 +77,7 @@
       '
 
   runner-daemon:
-    image: code.forgejo.org/forgejo/runner:11.0.0
+    image: code.forgejo.org/forgejo/runner:11.1.1
     links:
       - docker-in-docker
       - forgejo
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/forgejo-runner-11.1.1/examples/lxc-systemd/forgejo-runner-service.sh 
new/forgejo-runner-11.1.2/examples/lxc-systemd/forgejo-runner-service.sh
--- old/forgejo-runner-11.1.1/examples/lxc-systemd/forgejo-runner-service.sh    
2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/examples/lxc-systemd/forgejo-runner-service.sh    
2025-10-03 10:22:06.000000000 +0200
@@ -22,12 +22,12 @@
 : ${INPUTS_LIFETIME:=7d}
 DEFAULT_LXC_HELPERS_VERSION=1.1.0 # renovate: datasource=forgejo-tags 
depName=forgejo/lxc-helpers
 : ${INPUTS_LXC_HELPERS_VERSION:=$DEFAULT_LXC_HELPERS_VERSION}
-DEFAULT_RUNNER_VERSION=11.0.0 # renovate: datasource=forgejo-releases 
depName=forgejo/runner
+DEFAULT_RUNNER_VERSION=11.1.1 # renovate: datasource=forgejo-releases 
depName=forgejo/runner
 : ${INPUTS_RUNNER_VERSION:=$DEFAULT_RUNNER_VERSION}
 
 : ${KILL_AFTER:=21600} # 6h == 21600
 NODEJS_VERSION=20
-DEBIAN_RELEASE=bookworm
+DEBIAN_RELEASE=trixie
 YQ_VERSION=v4.45.1
 SELF=${BASH_SOURCE[0]}
 SELF_FILENAME=$(basename "$SELF")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/app/poll/poller.go 
new/forgejo-runner-11.1.2/internal/app/poll/poller.go
--- old/forgejo-runner-11.1.1/internal/app/poll/poller.go       2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/app/poll/poller.go       2025-10-03 
10:22:06.000000000 +0200
@@ -90,10 +90,10 @@
                return nil
 
        case <-ctx.Done():
-               log.Trace("forcing the jobs to shutdown")
+               log.Info("forcing the jobs to shutdown")
                p.shutdownJobs()
                <-p.done
-               log.Trace("all jobs have been shutdown")
+               log.Info("all jobs have been shutdown")
                return ctx.Err()
        }
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/app/run/runner.go 
new/forgejo-runner-11.1.2/internal/app/run/runner.go
--- old/forgejo-runner-11.1.1/internal/app/run/runner.go        2025-09-18 
18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/app/run/runner.go        2025-10-03 
10:22:06.000000000 +0200
@@ -144,6 +144,7 @@
                cacheURL,
                cfg.Cache.Host,
                cfg.Cache.ProxyPort,
+               cfg.Cache.ActionsCacheURLOverride,
                cacheSecret,
                log.StandardLogger().WithField("module", "cache_proxy"),
        )
@@ -152,9 +153,6 @@
        }
 
        envs["ACTIONS_CACHE_URL"] = cacheProxy.ExternalURL()
-       if cfg.Cache.ActionsCacheURLOverride != "" {
-               envs["ACTIONS_CACHE_URL"] = cfg.Cache.ActionsCacheURLOverride
-       }
 
        return cacheProxy
 }
@@ -195,6 +193,44 @@
        return fmt.Errorf("the workflow file is not usable")
 }
 
+func getWriteIsolationKey(ctx context.Context, eventName, ref string, event 
map[string]any) (string, error) {
+       if eventName == "pull_request" {
+               // The "closed" action of a pull request event runs in the 
context of the base repository
+               // and was merged by a user with write access to the base 
repository. It is authorized to
+               // write the repository cache.
+               if event["action"] == "closed" {
+                       pullRequest, ok := 
event["pull_request"].(map[string]any)
+                       if !ok {
+                               return "", fmt.Errorf("getWriteIsolationKey: 
event.pull_request is not a map[string]any but %T", event["pull_request"])
+                       }
+                       merged, ok := pullRequest["merged"].(bool)
+                       if !ok {
+                               return "", fmt.Errorf("getWriteIsolationKey: 
event.pull_request.merged is not a bool but %T", pullRequest["merged"])
+                       }
+                       if merged {
+                               return "", nil
+                       }
+                       // a pull request that is closed but not merged falls 
thru and is expected to obey the same
+                       // constraints as an opened pull request, it may be 
closed by a user with no write permissions to the
+                       // base repository
+               }
+               // When performing an action on an event from an opened PR, 
provide a "write isolation key" to the cache. The generated
+               // ACTIONS_CACHE_URL will be able to read the cache, and write 
to a cache, but its writes will be isolated to
+               // future runs of the PR's workflows and won't be shared with 
other pull requests or actions. This is a security
+               // measure to prevent a malicious pull request from poisoning 
the cache with secret-stealing code which would
+               // later be executed on another action.
+               // Ensure that `ref` has the expected format so that we don't 
end up with a useless write isolation key
+               if !strings.HasPrefix(ref, "refs/pull/") {
+                       return "", fmt.Errorf("getWriteIsolationKey: expected 
ref to be refs/pull/..., but was %q", ref)
+               }
+               return ref, nil
+       }
+
+       // Other events do not allow the trigger user to modify the content of 
the repository and
+       // are allowed to write the cache without an isolation key
+       return "", nil
+}
+
 func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter 
*report.Reporter) (err error) {
        defer func() {
                if r := recover(); r != nil {
@@ -228,15 +264,18 @@
                defaultActionURL,
                r.client.Address())
 
+       eventName := taskContext["event_name"].GetStringValue()
+       ref := taskContext["ref"].GetStringValue()
+       event := taskContext["event"].GetStructValue().AsMap()
        preset := &model.GithubContext{
-               Event:           taskContext["event"].GetStructValue().AsMap(),
+               Event:           event,
                RunID:           taskContext["run_id"].GetStringValue(),
                RunNumber:       taskContext["run_number"].GetStringValue(),
                Actor:           taskContext["actor"].GetStringValue(),
                Repository:      taskContext["repository"].GetStringValue(),
-               EventName:       taskContext["event_name"].GetStringValue(),
+               EventName:       eventName,
                Sha:             taskContext["sha"].GetStringValue(),
-               Ref:             taskContext["ref"].GetStringValue(),
+               Ref:             ref,
                RefName:         taskContext["ref_name"].GetStringValue(),
                RefType:         taskContext["ref_type"].GetStringValue(),
                HeadRef:         taskContext["head_ref"].GetStringValue(),
@@ -266,19 +305,9 @@
 
        // Register the run with the cacheproxy and modify the CACHE_URL
        if r.cacheProxy != nil {
-               writeIsolationKey := ""
-
-               // When performing an action on an event from a PR, provide a 
"write isolation key" to the cache. The generated
-               // ACTIONS_CACHE_URL will be able to read the cache, and write 
to a cache, but its writes will be isolated to
-               // future runs of the PR's workflows and won't be shared with 
other pull requests or actions. This is a security
-               // measure to prevent a malicious pull request from poisoning 
the cache with secret-stealing code which would
-               // later be executed on another action.
-               if taskContext["event_name"].GetStringValue() == "pull_request" 
{
-                       // Ensure that `Ref` has the expected format so that we 
don't end up with a useless write isolation key
-                       if !strings.HasPrefix(preset.Ref, "refs/pull/") {
-                               return fmt.Errorf("write isolation key: 
expected preset.Ref to be refs/pull/..., but was %q", preset.Ref)
-                       }
-                       writeIsolationKey = preset.Ref
+               writeIsolationKey, err := getWriteIsolationKey(ctx, eventName, 
ref, event)
+               if err != nil {
+                       return err
                }
 
                timestamp := strconv.FormatInt(time.Now().Unix(), 10)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/app/run/runner_test.go new/forgejo-runner-11.1.2/internal/app/run/runner_test.go
--- old/forgejo-runner-11.1.1/internal/app/run/runner_test.go   2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/app/run/runner_test.go   2025-10-03 10:22:06.000000000 +0200
@@ -141,6 +141,83 @@
        }), nil
 }
 
+func TestRunner_getWriteIsolationKey(t *testing.T) {
+       t.Run("push", func(t *testing.T) {
+               key, err := getWriteIsolationKey(t.Context(), "push", "whatever", nil)
+               require.NoError(t, err)
+               assert.Empty(t, key)
+       })
+
+       t.Run("pull_request synchronized key is ref", func(t *testing.T) {
+               expectedKey := "refs/pull/1/head"
+               actualKey, err := getWriteIsolationKey(t.Context(), "pull_request", expectedKey, map[string]any{
+                       "action": "synchronized",
+               })
+               require.NoError(t, err)
+               assert.Equal(t, expectedKey, actualKey)
+       })
+
+       t.Run("pull_request synchronized ref is invalid", func(t *testing.T) {
+               invalidKey := "refs/is/invalid"
+               key, err := getWriteIsolationKey(t.Context(), "pull_request", invalidKey, map[string]any{
+                       "action": "synchronized",
+               })
+               require.Empty(t, key)
+               assert.ErrorContains(t, err, invalidKey)
+       })
+
+       t.Run("pull_request closed and not merged key is ref", func(t 
*testing.T) {
+               expectedKey := "refs/pull/1/head"
+               actualKey, err := getWriteIsolationKey(t.Context(), "pull_request", expectedKey, map[string]any{
+                       "action": "closed",
+                       "pull_request": map[string]any{
+                               "merged": false,
+                       },
+               })
+               require.NoError(t, err)
+               assert.Equal(t, expectedKey, actualKey)
+       })
+
+       t.Run("pull_request closed and merged key is empty", func(t *testing.T) 
{
+               key, err := getWriteIsolationKey(t.Context(), "pull_request", 
"whatever", map[string]any{
+                       "action": "closed",
+                       "pull_request": map[string]any{
+                               "merged": true,
+                       },
+               })
+               require.NoError(t, err)
+               assert.Empty(t, key)
+       })
+
+       t.Run("pull_request missing event.pull_request", func(t *testing.T) {
+               key, err := getWriteIsolationKey(t.Context(), "pull_request", "whatever", map[string]any{
+                       "action": "closed",
+               })
+               require.Empty(t, key)
+               assert.ErrorContains(t, err, "event.pull_request is not a map")
+       })
+
+       t.Run("pull_request missing event.pull_request.merge", func(t 
*testing.T) {
+               key, err := getWriteIsolationKey(t.Context(), "pull_request", 
"whatever", map[string]any{
+                       "action":       "closed",
+                       "pull_request": map[string]any{},
+               })
+               require.Empty(t, key)
+               assert.ErrorContains(t, err, "event.pull_request.merged is not a bool")
+       })
+
+       t.Run("pull_request with event.pull_request.merge of an unexpected 
type", func(t *testing.T) {
+               key, err := getWriteIsolationKey(t.Context(), "pull_request", 
"whatever", map[string]any{
+                       "action": "closed",
+                       "pull_request": map[string]any{
+                               "merged": "string instead of bool",
+                       },
+               })
+               require.Empty(t, key)
+               assert.ErrorContains(t, err, "not a bool but string")
+       })
+}
+
 func TestRunnerCacheConfiguration(t *testing.T) {
        if testing.Short() {
                t.Skip("skipping integration test")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/pkg/config/config.example.yaml new/forgejo-runner-11.1.2/internal/pkg/config/config.example.yaml
--- old/forgejo-runner-11.1.1/internal/pkg/config/config.example.yaml   2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/pkg/config/config.example.yaml   2025-10-03 10:22:06.000000000 +0200
@@ -110,25 +110,20 @@
   #
   external_server: ""
   #
-  #######################################################################
-  #
-  # Common to the internal and external cache server
-  #
-  #######################################################################
-  #
   # The shared cache secret used to secure the communications between
   # the cache proxy and the cache server.
   #
   # If empty, it will be generated to a new secret automatically when
   # the server starts and it will stay the same until it restarts.
   #
-  # Every time the secret is modified, all cache entries that were
-  # created with it are invalidated. In order to ensure that the cache
-  # content is reused when the runner restarts, this secret must be
-  # set, for instance with the output of openssl rand -hex 40.
-  #
   secret: ""
   #
+  #######################################################################
+  #
+  # Common to the internal and external cache server
+  #
+  #######################################################################
+  #
   # The IP or hostname (195.84.20.30 or example.com) to use when constructing
   # ACTIONS_CACHE_URL which is the URL of the cache proxy.
   #
@@ -138,7 +133,7 @@
   # different network than the Forgejo runner (for instance when the
   # docker server used to create containers is not running on the same
   # host as the Forgejo runner), it may be impossible to figure that
-  # out automatically. In that case you can specifify which IP or
+  # out automatically. In that case you can specify which IP or
   # hostname to use to reach the internal cache server created by the
   # Forgejo runner.
   #
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/pkg/report/mask.go new/forgejo-runner-11.1.2/internal/pkg/report/mask.go
--- old/forgejo-runner-11.1.1/internal/pkg/report/mask.go       2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/pkg/report/mask.go       2025-10-03 10:22:06.000000000 +0200
@@ -38,7 +38,7 @@
                })
                // a multiline secret transformed into a single line by replacing
                // newlines with \ followed by n must also be redacted
-               secret = strings.Join(lines, "\\n")
+               o.lines = append(o.lines, strings.Join(lines, "\\n"))
        }
 
        o.lines = append(o.lines, secret)
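
Note on the mask.go hunk above: it corresponds to the "a multiline secret may be found in
a single log entry" changelog item. With the change, both the single-line form (newlines
replaced by a literal backslash-n) and the original multiline secret end up in o.lines,
which is what the new MultilineSecretInSingleRow test below exercises. A rough
illustration of the two patterns involved, assuming lines holds the secret split on
newlines as the surrounding code suggests; the real pattern handling lives in the masker:

    secret := "ABC\nDEF\nGHI"
    lines := strings.Split(secret, "\n")
    collapsed := strings.Join(lines, "\\n") // the secret flattened to one literal line: ABC\nDEF\nGHI
    patterns := []string{collapsed, secret} // both must now be replaced with *** in log rows
    _ = patterns
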
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/pkg/report/mask_test.go new/forgejo-runner-11.1.2/internal/pkg/report/mask_test.go
--- old/forgejo-runner-11.1.1/internal/pkg/report/mask_test.go  2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/pkg/report/mask_test.go  2025-10-03 10:22:06.000000000 +0200
@@ -7,6 +7,8 @@
        "fmt"
        "testing"
 
+       runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
+
        "github.com/stretchr/testify/assert"
 )
 
@@ -267,4 +269,17 @@
                        assert.Equal(t, testCase.out, rowsToString(rows))
                })
        }
+
+       t.Run("MultilineSecretInSingleRow", func(t *testing.T) {
+               secret := "ABC\nDEF\nGHI"
+               m := newMasker()
+               m.add(secret)
+               rows := []*runnerv1.LogRow{
+                       {Content: fmt.Sprintf("BEFORE%sAFTER", secret)},
+               }
+               noMore := false
+               needMore := m.replace(rows, noMore)
+               assert.False(t, needMore)
+               assert.Equal(t, "BEFORE***AFTER\n", rowsToString(rows))
+       })
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/forgejo-runner-11.1.1/internal/pkg/report/reporter.go new/forgejo-runner-11.1.2/internal/pkg/report/reporter.go
--- old/forgejo-runner-11.1.1/internal/pkg/report/reporter.go   2025-09-18 18:17:56.000000000 +0200
+++ new/forgejo-runner-11.1.2/internal/pkg/report/reporter.go   2025-10-03 10:22:06.000000000 +0200
@@ -13,6 +13,7 @@
        "time"
 
        runnerv1 "code.forgejo.org/forgejo/actions-proto/runner/v1"
+       "code.forgejo.org/forgejo/runner/v11/act/runner"
        "connectrpc.com/connect"
        retry "github.com/avast/retry-go/v4"
        log "github.com/sirupsen/logrus"
@@ -47,6 +48,7 @@
 
        debugOutputEnabled  bool
        stopCommandEndToken string
+       issuedLocalCancel   bool
 }
 
 func NewReporter(ctx context.Context, cancel context.CancelFunc, c client.Client, task *runnerv1.Task, reportInterval time.Duration) *Reporter {
@@ -173,7 +175,7 @@
        } else if !r.duringSteps() {
                r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
        }
-       if v, ok := entry.Data["stepResult"]; ok {
+       if v := runner.GetOuterStepResult(entry); v != nil {
                if stepResult, ok := r.parseResult(v); ok {
                        if step.LogLength == 0 {
                                step.LogIndex = int64(r.logOffset + len(r.logRows))
@@ -191,11 +193,19 @@
                return
        }
        if r.ctx.Err() != nil {
+               // This shouldn't happen because DaemonContext is used for `r.ctx` which should outlive any running job.
+               log.Warnf("Terminating RunDaemon on an active job due to error: %v", r.ctx.Err())
                return
        }
 
-       _ = r.ReportLog(false)
-       _ = r.ReportState()
+       err := r.ReportLog(false)
+       if err != nil {
+               log.Warnf("ReportLog error: %v", err)
+       }
+       err = r.ReportState()
+       if err != nil {
+               log.Warnf("ReportState error: %v", err)
+       }
 
        time.AfterFunc(r.reportInterval, r.RunDaemon)
 }
@@ -390,8 +400,17 @@
                r.outputs.Store(k, struct{}{})
        }
 
-       switch resp.Msg.GetState().GetResult() {
+       localResultState := state.GetResult()
+       remoteResultState := resp.Msg.GetState().GetResult()
+       switch remoteResultState {
        case runnerv1.Result_RESULT_CANCELLED, runnerv1.Result_RESULT_FAILURE:
+               // issuedLocalCancel is just used to deduplicate this log message if our local state doesn't catch up with our
+               // remote state as quickly as the report-interval, which would cause this message to repeat in the logs.
+               if !r.issuedLocalCancel && remoteResultState != localResultState {
+                       log.Infof("UpdateTask returned task result %v for a task that was in local state %v - beginning local task termination",
+                               remoteResultState, localResultState)
+                       r.issuedLocalCancel = true
+               }
                r.cancel()
        }
 

++++++ forgejo-runner.obsinfo ++++++
--- /var/tmp/diff_new_pack.9kXHYp/_old  2025-10-07 18:31:14.100728183 +0200
+++ /var/tmp/diff_new_pack.9kXHYp/_new  2025-10-07 18:31:14.112728690 +0200
@@ -1,5 +1,5 @@
 name: forgejo-runner
-version: 11.1.1
-mtime: 1758212276
-commit: 331979b887df761c14be6d59d4784727b1627a70
+version: 11.1.2
+mtime: 1759479726
+commit: b772be7131102d2c19a745811a569cd4ff1cbced
 

++++++ vendor.tar.gz ++++++
