Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package melange for openSUSE:Factory checked in at 2025-05-20 09:35:24
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/melange (Old)
 and      /work/SRC/openSUSE:Factory/.melange.new.30101 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "melange"

Tue May 20 09:35:24 2025 rev:87 rq:1278098 version:0.25.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/melange/melange.changes	2025-05-14 17:02:01.605296056 +0200
+++ /work/SRC/openSUSE:Factory/.melange.new.30101/melange.changes	2025-05-20 09:37:36.696098565 +0200
@@ -1,0 +2,20 @@
+Fri May 16 14:26:43 UTC 2025 - Johannes Kastl <opensuse_buildserv...@ojkastl.de>
+
+- Update to version 0.25.1:
+  * sbom: pass SHA512 not SHA256 when in use. (#1993)
+
+-------------------------------------------------------------------
+Fri May 16 12:56:29 UTC 2025 - Johannes Kastl <opensuse_buildserv...@ojkastl.de>
+
+- Update to version 0.25.0:
+  * license-check: fix running checks for builds using qemu (#1989)
+  * fix: improve QEMU resource limits handling (#1990)
+  * Fix/random port pwd shell (#1988)
+  * sbom: Populate downloadLocation and checksums (#1983)
+  * feat: use unsafe IO on QEMU, improve performance (#1987)
+  * Fix/metapackage builds (#1986)
+  * fix: ensure make parent dirs when unpacking files from
+    workspace (#1985)
+  * Fix test user permissions for logstash-8 (#1984)
+
+-------------------------------------------------------------------

Old:
----
  melange-0.24.0.obscpio

New:
----
  melange-0.25.1.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ melange.spec ++++++
--- /var/tmp/diff_new_pack.7r7DoL/_old	2025-05-20 09:37:38.016156174 +0200
+++ /var/tmp/diff_new_pack.7r7DoL/_new	2025-05-20 09:37:38.016156174 +0200
@@ -17,7 +17,7 @@
 Name:           melange
-Version:        0.24.0
+Version:        0.25.1
 Release:        0
 Summary:        Build APKs from source code
 License:        Apache-2.0

++++++ _service ++++++
--- /var/tmp/diff_new_pack.7r7DoL/_old	2025-05-20 09:37:38.052157745 +0200
+++ /var/tmp/diff_new_pack.7r7DoL/_new	2025-05-20 09:37:38.056157920 +0200
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/chainguard-dev/melange</param>
     <param name="scm">git</param>
     <param name="exclude">.git</param>
-    <param name="revision">v0.24.0</param>
+    <param name="revision">v0.25.1</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="changesgenerate">enable</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.7r7DoL/_old	2025-05-20 09:37:38.080158967 +0200
+++ /var/tmp/diff_new_pack.7r7DoL/_new	2025-05-20 09:37:38.084159142 +0200
@@ -1,6 +1,6 @@
 <servicedata>
 <service name="tar_scm">
   <param name="url">https://github.com/chainguard-dev/melange</param>
-  <param name="changesrevision">a6a3a2aac90fbbb220a215e8cc3508486801c607</param></service></servicedata>
+  <param name="changesrevision">e23f548858796928fd89c65819b1203390547958</param></service></servicedata>
(No newline at EOF)

++++++ melange-0.24.0.obscpio -> melange-0.25.1.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/build/build.go new/melange-0.25.1/pkg/build/build.go
--- old/melange-0.24.0/pkg/build/build.go	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/build/build.go	2025-05-16 15:59:31.000000000 +0200
@@ -512,6 +512,13 @@
     return nil
 }
 
+// isBuildLess returns true if the build context does not actually do any building.
+// TODO(kaniini): Improve the heuristic for this by checking for uses/runs statements
+// in the pipeline.
+func (b *Build) isBuildLess() bool {
+    return len(b.Configuration.Pipeline) == 0
+}
+
 // getBuildConfigPURL determines the package URL for the melange config file
 // itself.
 func (b Build) getBuildConfigPURL() (*purl.PackageURL, error) {
@@ -716,67 +723,69 @@
     linterQueue := []linterTarget{}
     cfg := b.workspaceConfig(ctx)
 
-    // Prepare guest directory
-    if err := os.MkdirAll(b.GuestDir, 0o755); err != nil {
-        return fmt.Errorf("mkdir -p %s: %w", b.GuestDir, err)
-    }
+    if !b.isBuildLess() {
+        // Prepare guest directory
+        if err := os.MkdirAll(b.GuestDir, 0o755); err != nil {
+            return fmt.Errorf("mkdir -p %s: %w", b.GuestDir, err)
+        }
 
-    log.Infof("building workspace in '%s' with apko", b.GuestDir)
+        log.Infof("building workspace in '%s' with apko", b.GuestDir)
 
-    guestFS := apkofs.DirFS(b.GuestDir, apkofs.WithCreateDir())
-    imgRef, err := b.buildGuest(ctx, b.Configuration.Environment, guestFS)
-    if err != nil {
-        return fmt.Errorf("unable to build guest: %w", err)
-    }
+        guestFS := apkofs.DirFS(b.GuestDir, apkofs.WithCreateDir())
+        imgRef, err := b.buildGuest(ctx, b.Configuration.Environment, guestFS)
+        if err != nil {
+            return fmt.Errorf("unable to build guest: %w", err)
+        }
 
-    cfg.ImgRef = imgRef
-    log.Debugf("ImgRef = %s", cfg.ImgRef)
+        cfg.ImgRef = imgRef
+        log.Debugf("ImgRef = %s", cfg.ImgRef)
 
-    // TODO(kaniini): Make overlay-binsh work with Docker and Kubernetes.
-    // Probably needs help from apko.
-    if err := b.overlayBinSh(); err != nil {
-        return fmt.Errorf("unable to install overlay /bin/sh: %w", err)
-    }
+        // TODO(kaniini): Make overlay-binsh work with Docker and Kubernetes.
+        // Probably needs help from apko.
+        if err := b.overlayBinSh(); err != nil {
+            return fmt.Errorf("unable to install overlay /bin/sh: %w", err)
+        }
 
-    if err := b.Runner.StartPod(ctx, cfg); err != nil {
-        return fmt.Errorf("unable to start pod: %w", err)
-    }
-    if !b.DebugRunner {
-        defer func() {
-            if err := b.Runner.TerminatePod(context.WithoutCancel(ctx), cfg); err != nil {
-                log.Warnf("unable to terminate pod: %s", err)
+        if err := b.Runner.StartPod(ctx, cfg); err != nil {
+            return fmt.Errorf("unable to start pod: %w", err)
+        }
+        if !b.DebugRunner {
+            defer func() {
+                if err := b.Runner.TerminatePod(context.WithoutCancel(ctx), cfg); err != nil {
+                    log.Warnf("unable to terminate pod: %s", err)
+                }
+            }()
+        }
+
+        // run the main pipeline
+        log.Debug("running the main pipeline")
+        pipelines := b.Configuration.Pipeline
+        if err := pr.runPipelines(ctx, pipelines); err != nil {
+            return fmt.Errorf("unable to run package %s pipeline: %w", b.Configuration.Name(), err)
+        }
+
+        for i, p := range pipelines {
+            uniqueID := strconv.Itoa(i)
+            pkg, err := p.SBOMPackageForUpstreamSource(b.Configuration.Package.LicenseExpression(), namespace, uniqueID)
+            if err != nil {
+                return fmt.Errorf("creating SBOM package for upstream source: %w", err)
             }
-        }()
-    }
-    // run the main pipeline
-    log.Debug("running the main pipeline")
-    pipelines := b.Configuration.Pipeline
-    if err := pr.runPipelines(ctx, pipelines); err != nil {
-        return fmt.Errorf("unable to run package %s pipeline: %w", b.Configuration.Name(), err)
-    }
+            if pkg == nil {
+                // This particular pipeline step doesn't tell us about the upstream source code.
+                continue
+            }
-    for i, p := range pipelines {
-        uniqueID := strconv.Itoa(i)
-        pkg, err := p.SBOMPackageForUpstreamSource(b.Configuration.Package.LicenseExpression(), namespace, uniqueID)
-        if err != nil {
-            return fmt.Errorf("creating SBOM package for upstream source: %w", err)
+            b.SBOMGroup.AddUpstreamSourcePackage(pkg)
         }
-        if pkg == nil {
-            // This particular pipeline step doesn't tell us about the upstream source code.
-            continue
+        // add the main package to the linter queue
+        lintTarget := linterTarget{
+            pkgName:  b.Configuration.Package.Name,
+            disabled: b.Configuration.Package.Checks.Disabled,
         }
-
-        b.SBOMGroup.AddUpstreamSourcePackage(pkg)
-    }
-
-    // add the main package to the linter queue
-    lintTarget := linterTarget{
-        pkgName:  b.Configuration.Package.Name,
-        disabled: b.Configuration.Package.Checks.Disabled,
+        linterQueue = append(linterQueue, lintTarget)
     }
-    linterQueue = append(linterQueue, lintTarget)
 
     // run any pipelines for subpackages
     for _, sp := range b.Configuration.Subpackages {
@@ -785,12 +794,14 @@
             return err
         }
 
-        log.Infof("running pipeline for subpackage %s", sp.Name)
+        if !b.isBuildLess() {
+            log.Infof("running pipeline for subpackage %s", sp.Name)
 
-        ctx := clog.WithLogger(ctx, log.With("subpackage", sp.Name))
+            ctx := clog.WithLogger(ctx, log.With("subpackage", sp.Name))
 
-        if err := pr.runPipelines(ctx, sp.Pipeline); err != nil {
-            return fmt.Errorf("unable to run subpackage %s pipeline: %w", sp.Name, err)
+            if err := pr.runPipelines(ctx, sp.Pipeline); err != nil {
+                return fmt.Errorf("unable to run subpackage %s pipeline: %w", sp.Name, err)
+            }
         }
 
         // add the main package to the linter queue
@@ -932,9 +943,11 @@
         log.Warnf("unable to clean workspace: %s", err)
     }
 
-    // clean build guest container
-    if err := os.RemoveAll(b.GuestDir); err != nil {
-        log.Warnf("unable to clean guest container: %s", err)
+    if !b.isBuildLess() {
+        // clean build guest container
+        if err := os.RemoveAll(b.GuestDir); err != nil {
+            log.Warnf("unable to clean guest container: %s", err)
+        }
     }
 
     // generate APKINDEX.tar.gz and sign it
@@ -1094,14 +1107,17 @@
 func (b *Build) buildWorkspaceConfig(ctx context.Context) *container.Config {
     log := clog.FromContext(ctx)
 
+    if b.isBuildLess() {
+        return &container.Config{
+            Arch:         b.Arch,
+            WorkspaceDir: b.WorkspaceDir,
+        }
+    }
-    mounts := []container.BindMount{{
-        Source:      b.WorkspaceDir,
-        Destination: container.DefaultWorkspaceDir,
-    }, {
-        Source:      "/etc/resolv.conf",
-        Destination: container.DefaultResolvConfPath,
-    }}
+    mounts := []container.BindMount{
+        {Source: b.WorkspaceDir, Destination: container.DefaultWorkspaceDir},
+        {Source: "/etc/resolv.conf", Destination: container.DefaultResolvConfPath},
+    }
 
     if b.CacheDir != "" {
         if fi, err := os.Stat(b.CacheDir); err == nil && fi.IsDir() {
@@ -1211,6 +1227,10 @@
         }
 
     case tar.TypeReg:
+        parentDir := filepath.Dir(hdr.Name)
+        if err := fs.MkdirAll(parentDir, 0o755); err != nil {
+            return fmt.Errorf("unable to create directory %s: %w", hdr.Name, err)
+        }
         f, err := fs.OpenFile(hdr.Name, os.O_CREATE|os.O_WRONLY, hdr.FileInfo().Mode())
         if err != nil {
             return fmt.Errorf("unable to open file %s: %w", hdr.Name, err)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/build/testdata/goldenfiles/sboms/7zip-two-fetches-2301-r3.spdx.json new/melange-0.25.1/pkg/build/testdata/goldenfiles/sboms/7zip-two-fetches-2301-r3.spdx.json
--- old/melange-0.24.0/pkg/build/testdata/goldenfiles/sboms/7zip-two-fetches-2301-r3.spdx.json	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/build/testdata/goldenfiles/sboms/7zip-two-fetches-2301-r3.spdx.json	2025-05-16 15:59:31.000000000 +0200
@@ -60,9 +60,15 @@
       "filesAnalyzed": false,
       "licenseConcluded": "NOASSERTION",
       "licenseDeclared": "NOASSERTION",
-      "downloadLocation": "NOASSERTION",
+      "downloadLocation": "https://7-zip.org/a/7z2301-src.tar.xz",
       "originator": "Organization: Wolfi",
       "supplier": "Organization: Wolfi",
+      "checksums": [
+        {
+          "algorithm": "SHA512",
+          "checksumValue": "e39f660c023aa65e55388be225b5591fe2a5c9138693f3c9107e2eb4ce97fafde118d3375e01ada99d29de9633f56221b5b3d640c982178884670cd84c8aa986"
+        }
+      ],
       "externalRefs": [
         {
           "referenceCategory": "PACKAGE-MANAGER",
@@ -78,9 +84,15 @@
       "filesAnalyzed": false,
       "licenseConcluded": "NOASSERTION",
       "licenseDeclared": "NOASSERTION",
-      "downloadLocation": "NOASSERTION",
+      "downloadLocation": "https://7-zip.org/a/7z2301-src.tar.xz",
       "originator": "Organization: Wolfi",
       "supplier": "Organization: Wolfi",
+      "checksums": [
+        {
+          "algorithm": "SHA512",
+          "checksumValue": "e39f660c023aa65e55388be225b5591fe2a5c9138693f3c9107e2eb4ce97fafde118d3375e01ada99d29de9633f56221b5b3d640c982178884670cd84c8aa986"
+        }
+      ],
       "externalRefs": [
         {
           "referenceCategory": "PACKAGE-MANAGER",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/build/testdata/goldenfiles/sboms/crane-0.20.2-r1.spdx.json new/melange-0.25.1/pkg/build/testdata/goldenfiles/sboms/crane-0.20.2-r1.spdx.json
--- old/melange-0.24.0/pkg/build/testdata/goldenfiles/sboms/crane-0.20.2-r1.spdx.json	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/build/testdata/goldenfiles/sboms/crane-0.20.2-r1.spdx.json	2025-05-16 15:59:31.000000000 +0200
@@ -60,7 +60,7 @@
       "filesAnalyzed": false,
       "licenseConcluded": "NOASSERTION",
       "licenseDeclared": "Apache-2.0",
-      "downloadLocation": "NOASSERTION",
+      "downloadLocation": "git+https://github.com/google/go-containerregistry@v0.20.2",
       "originator": "Organization: Google",
       "supplier": "Organization: Google",
       "externalRefs": [
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/config/config.go new/melange-0.25.1/pkg/config/config.go
--- old/melange-0.24.0/pkg/config/config.go	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/config/config.go	2025-05-16 15:59:31.000000000 +0200
@@ -539,14 +539,17 @@
     case "fetch":
         args := make(map[string]string)
         args["download_url"] = with["uri"]
+        checksums := make(map[string]string)
 
         expectedSHA256 := with["expected-sha256"]
         if len(expectedSHA256) > 0 {
            args["checksum"] = "sha256:" + expectedSHA256
+           checksums["SHA256"] = expectedSHA256
        }
        expectedSHA512 := with["expected-sha512"]
        if len(expectedSHA512) > 0 {
            args["checksum"] = "sha512:" + expectedSHA512
+           checksums["SHA512"] = expectedSHA512
        }
 
        // These get defaulted correctly from within the fetch pipeline definition
@@ -570,11 +573,13 @@
        }
 
        return &sbom.Package{
-           IDComponents: idComponents,
-           Name:         pkgName,
-           Version:      pkgVersion,
-           Namespace:    supplier,
-           PURL:         pu,
+           IDComponents:     idComponents,
+           Name:             pkgName,
+           Version:          pkgVersion,
+           Namespace:        supplier,
+           Checksums:        checksums,
+           PURL:             pu,
+           DownloadLocation: args["download_url"],
        }, nil
 
    case "git-checkout":
@@ -582,6 +587,7 @@
        branch := with["branch"]
        tag := with["tag"]
        expectedCommit := with["expected-commit"]
+       downloadLocation := "git+" + repo
 
        // We'll use all available data to ensure our SBOM's package ID is unique, even
        // when the same repo is git-checked out multiple times.
@@ -615,6 +621,10 @@
                continue
            }
 
+           // URI format supports use of commit or tag as suffix
+           // the commit is also passed in the checksums list.
+           downloadLocation += "@" + v
+
            pu := &purl.PackageURL{
                Type:      purl.TypeGithub,
                Namespace: namespace,
@@ -626,12 +636,13 @@
            }
 
            return &sbom.Package{
-               IDComponents:    idComponents,
-               Name:            name,
-               Version:         v,
-               LicenseDeclared: licenseDeclared,
-               Namespace:       namespace,
-               PURL:            pu,
+               IDComponents:     idComponents,
+               Name:             name,
+               Version:          v,
+               LicenseDeclared:  licenseDeclared,
+               Namespace:        namespace,
+               PURL:             pu,
+               DownloadLocation: downloadLocation,
            }, nil
        }
 
@@ -649,6 +660,12 @@
        // Encode vcs_url with git+ prefix and @commit suffix
        vcsUrl := "git+" + repo
 
+       if len(tag) > 0 {
+           downloadLocation += "@" + tag
+       } else if len(expectedCommit) > 0 {
+           downloadLocation += "@" + expectedCommit
+       }
+
        if len(expectedCommit) > 0 {
            vcsUrl += "@" + expectedCommit
        }
@@ -670,12 +687,13 @@
        }
 
        return &sbom.Package{
-           IDComponents:    idComponents,
-           Name:            name,
-           Version:         version,
-           LicenseDeclared: licenseDeclared,
-           Namespace:       supplier,
-           PURL:            &pu,
+           IDComponents:     idComponents,
+           Name:             name,
+           Version:          version,
+           LicenseDeclared:  licenseDeclared,
+           Namespace:        supplier,
+           PURL:             &pu,
+           DownloadLocation: downloadLocation,
        }, nil
    }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/container/qemu_runner.go new/melange-0.25.1/pkg/container/qemu_runner.go
--- old/melange-0.24.0/pkg/container/qemu_runner.go	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/container/qemu_runner.go	2025-05-16 15:59:31.000000000 +0200
@@ -45,6 +45,7 @@
     apko_types "chainguard.dev/apko/pkg/build/types"
     apko_cpio "chainguard.dev/apko/pkg/cpio"
     "chainguard.dev/melange/internal/logwriter"
+    "chainguard.dev/melange/pkg/license"
     "github.com/chainguard-dev/clog"
     "github.com/charmbracelet/log"
     v1 "github.com/google/go-containerregistry/pkg/v1"
@@ -82,6 +83,11 @@
 // Run runs a Qemu task given a Config and command string.
 func (bw *qemu) Run(ctx context.Context, cfg *Config, envOverride map[string]string, args ...string) error {
+    // in case of buildless pipelines we just nop
+    if cfg.SSHKey == nil {
+        return nil
+    }
+
     log := clog.FromContext(ctx)
     stdout, stderr := logwriter.New(log.Info), logwriter.New(log.Warn)
     defer stdout.Close()
@@ -293,7 +299,8 @@
     clog.FromContext(ctx).Debugf("running debug command: %v", args)
 
-    err = session.Shell()
+    cmd := shellquote.Join(args...)
+    err = session.Run(cmd)
     if err != nil {
         clog.FromContext(ctx).Errorf("Failed to start shell: %v", err)
         return err
@@ -331,19 +338,27 @@
     ctx, span := otel.Tracer("melange").Start(ctx, "qemu.StartPod")
     defer span.End()
 
-    port, err := randomPortN()
+    sshPort, err := randomPortN()
     if err != nil {
         return err
     }
 
-    cfg.SSHAddress = "127.0.0.1:" + strconv.Itoa(port)
+    cfg.SSHAddress = "127.0.0.1:" + strconv.Itoa(sshPort)
 
-    port, err = randomPortN()
-    if err != nil {
-        return err
+    // ensure sshWorkspacePort is random but not same as port1
+    var sshWorkspacePort int
+    for {
+        sshWorkspacePort, err = randomPortN()
+        if err != nil {
+            return err
+        }
+
+        if sshWorkspacePort != sshPort {
+            break
+        }
     }
 
-    cfg.SSHWorkspaceAddress = "127.0.0.1:" + strconv.Itoa(port)
+    cfg.SSHWorkspaceAddress = "127.0.0.1:" + strconv.Itoa(sshWorkspacePort)
 
     return createMicroVM(ctx, cfg)
 }
@@ -378,6 +393,23 @@
 
 // WorkspaceTar implements Runner
 func (bw *qemu) WorkspaceTar(ctx context.Context, cfg *Config, extraFiles []string) (io.ReadCloser, error) {
+    // in case of buildless pipelines we just nop
+    if cfg.SSHKey == nil {
+        return nil, nil
+    }
+
+    // For qemu, we also want to get all the detected license files for the
+    // license checking that will be done later.
+    // First, get the list of all files from the remote workspace.
+    licenseFiles, err := getWorkspaceLicenseFiles(ctx, cfg, extraFiles)
+    if err != nil {
+        clog.FromContext(ctx).Errorf("failed to extract list of files for licensing: %v", err)
+        return nil, err
+    }
+    // Now, append those files to the extraFiles list (there should be no
+    // duplicates)
+    extraFiles = append(extraFiles, licenseFiles...)
+
     outFile, err := os.Create(filepath.Join(cfg.WorkspaceDir, "melange-out.tar"))
     if err != nil {
         return nil, err
@@ -405,18 +437,28 @@
         user = cfg.RunAs
     }
 
+    log := clog.FromContext(ctx)
+    stderr := logwriter.New(log.Debug)
     err = sendSSHCommand(ctx,
         user,
         cfg.SSHWorkspaceAddress,
         cfg,
         nil,
         nil,
-        nil,
+        stderr,
         outFile,
         false,
         []string{"sh", "-c", retrieveCommand},
     )
+    if err != nil {
+        var buf bytes.Buffer
+        _, cerr := io.Copy(&buf, outFile)
+        if cerr != nil {
+            clog.FromContext(ctx).Errorf("failed to tar workspace: %v", cerr)
+            return nil, cerr
+        }
+        clog.FromContext(ctx).Errorf("failed to tar workspace: %v", buf.String())
         return nil, err
     }
@@ -521,29 +563,29 @@
         }
     }
 
+    // default to use 85% of available memory, if a mem limit is set, respect it.
+    mem := int64(float64(getAvailableMemoryKB())*0.85)
     if cfg.Memory != "" {
         memKb, err := convertHumanToKB(cfg.Memory)
         if err != nil {
             return err
         }
-        if memKb > int64(getAvailableMemoryKB()) {
-            log.Warnf("qemu: requested too much memory, requested: %d, have: %d", memKb, getAvailableMemoryKB())
-            memKb = int64(getAvailableMemoryKB())
+        if mem > memKb {
+            mem = memKb
         }
-
-        cfg.Memory = fmt.Sprintf("%dk", memKb)
-    } else {
-        // Use at most ~85% of the available host memory
-        cfg.Memory = fmt.Sprintf("%dk", int(float64(getAvailableMemoryKB())*0.85))
     }
-    baseargs = append(baseargs, "-m", cfg.Memory)
+    baseargs = append(baseargs, "-m", fmt.Sprintf("%dk", mem))
 
+    // default to use all CPUs, if a cpu limit is set, respect it.
+    nproc := runtime.NumCPU()
     if cfg.CPU != "" {
-        baseargs = append(baseargs, "-smp", cfg.CPU)
-    } else {
-        baseargs = append(baseargs, "-smp", fmt.Sprintf("%d", runtime.NumCPU()))
+        cpu, err := strconv.Atoi(cfg.CPU)
+        if err == nil && nproc > cpu {
+            nproc = cpu
+        }
     }
+    baseargs = append(baseargs, "-smp", fmt.Sprintf("%d", nproc))
 
     // use kvm on linux, Hypervisor.framework on macOS, and software for cross-arch
     switch {
@@ -632,7 +674,7 @@
     // append raw disk, init will take care of formatting it if present.
     baseargs = append(baseargs, "-object", "iothread,id=io1")
     baseargs = append(baseargs, "-device", "virtio-blk-pci,drive=disk0,iothread=io1")
-    baseargs = append(baseargs, "-drive", "if=none,id=disk0,cache=none,format=raw,aio=threads,werror=report,rerror=report,file="+diskFile)
+    baseargs = append(baseargs, "-drive", "if=none,id=disk0,cache=unsafe,format=raw,aio=threads,werror=report,rerror=report,file="+diskFile)
     // append the rootfs tar.gz, init will take care of populating the disk with it
     baseargs = append(baseargs, "-device", "virtio-blk-pci,drive=image.tar,serial=input-tar,discard=true")
     baseargs = append(baseargs, "-blockdev", "driver=raw,node-name=image.tar,file.driver=file,file.filename="+cfg.ImgRef)
@@ -737,6 +779,9 @@
         user = cfg.RunAs
     }
 
+    stdout, stderr := logwriter.New(log.Info), logwriter.New(log.Warn)
+    defer stdout.Close()
+    defer stderr.Close()
     clog.FromContext(ctx).Info("qemu: setting up local workspace")
     return sendSSHCommand(ctx,
         user,
@@ -744,11 +789,67 @@
         cfg,
         nil,
         nil,
+        stderr,
+        stdout,
+        false,
+        []string{"sh", "-c", "find /mnt/ -mindepth 1 -maxdepth 1 -exec cp -a {} /home/build/ \\;"},
+    )
+}
+
+// getWorkspaceLicenseFiles returns a list of possible license files from the
+// workspace
+func getWorkspaceLicenseFiles(ctx context.Context, cfg *Config, extraFiles []string) ([]string, error) {
+    // default to root user, unless a different user is specified
+    user := "root"
+    if cfg.RunAs != "" {
+        user = cfg.RunAs
+    }
+
+    // let's create a string writer so that the SSH command can write
+    // the list of files to it
+    var buf bytes.Buffer
+    bufWriter := bufio.NewWriter(&buf)
+    defer bufWriter.Flush()
+    err := sendSSHCommand(ctx,
+        user,
+        cfg.SSHWorkspaceAddress,
+        cfg,
+        nil, nil, nil,
+        bufWriter,
         false,
-        []string{"sh", "-c", "cp -a /mnt/. /home/build"},
+        []string{"sh", "-c", "cd /mount/home/build && find . -type f -print"},
     )
+
+    if err != nil {
+        clog.FromContext(ctx).Errorf("failed to extract list of files for licensing: %v", buf.String())
+        return nil, err
+    }
+
+    // Turn extraFiles into a map for faster lookup
+    extraFilesMap := make(map[string]struct{})
+    for _, file := range extraFiles {
+        extraFilesMap[filepath.Clean(file)] = struct{}{}
+    }
+
+    // Now, we can read the list of files from the string writer and add those
+    // license files that are not in the extraFiles list
+    licenseFiles := []string{}
+    foundFiles := strings.SplitSeq(buf.String(), "\n")
+    for f := range foundFiles {
+        if _, ok := extraFilesMap[filepath.Clean(f)]; ok {
+            continue
+        }
+        if strings.Contains(f, "melange-out") {
+            continue
+        }
+        if is, _ := license.IsLicenseFile(filepath.Base(f)); is {
+            licenseFiles = append(licenseFiles, f)
+        }
+    }
+
+    return licenseFiles, nil
 }
 
 func getKernelPath(ctx context.Context, cfg *Config) (string, error) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/license/license.go new/melange-0.25.1/pkg/license/license.go
--- old/melange-0.24.0/pkg/license/license.go	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/license/license.go	2025-05-16 15:59:31.000000000 +0200
@@ -20,6 +20,7 @@
     "io/fs"
     "path/filepath"
     "regexp"
+    "slices"
     "sort"
     "strings"
 
@@ -129,7 +130,6 @@
 func FindLicenseFiles(fsys fs.FS) ([]LicenseFile, error) {
     // This file is using regular expressions defined in the regexp.go file
     var licenseFiles []LicenseFile
-    var ignore bool
     err := fs.WalkDir(fsys, ".", func(filePath string, info fs.DirEntry, err error) error {
         if err != nil {
             return err
@@ -143,22 +143,8 @@
             return nil
         }
 
-        // Check if the file matches any of the license-related regex patterns
-        for regex, weight := range filenameRegexes {
-            if !regex.MatchString(info.Name()) {
-                continue
-            }
-            // licensee does this check as part of the regex, but in go we don't have
-            // the same regex capabilities
-            for _, ext := range ignoredExt {
-                if ignore = filepath.Ext(info.Name()) == ext; ignore {
-                    break
-                }
-            }
-            if ignore {
-                continue
-            }
-
+        is, weight := IsLicenseFile(info.Name())
+        if is {
             // Licenses in the top level directory have a higher weight so that they
             // always appear first
             if filepath.Dir(filePath) == "." {
@@ -169,7 +155,6 @@
                 Path:   filePath,
                 Weight: weight,
             })
-            break
         }
 
         return nil
@@ -187,6 +172,26 @@
     return licenseFiles, nil
 }
 
+// IsLicenseFile checks if a file is a license file based on its name.
+// Returns true/false if the file is a license file, and the weight value
+// associated with the match, as some matches are potentially more relevant.
+func IsLicenseFile(filename string) (bool, float64) {
+    filenameExt := filepath.Ext(filename)
+    // Check if the file matches any of the license-related regex patterns
+    for regex, weight := range filenameRegexes {
+        if !regex.MatchString(filename) {
+            continue
+        }
+        // licensee does this check as part of the regex, but in go we don't have
+        // the same regex capabilities
+        if slices.Contains(ignoredExt, filenameExt) {
+            continue
+        }
+        return true, weight
+    }
+    return false, 0.0
+}
+
 // CollectLicenseInfo collects license information from the given filesystem.
 func CollectLicenseInfo(ctx context.Context, fsys fs.FS) ([]License, error) {
     log := clog.FromContext(ctx)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/license/license_test.go new/melange-0.25.1/pkg/license/license_test.go
--- old/melange-0.24.0/pkg/license/license_test.go	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/license/license_test.go	2025-05-16 15:59:31.000000000 +0200
@@ -117,6 +117,7 @@
         "LICENSE-BSD-modified": "BSD-3-Clause",
         "LICENSE-GPLv2":        "GPL-2.0",
         "LICENSE-GPLv3":        "GPL-3.0",
+        "COPYRIGHT":            "NOASSERTION",
     }
 
     testDataDir := "testdata"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/melange-0.24.0/pkg/sbom/package.go new/melange-0.25.1/pkg/sbom/package.go
--- old/melange-0.24.0/pkg/sbom/package.go	2025-05-13 19:30:34.000000000 +0200
+++ new/melange-0.25.1/pkg/sbom/package.go	2025-05-16 15:59:31.000000000 +0200
@@ -74,14 +74,18 @@
     // Checksums of the package. The keys are the checksum algorithms (e.g. "SHA-256"),
     // and the values are the checksums.
-    //
-    // TODO: We're not currently using this field, consider removing it.
     Checksums map[string]string
 
     // The Package URL for this package, if any. If set, it will be added as the
     // only ExternalRef of type "purl" to the SPDX package. (A package
     // should have only one PURL external ref.)
     PURL *purl.PackageURL
+
+    // The Download Location for this package, if any; If set, this is generated
+    // alongside the PackageURL from fetch/git-checkout pipelines for upstream
+    // source locations; Leaving this empty will result in NOASSERTION being
+    // used as its value.
+    DownloadLocation string
 }
 
 // ToSPDX returns the Package converted to its SPDX representation.
@@ -98,6 +102,10 @@
         }
     }
 
+    if p.DownloadLocation == "" {
+        p.DownloadLocation = spdx.NOASSERTION
+    }
+
     sp := spdx.Package{
         ID:   p.ID(),
         Name: p.Name,
@@ -105,7 +113,7 @@
         FilesAnalyzed:    false,
         LicenseConcluded: spdx.NOASSERTION,
         LicenseDeclared:  p.LicenseDeclared,
-        DownloadLocation: spdx.NOASSERTION,
+        DownloadLocation: p.DownloadLocation,
         CopyrightText:    p.Copyright,
         Checksums:        p.getChecksums(),
         ExternalRefs:     p.getExternalRefs(),

++++++ melange.obsinfo ++++++
--- /var/tmp/diff_new_pack.7r7DoL/_old	2025-05-20 09:37:38.316169267 +0200
+++ /var/tmp/diff_new_pack.7r7DoL/_new	2025-05-20 09:37:38.320169442 +0200
@@ -1,5 +1,5 @@
 name: melange
-version: 0.24.0
-mtime: 1747157434
-commit: a6a3a2aac90fbbb220a215e8cc3508486801c607
+version: 0.25.1
+mtime: 1747403971
+commit: e23f548858796928fd89c65819b1203390547958

++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/melange/vendor.tar.gz /work/SRC/openSUSE:Factory/.melange.new.30101/vendor.tar.gz differ: char 134, line 3
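
The SBOM changes above (#1983, #1993) come down to two things: the fetch pipeline now carries the declared checksum and download URL into the sbom.Package, and an empty download location still serializes as NOASSERTION. What follows is a minimal, self-contained Go sketch of that behaviour, not melange's real pkg/sbom API: the sbomPackage type, the local NOASSERTION constant and buildFetchSBOM are simplified stand-ins, and the SHA512-over-SHA256 preference is sketched from the 0.25.1 changelog entry (#1993) rather than copied from the diff.

package main

import "fmt"

// NOASSERTION mirrors the SPDX fallback value used when no download location is known.
const NOASSERTION = "NOASSERTION"

// sbomPackage is a simplified stand-in for the fields touched by this release.
type sbomPackage struct {
	DownloadLocation string
	Checksums        map[string]string
}

// buildFetchSBOM sketches the fetch-pipeline behaviour: record SHA512 when the
// pipeline declares expected-sha512 (preferring it over SHA256, per #1993) and
// carry the download URL through to the SPDX downloadLocation field.
func buildFetchSBOM(uri, expectedSHA256, expectedSHA512 string) sbomPackage {
	checksums := make(map[string]string)
	if expectedSHA512 != "" {
		checksums["SHA512"] = expectedSHA512
	} else if expectedSHA256 != "" {
		checksums["SHA256"] = expectedSHA256
	}

	p := sbomPackage{DownloadLocation: uri, Checksums: checksums}
	if p.DownloadLocation == "" {
		// an empty location still ends up as NOASSERTION, as in pkg/sbom/package.go
		p.DownloadLocation = NOASSERTION
	}
	return p
}

func main() {
	p := buildFetchSBOM("https://7-zip.org/a/7z2301-src.tar.xz", "", "<sha512 of the source tarball>")
	fmt.Println(p.DownloadLocation, p.Checksums)
}

With a declared SHA512 this yields the downloadLocation plus a single SHA512 checksum entry, matching the shape of the updated golden SPDX files above.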
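The random-port change (#1988) is similarly small: the workspace SSH port is re-drawn until it differs from the primary SSH port, so the two QEMU forwards can never collide. A rough sketch of just that retry logic; randomPort here is a hypothetical stand-in for melange's randomPortN helper and does not probe whether the port is actually free.

package main

import (
	"fmt"
	"math/rand"
)

// randomPort is a stand-in for melange's randomPortN; it only illustrates the
// retry logic, not how a free port is actually located.
func randomPort() (int, error) {
	return 1024 + rand.Intn(64511), nil
}

// pickSSHPorts draws the workspace port repeatedly until it differs from the
// primary SSH port, mirroring the loop added in qemu_runner.go.
func pickSSHPorts() (sshPort, workspacePort int, err error) {
	sshPort, err = randomPort()
	if err != nil {
		return 0, 0, err
	}
	for {
		workspacePort, err = randomPort()
		if err != nil {
			return 0, 0, err
		}
		if workspacePort != sshPort {
			return sshPort, workspacePort, nil
		}
	}
}

func main() {
	a, b, _ := pickSSHPorts()
	fmt.Printf("ssh=127.0.0.1:%d workspace=127.0.0.1:%d\n", a, b)
}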