Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package btrfsmaintenance for openSUSE:Factory checked in at 2024-07-05 19:45:21
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/btrfsmaintenance (Old)
 and      /work/SRC/openSUSE:Factory/.btrfsmaintenance.new.2080 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "btrfsmaintenance"

Fri Jul  5 19:45:21 2024 rev:25 rq:1185448 version:0.5.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/btrfsmaintenance/btrfsmaintenance.changes       2024-04-14 11:53:43.501479735 +0200
+++ /work/SRC/openSUSE:Factory/.btrfsmaintenance.new.2080/btrfsmaintenance.changes     2024-07-05 19:50:18.372447148 +0200
@@ -1,0 +2,18 @@
+Thu Jul  4 00:00:00 CEST 2024 - dste...@suse.cz
+
+- update to version 0.5.2
+  - fix syntax error in run_task that prevented jobs from starting
+  - start scrub jobs sequentially if RAID5 or RAID6 data profile is found
+  - fix btrfsmaintenance-refresh.service description
+- fix bsc#1224364
+
+-------------------------------------------------------------------
+Thu May  9 00:00:00 CEST 2024 - dste...@suse.cz
+
+- update to version 0.5.1
+  - fix handling of OnCalendar timer directive in the drop-in configuration file
+    that reads the periods from the sysconfig
+  - fix use of --verbose option of flock, not available on util-linux < 2.27
+  - ship manual page of README, also available as 'systemctl help servicename'
+
+-------------------------------------------------------------------

Old:
----
  btrfsmaintenance-0.5.tar.gz

New:
----
  btrfsmaintenance-0.5.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ btrfsmaintenance.spec ++++++
--- /var/tmp/diff_new_pack.K0nS2c/_old  2024-07-05 19:50:19.260479841 +0200
+++ /var/tmp/diff_new_pack.K0nS2c/_new  2024-07-05 19:50:19.260479841 +0200
@@ -22,7 +22,7 @@
 %endif
 
 Name:           btrfsmaintenance
-Version:        0.5
+Version:        0.5.2
 Release:        0
 Summary:        Scripts for btrfs periodic maintenance tasks
 License:        GPL-2.0-only


++++++ btrfsmaintenance-0.5.tar.gz -> btrfsmaintenance-0.5.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/CHANGES.md new/btrfsmaintenance-0.5.2/CHANGES.md
--- old/btrfsmaintenance-0.5/CHANGES.md 2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/CHANGES.md       2024-07-04 20:15:46.000000000 +0200
@@ -1,4 +1,17 @@
-version 0.5 (2020-07-30)
+# Version 0.5.2 (2024-07-04)
+
+- fix syntax error in run_task that prevented jobs from starting
+- start scrub jobs sequentially if RAID5 or RAID6 data profile is found
+- fix btrfsmaintenance-refresh.service description
+
+# Version 0.5.1 (2024-05-06)
+
+- fix handling of OnCalendar timer directive in the drop-in configuration file
+  that reads the periods from the sysconfig
+- fix use of --verbose option of flock, not available on util-linux < 2.27
+- ship manual page of README, also available as 'systemctl help servicename'
+
+# Version 0.5 (2020-07-30)
 
 - sysconfig:
   - change defaults of MUSAGE and DUSAGE for balance task to do less work,
@@ -12,19 +25,21 @@
   - add alternative shell implementation of the plugin
 - installation docs update
 
-version 0.4.2 (2018-09-25)
+# Version 0.4.2 (2018-09-25)
+
 - CVE-2018-14722: expand auto mountpoints in a safe way
 - btrfs-defrag: fix missing function to detect btrfs filesystems (#52)
 - btrfs-trim: more verbose fstrim output (#60)
 - dist-install: print information about timer unit installation (#58)
 
-version 0.4.1 (2018-03-15)
+# Version 0.4.1 (2018-03-15)
+
 - defrag plugin: python2 and 3 compatibility
 - defrag plugin: target extent size lowered to 32MiB (#43)
 - shell compatibility fixes
 - systemd unit type fixes
 
-version 0.4 (2018-01-18)
+# Version 0.4 (2018-01-18)
 
 - add support for systemd timers and use them by default; the alternative cron
   scripts are still present (#29, #36)
@@ -34,13 +49,13 @@
 - spec file cleanups
 - documentation updates
 
-version 0.3.1 (2017-04-07)
+# Version 0.3.1 (2017-04-07)
 
 - dist-install: fix installation paths, install functions
 - functions: fix syntax to be compatible with dash
 - spec: install functions file
 
-version 0.3 (2016-11-15)
+# Version 0.3 (2016-11-15)
 
 - add syslog to logging targets
 - add none target (/dev/null)
@@ -52,7 +67,7 @@
 - add generic installation script
 - doc updates: retention policy tuning
 
-version 0.2 (2016-03-04)
+# Version 0.2 (2016-03-04)
 
 - updated documentation
 - support debian-like configuration paths
@@ -60,12 +75,12 @@
 - fixed logger name typos for 'journal' target
 - defrag fixes (sysconfig, find arguments)
 
-version 0.1.2 (2015-10-08)
+# Version 0.1.2 (2015-10-08)
 
 - change default config for trim: off
 - journal loggin should work (fixed a typo)
 
-version 0.1.1 (2015-07-13)
+# Version 0.1.1 (2015-07-13)
 
 - fix typo and make journal logging target work
 - cron refresh: remove bashism
@@ -75,6 +90,6 @@
 - add config option to specify log target (stdout, or journal)
 - fix sysconfig file Path: tags
 
-version 0.1 (2014-09-24)
+# Version 0.1 (2014-09-24)
 
 - initial
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/README.man new/btrfsmaintenance-0.5.2/README.man
--- old/btrfsmaintenance-0.5/README.man 1970-01-01 01:00:00.000000000 +0100
+++ new/btrfsmaintenance-0.5.2/README.man       2024-07-04 20:15:46.000000000 +0200
@@ -0,0 +1,616 @@
+.nh
+.TH Btrfs maintenance toolbox
+.PP
+Table of contents:
+
+.RS
+.IP \(bu 2
+Quick start
+\[la]#quick-start\[ra]
+.IP \(bu 2
+Distro integration
+\[la]#distro-integration\[ra]
+.IP \(bu 2
+Tuning periodic snapshotting
+\[la]#tuning-periodic-snapshotting\[ra]
+
+.RE
+
+.PP
+This is a set of scripts supplementing the btrfs filesystem and aims to automate
+a few maintenance tasks. This means the \fIscrub\fP, \fIbalance\fP, \fItrim\fP or
+\fIdefragmentation\fP\&.
+
+.PP
+Each of the tasks can be turned on/off and configured independently. The
+default config values were selected to fit the default installation profile
+with btrfs on the root filesystem.
+
+.PP
+Overall tuning of the default values should give a good balance between effects
+of the tasks and low impact of other work on the system. If this does not fit
+your needs, please adjust the settings.
+
+.SH Tasks
+.PP
+The following sections will describe the tasks in detail. There's one config
+option that affects the task concurrency, \fB\fCBTRFS\_ALLOW\_CONCURRENCY\fR\&. This is
+to avoid extra high resource consumption or unexpected interaction among the
+tasks and will serialize them in the order they're started by timers.
+
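+.PP
+For example, setting the variable to \fB\fCtrue\fR in
+\fB\fC/etc/sysconfig/btrfsmaintenance\fR turns the serialization off (a sketch;
+any other value keeps the tasks serialized):
+
+.PP
+.RS
+
+.nf
+BTRFS\_ALLOW\_CONCURRENCY="true"
+
+.fi
+.RE
+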
+.SS scrub
+.PP
+\fBDescription:\fP Scrub operation reads all data and metadata from the devices
+and verifies the checksums. It's not mandatory, but may point out problems with
+faulty hardware or bit rot early, as it touches data that might not otherwise be in use.
+
+.PP
+If there's a redundancy of data/metadata, i.e. the \fIDUP\fP or \fIRAID1/5/6\fP profiles, scrub
+is able to repair the data automatically if there's a good copy available.
+
+.PP
+\fBImpact when active:\fP Intense read operations take place and may slow down or
+block other filesystem activities, possibly only for short periods.
+
+.PP
+\fBTuning:\fP
+
+.RS
+.IP \(bu 2
+the recommended period is once a month, but a weekly period is also acceptable
+.IP \(bu 2
+you can turn off the automatic repair (\fB\fCBTRFS\_SCRUB\_READ\_ONLY\fR)
+.IP \(bu 2
+the default IO priority is set to \fIidle\fP but scrub may take long to finish,
+you can change priority to \fInormal\fP (\fB\fCBTRFS\_SCRUB\_PRIORITY\fR)
+
+.RE
+
+.PP
+\fBRelated commands:\fP
+
+.RS
+.IP \(bu 2
+you can check status of last scrub run (either manual or through the cron
+job) by \fB\fCbtrfs scrub status /path\fR
+.IP \(bu 2
+you can cancel a running scrub anytime if you find it inconvenient (\fB\fCbtrfs
+scrub cancel /path\fR), the progress state is saved each 5 seconds and next
+time scrub will start from that point
+
+.RE
+
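+.PP
+For example, a one\-off scrub of the root filesystem (assuming it is mounted at
+\fB\fC/\fR) can be started in the foreground and checked from another shell:
+
+.PP
+.RS
+
+.nf
+# btrfs scrub start \-Bd /
+# btrfs scrub status /
+
+.fi
+.RE
+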
+.SS balance
+.PP
+\fBDescription:\fP The balance command can do a lot of things, in general moves
+data around in big chunks. Here we use it to reclaim back the space of the
+underused chunks so it can be allocated again according to current needs.
+
+.PP
+The point is to prevent some corner cases where it's not possible to e.g.
+allocate new metadata chunks because the whole device space is reserved for all
+the chunks, although the total space occupied is smaller and the allocation
+should succeed.
+
+.PP
+The balance operation needs enough workspace so it can shuffle data around. By
+workspace we mean device space that has no filesystem chunks on it, not to be
+confused by free space as reported e.g. by \fB\fCdf\fR\&.
+
+.PP
+\fBImpact when active:\fP Possibly big. There's a mix of read and write
+operations, which is seek\-heavy on rotational devices. This can interfere with other work in case
+the same set of blocks is affected.
+
+.PP
+The balance command uses filters to do the work in smaller batches.
+
+.PP
+Before kernel version 5.2, the impact with quota groups enabled can be extreme.
+The balance operation performs quota group accounting for every extent being
+relocated, which can have the impact of stalling the file system for an
+extended period of time.
+
+.PP
+\fBExpected result:\fP If possible all the underused chunks are removed, the
+value of \fB\fCtotal\fR in output of \fB\fCbtrfs fi df /path\fR should be lower than before.
+Check the logs.
+
+.PP
+The balance command may fail with \fIno space\fP reason but this is considered a
+minor fault as the internal filesystem layout may prevent the command to find
+enough workspace. This might be a time for manual inspection of space.
+
+.PP
+\fBTuning:\fP
+
+.RS
+.IP \(bu 2
+you can make the space reclaim more aggressive by adding higher percentage to
+\fB\fCBTRFS\_BALANCE\_DUSAGE\fR or \fB\fCBTRFS\_BALANCE\_MUSAGE\fR\&. Higher value means bigger
+impact on your system and becomes very noticeable.
+.IP \(bu 2
+the metadata chunks usage pattern is different from data and it's not
+necessary to reclaim metadata block groups that are more than 30% full. The
+default maximum is 10 which should not degrade performance too much but may
+be suboptimal if the metadata usage varies wildly over time. The assumption
+is that underused metadata chunks will get used at some point so it's not
+absolutely required to do the reclaim.
+.IP \(bu 2
+the useful period highly depends on the overall data change pattern on the
+filesystem
+
+.RE
+
+.PP
+\fBChanged defaults since 0.5:\fP
+
+.PP
+Versions up to 0.4.2 had usage filter set up to 50% for data and up to 30% for
+metadata.  Based on user feedback, the numbers have been reduced to 10% (data)
+and 5% (metadata). The system load during the balance service will be smaller
+and the result of space compaction still reasonable. Multiple data chunks filled
+to less than 10% can be merged into fewer chunks. The file data can change in
+large volumes, e.g. deleting a big file can free a lot of space. If the space is
+left unused for the given period, it's desirable to make it more compact.
+Metadata consumption follows a different pattern and reclaiming only the almost
+unused chunks makes more sense, otherwise there's enough reserved metadata
+space for operations like reflink or snapshotting.
+
+.PP
+A convenience script is provided to update the unchanged defaults,
+\fB\fC/usr/share/btrfsmaintenance/update\-balance\-usage\-defaults.sh\fR .
+
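+.PP
+For example, a manual run roughly equivalent to the current defaults (10% data,
+5% metadata) on the root filesystem, with a before/after look at the chunk totals:
+
+.PP
+.RS
+
+.nf
+# btrfs fi df /
+# btrfs balance start \-dusage=10 \-musage=5 /
+# btrfs fi df /
+
+.fi
+.RE
+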
+.SS trim
+.PP
+\fBDescription:\fP The TRIM operation (aka. \fIdiscard\fP) can instruct the underlying device to
+optimize blocks that are not used by the filesystem. This task is performed
+on\-demand by the \fIfstrim\fP utility.
+
+.PP
+This makes sense for SSD devices or other type of storage that can translate
+the TRIM action to something useful (e.g. thin\-provisioned storage).
+
+.PP
+\fBImpact when active:\fP Should be low, but depends on the amount of blocks
+being trimmed.
+
+.PP
+\fBTuning:\fP
+
+.RS
+.IP \(bu 2
+the recommended period is weekly, but monthly is also fine
+.IP \(bu 2
+the trim commands might not have an effect and are up to the device, e.g. a
+block range too small or other constraints that may differ by device
+type/vendor/firmware
+.IP \(bu 2
+the default configuration is \fIoff\fP because of the system fstrim.timer
+
+.RE
+
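+.PP
+For example, a manual trim of the root filesystem that prints the amount of
+discarded bytes:
+
+.PP
+.RS
+
+.nf
+# fstrim \-v /
+
+.fi
+.RE
+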
+.SS defrag
+.PP
+\fBDescription:\fP Run defragmentation on configured directories. This is for
+convenience and not necessary as defragmentation needs are usually different
+for various types of data.
+
+.PP
+Please note that the defragmentation process does not descend to other mount
+points and nested subvolumes or snapshots. All nested paths would need to be
+enumerated in the respective config variable. The command utilizes \fB\fCfind
+\-xdev\fR; you can use that to verify in advance which paths the
+defragmentation will affect.
+
+.PP
+\fBSpecial case:\fP
+
+.PP
+There's a separate defragmentation task that happens automatically and
+defragments only the RPM database files. This is done via a \fIzypper\fP plugin
+and the defrag pass triggers at the end of the installation.
+
+.PP
+This improves reading the RPM databases later, but the installation process
+fragments the files very quickly so it's not likely to bring a significant
+speedup here.
+
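+.PP
+For illustration, a manual pass over the RPM database similar to what the plugin
+does (32MiB matches the plugin's target extent size):
+
+.PP
+.RS
+
+.nf
+# btrfs filesystem defragment \-v \-f \-t 32M /var/lib/rpm/*
+
+.fi
+.RE
+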
+.SH Periodic scheduling
+.PP
+There are now two ways to schedule and run the periodic tasks: cron and
+systemd timers. Only one can be active on a system and this should be decided
+at the installation time.
+
+.SS Cron
+.PP
+Cron takes care of periodic execution of the scripts, but they can be run any
+time directly from \fB\fC/usr/share/btrfsmaintenance/\fR, respecting the configured
+values in \fB\fC/etc/sysconfig/btrfsmaintenance\fR\&.
+
+.PP
+Changes to the configuration file need to be reflected in the \fB\fC/etc/cron\fR
+directories where the scripts are linked for the given period.
+
+.PP
+If the period is changed, the cron symlinks have to be refreshed:
+
+.RS
+.IP \(bu 2
+manually \-\- use \fB\fCsystemctl restart btrfsmaintenance\-refresh\fR (or the \fB\fCrcbtrfsmaintenance\-refresh\fR shortcut)
+.IP \(bu 2
+in \fIyast2\fP \-\- sysconfig editor triggers the refresh automatically
+.IP \(bu 2
+using a file watcher \-\- if you install \fB\fCbtrfsmaintenance\-refresh.path\fR, this will utilize the file monitor to detect changes and will run the refresh
+
+.RE
+
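+.PP
+For example, after editing the period variables in
+\fB\fC/etc/sysconfig/btrfsmaintenance\fR:
+
+.PP
+.RS
+
+.nf
+# systemctl restart btrfsmaintenance\-refresh
+
+.fi
+.RE
+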
+.SS Systemd timers
+.PP
+There's a set of timer units that run the respective task script. The periods
+are configured in the \fB\fC/etc/sysconfig/btrfsmaintenance\fR file as well. The
+timers have to be installed in a similar way as cron. Please note that the
+'\fI\&.timer' and respective '\fP\&.service' files have to be installed so the timers
+work properly.
+
+.PP
+Some package managers (e.g. \fB\fCapt\fR) will configure the timers automatically at
+install time \- you can check with \fB\fCls /usr/lib/systemd/system/btrfs*\fR\&.
+
+.PP
+To install the timers manually, run \fB\fCbtrfsmaintenance\-refresh\-cron.sh timer\fR\&.
+
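+.PP
+For example, installing the timers and reviewing their schedule (assuming the
+units follow the \fB\fCbtrfs\-*\fR naming mentioned above):
+
+.PP
+.RS
+
+.nf
+# /usr/share/btrfsmaintenance/btrfsmaintenance\-refresh\-cron.sh timer
+# systemctl list\-timers 'btrfs\-*'
+
+.fi
+.RE
+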
+.SH Quick start
+.PP
+The tasks' periods and other parameters should fit most use cases and do not
+need to be touched. Review the mount points (variables ending with
+\fB\fC\_MOUNTPOINTS\fR) and decide whether you want to run the tasks there or not.
+
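+.PP
+A quick way to review them (on Debian and derivatives the file is
+\fB\fC/etc/default/btrfsmaintenance\fR):
+
+.PP
+.RS
+
+.nf
+# grep \_MOUNTPOINTS /etc/sysconfig/btrfsmaintenance
+
+.fi
+.RE
+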
+.SH Distro integration
+.PP
+Support for widely used distros is currently present. More distros can be
+added. This section describes how the pieces are put together and should give
+some overview.
+
+.SS Installation
+.PP
+For debian based systems, run \fB\fCdist\-install.sh\fR as root.
+
+.PP
+For non\-debian based systems, check for distro provided package or
+do manual installation of files as described below.
+
+.RS
+.IP \(bu 2
+\fB\fCbtrfs\-*.sh\fR task scripts are expected at \fB\fC/usr/share/btrfsmaintenance\fR
+.IP \(bu 2
+\fB\fCsysconfig.btrfsmaintenance\fR configuration template is put to:
+.RS
+.IP \(bu 2
+\fB\fC/etc/sysconfig/btrfsmaintenance\fR on SUSE and RedHat based systems or derivatives
+.IP \(bu 2
+\fB\fC/etc/default/btrfsmaintenance\fR on Debian and derivatives
+
+.RE
+
+.IP \(bu 2
+\fB\fC/usr/lib/zypp/plugins/commit/btrfs\-defrag\-plugin.sh\fR or
+\fB\fC/usr/lib/zypp/plugins/commit/btrfs\-defrag\-plugin.py\fR post\-update script for
+zypper (the package manager), applies to SUSE\-based distros for now
+.IP \(bu 2
+cron refresh scripts are installed (see below)
+
+.RE
+
+.PP
+The defrag plugin has a shell and python implementation, choose what suits the
+installation better.
+
+.SS cron jobs
+.PP
+The periodic execution of the tasks is done by the 'cron' service.  Symlinks to
+the task scripts are located in the respective directories in
+\fB\fC/etc/cron.<PERIOD>\fR\&.
+
+.PP
+The script \fB\fCbtrfsmaintenance\-refresh\-cron.sh\fR will synchronize the symlinks
+according to the configuration files. This can be called automatically by a GUI
+configuration tool if it's capable of running post\-change scripts or services.
+In that case there's the \fB\fCbtrfsmaintenance\-refresh.service\fR systemd service.
+
+.PP
+This service can also be automatically started upon any modification of the
+configuration file in \fB\fC/etc/sysconfig/btrfsmaintenance\fR by installing the
+\fB\fCbtrfsmaintenance\-refresh.path\fR systemd watcher.
+
+.SS Post\-update defragmentation
+.PP
+The package database files tend to be updated in a random way and get
+fragmented, which particularly hurts on btrfs. For rpm\-based distros this means files
+in \fB\fC/var/lib/rpm\fR\&. The script or plugin simply runs a defragmentation on the affected files.
+See \fB\fCbtrfs\-defrag\-plugin.sh\fR or \fB\fCbtrfs\-defrag\-plugin.py\fR for more details.
+
+.PP
+At the moment the 'zypper' package manager plugin exists. As the package
+managers differ significantly, there's no single plugin/script to do that.
+
+.SS Settings
+.PP
+The settings are copied to the expected system location from the template
+(\fB\fCsysconfig.btrfsmaintenance\fR). This is a shell script and can be sourced to obtain
+values of the variables.
+
+.PP
+The template contains descriptions of the variables, default and possible
+values and can be deployed without changes (expecting the root filesystem to be
+btrfs).
+
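+.PP
+For example, the effective values can be printed by sourcing the file (the
+variable names here are only an illustration of the pattern used in the template):
+
+.PP
+.RS
+
+.nf
+# . /etc/sysconfig/btrfsmaintenance
+# echo "$BTRFS\_BALANCE\_PERIOD $BTRFS\_BALANCE\_MOUNTPOINTS"
+
+.fi
+.RE
+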
+.SH Tuning periodic snapshotting
+.PP
+There are various tools and handwritten scripts to manage periodic snapshots
+and cleaning. The common problem is tuning the retention policy constrained by
+the filesystem size and not running out of space.
+
+.PP
+This section will describe factors that affect that, using snapper
+\[la]https://snapper.io\[ra]
+as an example, but adapting to other tools should be straightforward.
+
+.SS Intro
+.PP
+Snapper is a tool to manage snapshots of btrfs subvolumes. It can create
+snapshots of given subvolume manually, periodically or in a pre/post way for
+a given command. It can be configured to retain existing snapshots according
+to time\-based settings. As the retention policy can be very different for
+various use cases, we need to be able to find matching settings.
+
+.PP
+The settings should satisfy the user's expectations about storing previous copies of
+the subvolume without taking too much space. In the extreme, this can consume the whole
+filesystem space and prevent some operations from finishing.
+
+.PP
+In order to avoid such situations, the snapper settings should be tuned according
+to the expected use case and filesystem size.
+
+.SS Sample problem
+.PP
+Default settings of snapper on default root partition size can easily lead to
+no\-space conditions (all TIMELINE values set to 10). Frequent system updates
+make it happen earlier, but this also affects long\-term use.
+
+.SS Factors affecting space consumption
+.RS
+.IP "  1." 5
+frequency of snapshotting
+.IP "  2." 5
+amount of data changes between snapshots (delta)
+.IP "  3." 5
+snapshot retention settings
+.IP "  4." 5
+size of the filesystem
+
+.RE
+
+.PP
+Each will be explained below.
+
+.PP
+The way the files are changed affects the space consumption. When new
+data overwrites existing data, the new data will be pinned by the following snapshot,
+while the original data will belong to the previous snapshot. This means that the
+allocated file blocks are freed after the last snapshot pointing to them is
+gone.
+
+.SS Tuning
+.PP
+The administrator/user is supposed to know the approximate use of the partition
+with snapshots enabled.
+
+.PP
+The decision criterion for tuning is space consumption and we're optimizing to
+maximize retention without running out of space.
+
+.PP
+All the factors are intertwined and we cannot give definite answers but rather
+describe the tendencies.
+
+.SS Snapshotting frequency
+.RS
+.IP \(bu 2
+\fBautomatic\fP: if turned on with the \fB\fCTIMELINE\fR config option, the periodic
+snapshots are taken hourly. The daily/weekly/monthly/yearly periods will keep
+the first hourly snapshot in the given period.
+.IP \(bu 2
+\fBat package update\fP: package manager with snapper support will create
+pre/post snapshots before/after an update happens.
+.IP \(bu 2
+\fBmanual\fP: the user can create a snapshot manually with \fB\fCsnapper create\fR,
+with a given snapshot type (i.e. single, pre, post).
+
+.RE
+
+.SS Amount of data change
+.PP
+This parameter is hard to predict and calculate. We work with rough
+estimates, e.g. megabytes, gigabytes etc.
+
+.SS Retention settings
+.PP
+The user is supposed to know possible needs of recovery or examination of
+previous file copies stored in snapshots.
+
+.PP
+It's not recommended to keep too old snapshots, e.g. monthly or even yearly if
+there's no apparent need for that. The yearly snapshots should not substitute
+backups, as they reside on the same partition and cannot be used for recovery.
+
+.SS Filesystem size
+.PP
+A bigger filesystem allows for longer retention, a higher update frequency and
+a larger amount of data changes.
+
+.PP
+As an example of a system root partition, the recommended size is 30 GiB, but
+50 GiB is selected by the installer if the snapshots are turned on.
+
+.PP
+For non\-system partitions it is recommended to watch the remaining free space.
+Although getting an accurate value on btrfs is tricky, due to shared extents
+and snapshots, the output of \fB\fCdf\fR gives a rough idea. Low space, like under a
+few gigabytes is more likely to lead to no\-space conditions, so it's a good
+time to delete old snapshots or review the snapper settings.
+
+.SS Typical use cases
+.SS A rolling distro
+.RS
+.IP \(bu 2
+frequency of updates: high, multiple times per week
+.IP \(bu 2
+amount of data changed between updates: high
+
+.RE
+
+.PP
+Suggested values:
+
+.PP
+.RS
+
+.nf
+TIMELINE\_LIMIT\_HOURLY="12"
+TIMELINE\_LIMIT\_DAILY="5"
+TIMELINE\_LIMIT\_WEEKLY="2"
+TIMELINE\_LIMIT\_MONTHLY="1"
+TIMELINE\_LIMIT\_YEARLY="0"
+
+.fi
+.RE
+
+.PP
+The size of the root partition should be at least 30 GiB, but more is better.
+
+.SS Regular/enterprise distro
+.RS
+.IP \(bu 2
+frequency of updates: low, a few times per month
+.IP \(bu 2
+amount of data changed between updates: low to moderate
+
+.RE
+
+.PP
+Most data changes probably come from the package updates, in the range of
+hundreds of megabytes per update.
+
+.PP
+Suggested values:
+
+.PP
+.RS
+
+.nf
+TIMELINE\_LIMIT\_HOURLY="12"
+TIMELINE\_LIMIT\_DAILY="7"
+TIMELINE\_LIMIT\_WEEKLY="4"
+TIMELINE\_LIMIT\_MONTHLY="6"
+TIMELINE\_LIMIT\_YEARLY="1"
+
+.fi
+.RE
+
+.SS Big file storage
+.RS
+.IP \(bu 2
+frequency of updates: moderate to high
+.IP \(bu 2
+amount of data changed between updates: no changes in files, new files added, old deleted
+
+.RE
+
+.PP
+Suggested values:
+
+.PP
+.RS
+
+.nf
+TIMELINE\_LIMIT\_HOURLY="12"
+TIMELINE\_LIMIT\_DAILY="7"
+TIMELINE\_LIMIT\_WEEKLY="4"
+TIMELINE\_LIMIT\_MONTHLY="6"
+TIMELINE\_LIMIT\_YEARLY="0"
+
+.fi
+.RE
+
+.PP
+Note that deleting a big file that has been snapshotted will not free the space
+until all relevant snapshots are deleted.
+
+.SS Mixed
+.RS
+.IP \(bu 2
+frequency of updates: unpredictable
+.IP \(bu 2
+amount of data changed between updates: unpredictable
+
+.RE
+
+.PP
+Examples:
+
+.RS
+.IP \(bu 2
+home directory with small files (in the range of kilobytes to megabytes), large files (hundreds of megabytes to gigabytes).
+.IP \(bu 2
+git trees, bare and checked out repositories
+
+.RE
+
+.PP
+It is not possible to suggest config numbers as it really depends on user
+expectations. Keeping a few hourly snapshots should not consume too much space
+and provides a copy of files, e.g. to restore after accidental deletion.
+
+.PP
+Starting point:
+
+.PP
+.RS
+
+.nf
+TIMELINE\_LIMIT\_HOURLY="12"
+TIMELINE\_LIMIT\_DAILY="7"
+TIMELINE\_LIMIT\_WEEKLY="1"
+TIMELINE\_LIMIT\_MONTHLY="0"
+TIMELINE\_LIMIT\_YEARLY="0"
+
+.fi
+.RE
+
+.SS Summary
+.TS
+allbox;
+l l l l l l 
+l l l l l l .
+\fB\fCType\fR  \fB\fCHourly\fR \fB\fCDaily\fR  \fB\fCWeekly\fR \fB\fCMonthly\fR        \fB\fCYearly\fR
+Rolling        12      5       2       1       0
+Regular        12      7       4       6       1
+Big files      12      7       4       6       0
+Mixed  12      7       1       0       0
+.TE
+
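+.PP
+For example, the \fIMixed\fP row can be applied to an existing snapper config
+named \fIroot\fP with:
+
+.PP
+.RS
+
+.nf
+# snapper \-c root set\-config TIMELINE\_LIMIT\_HOURLY="12" TIMELINE\_LIMIT\_DAILY="7"
+# snapper \-c root set\-config TIMELINE\_LIMIT\_WEEKLY="1" TIMELINE\_LIMIT\_MONTHLY="0" TIMELINE\_LIMIT\_YEARLY="0"
+
+.fi
+.RE
+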
+.SH About
+.PP
+The goal of this project is to help with administering btrfs filesystems. It is not
+supposed to be distribution specific. Common scripts/configs are preferred but
+per\-distro exceptions will be added when necessary.
+
+.PP
+License: GPL 2
+\[la]https://www.gnu.org/licenses/gpl-2.0.html\[ra]
+
+.PP
+Contributing guide
+\[la]CONTRIBUTING.md\[ra]\&.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/README.md new/btrfsmaintenance-0.5.2/README.md
--- old/btrfsmaintenance-0.5/README.md  2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/README.md        2024-07-04 20:15:46.000000000 +0200
@@ -2,8 +2,11 @@
 =========================
 
 Table of contents:
+
 * [Quick start](#quick-start)
+
 * [Distro integration](#distro-integration)
+
 * [Tuning periodic snapshotting](#tuning-periodic-snapshotting)
 
 This is a set of scripts supplementing the btrfs filesystem and aims to automate
@@ -31,11 +34,11 @@
 and verifies the checksums. It's not mandatory, but may point out problems with
 faulty hardware early as it touches data that might not be in use and bit rot.
 
-If there's a redundancy of data/metadata, ie. the *DUP* or *RAID1/5/6* profiles, scrub
+If there's a redundancy of data/metadata, i.e. the *DUP* or *RAID1/5/6* profiles, scrub
 is able to repair the data automatically if there's a good copy available.
 
 __Impact when active:__ Intense read operations take place and may slow down or
-block other filesystem activies, possibly only for short periods.
+block other filesystem activities, possibly only for short periods.
 
 __Tuning:__
 
@@ -58,14 +61,14 @@
 data around in big chunks. Here we use it to reclaim back the space of the
 underused chunks so it can be allocated again according to current needs.
 
-The point is to prevent some corner cases where it's not possible to eg.
+The point is to prevent some corner cases where it's not possible to e.g.
 allocate new metadata chunks because the whole device space is reserved for all
 the chunks, although the total space occupied is smaller and the allocation
 should succeed.
 
 The balance operation needs enough workspace so it can shuffle data around. By
 workspace we mean device space that has no filesystem chunks on it, not to be
-confused by free space as reported eg. by `df`.
+confused by free space as reported e.g. by `df`.
 
 __Impact when active:__ Possibly big. There's a mix of read and write operations, is
 seek-heavy on rotational devices. This can interfere with other work in case
@@ -107,7 +110,7 @@
 and 5% (metadata). The system load during the balance service will be smaller
 and the result of space compaction still reasonable. Multiple data chunks filled
 to less than 10% can be merged into fewer chunks. The file data can change in
-large volumes, eg. deleting a big file can free a lot of space. If the space is
+large volumes, e.g. deleting a big file can free a lot of space. If the space is
 left unused for the given period, it's desirable to make it more compact.
 Metadata consumption follows a different pattern and reclaiming only the almost
 unused chunks makes more sense, otherwise there's enough reserved metadata
@@ -123,7 +126,7 @@
 on-demand by the *fstrim* utility.
 
 This makes sense for SSD devices or other type of storage that can translate
-the TRIM action to something useful (eg. thin-provisioned storage).
+the TRIM action to something useful (e.g. thin-provisioned storage).
 
 __Impact when active:__ Should be low, but depends on the amount of blocks
 being trimmed.
@@ -131,10 +134,10 @@
 __Tuning:__
 
 * the recommended period is weekly, but monthly is also fine
-* the trim commands might not have an effect and are up to the device, eg. a
+* the trim commands might not have an effect and are up to the device, e.g. a
   block range too small or other constraints that may differ by device
   type/vendor/firmware
-* the default configuration is *off* because of the the system fstrim.timer
+* the default configuration is *off* because of the system fstrim.timer
 
 ### defrag ###
 
@@ -188,6 +191,11 @@
 '*.timer' and respective '*.service' files have to be installed so the timers
 work properly.
 
+Some package managers (e.g. `apt`) will configure the timers automatically at
+install time - you can check with `ls /usr/lib/systemd/system/btrfs*`.
+
+To install the timers manually, run `btrfsmaintenance-refresh-cron.sh timer`.
+
 
 ## Quick start ##
 
@@ -321,19 +329,19 @@
   pre/post snapshots before/after an update happens.
 
 * **manual**: the user can create a snapshot manually with `snapper create`,
-  with a given snapshot type (ie. single, pre, post).
+  with a given snapshot type (i.e. single, pre, post).
 
 #### Amount of data change
 
 This is a parameter hard to predict and calculate. We work with rough
-estimates, eg. megabytes, gigabytes etc.
+estimates, e.g. megabytes, gigabytes etc.
 
 #### Retention settings
 
 The user is supposed to know possible needs of recovery or examination of
 previous file copies stored in snapshots.
 
-It's not recommended to keep too old snapshots, eg. monthly or even yearly if
+It's not recommended to keep too old snapshots, e.g. monthly or even yearly if
 there's no apparent need for that. The yearly snapshots should not substitute
 backups, as they reside on the same partition and cannot be used for recovery.
 
@@ -413,7 +421,7 @@
 
 Not possible to suggest config numbers as it really depends on user
 expectations. Keeping a few hourly snapshots should not consume too much space
-and provides a copy of files, eg. to restore after accidental deletion.
+and provides a copy of files, e.g. to restore after accidental deletion.
 
 Starting point:
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/btrfs-scrub.sh new/btrfsmaintenance-0.5.2/btrfs-scrub.sh
--- old/btrfsmaintenance-0.5/btrfs-scrub.sh     2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/btrfs-scrub.sh   2024-07-04 20:15:46.000000000 +0200
@@ -40,7 +40,21 @@
                echo "Path $MNT is not btrfs, skipping"
                continue
        fi
-       run_task btrfs scrub start -Bd $ioprio $readonly "$MNT"
+
+       if ! is_raid56 "$MNT"; then
+               echo "RAID level is not 5 or 6, parallel device scrubbing"
+               run_task btrfs scrub start -Bd $ioprio $readonly "$MNT"
+       else
+               echo "RAID level is 5 or 6, sequential device scrubbing"
+               for DEV in $(btrfs filesystem show   "$MNT" |  awk '/ path /{print $NF}')
+               do
+                       run_task btrfs scrub start -Bd $ioprio $readonly "$DEV"
+                       until btrfs scrub status "$DEV" | grep finished
+                               do
+                                       sleep 5
+                               done
+               done
+       fi
        if [ "$?" != "0" ]; then
                echo "Scrub cancelled at $MNT"
                exit 1
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/btrfsmaintenance-functions new/btrfsmaintenance-0.5.2/btrfsmaintenance-functions
--- old/btrfsmaintenance-0.5/btrfsmaintenance-functions 2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/btrfsmaintenance-functions       2024-07-04 20:15:46.000000000 +0200
@@ -12,9 +12,9 @@
        local MNTLIST="$1"
 
        if [ "$MNTLIST" = "auto" ]; then
-               local BTRFS_DEVICES=""
-               local DEVICE=""
-               local MNT=""
+               local BTRFS_DEVICES
+               local DEVICE
+               local MNT
 
                # find all mounted btrfs filesystems, print their device nodes, sort them
                # and remove identical entries
@@ -73,10 +73,21 @@
 # check if filesystem is a btrfs
 is_btrfs() {
        local FS=$(stat -f --format=%T "$1")
+
        [ "$FS" = "btrfs" ] && return 0
        return 1
 }
 
+# function: is_raid56
+# parameter: path to a mounted filesystem
+#
+# check if filesystem is on a RAID-5 or RAID-6
+is_raid56() {
+       btrfs filesystem usage "$1" | grep Data,RAID5 && return 0
+       btrfs filesystem usage "$1" | grep Data,RAID6 && return 0
+       return 1
+}
+
 # function: btrfs_fsid
 # parameter: path to a mounted filesystem
 #
@@ -91,12 +102,19 @@
 # run the given command with concurrency protection unless allowed by the
 # config, use for tasks that should not run at the same time due to heavy IO
 run_task() {
-       MNT="${@:$#}"
-       UUID=$(btrfs_fsid "$MNT")
+       local MNT="${@:$#}"
+       local UUID=$(btrfs_fsid "$MNT")
+       local verbose
 
        if test "$BTRFS_ALLOW_CONCURRENCY" = "true"; then
                "$@"
        else
-               /usr/bin/flock --verbose /run/btrfs-maintenance-running."$UUID" "$@"
+               # Flock older than 2.27 does not support --verbose option, check
+               # if it's available as we'd like to log the information
+               if /usr/bin/flock --help 2>&1 | grep -q -- --verbose; then
+                       verbose="--verbose"
+               fi
+
+               /usr/bin/flock $verbose /run/btrfs-maintenance-running."$UUID" "$@"
        fi
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/btrfsmaintenance-refresh-cron.sh new/btrfsmaintenance-0.5.2/btrfsmaintenance-refresh-cron.sh
--- old/btrfsmaintenance-0.5/btrfsmaintenance-refresh-cron.sh   2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/btrfsmaintenance-refresh-cron.sh 2024-07-04 20:15:46.000000000 +0200
@@ -31,18 +31,22 @@
 esac
 
 refresh_cron() {
-       EXPECTED="$1"
-       SCRIPT="$2"
+       local EXPECTED="$1"
+       local SCRIPT="$2"
+       local VALID=false
+       local PERIOD
+       local LINK
+       local FILE
+
        echo "Refresh script $SCRIPT for $EXPECTED"
 
-       valid=false
        for PERIOD in daily weekly monthly none uninstall; do
                if [ "$PERIOD" = "$EXPECTED" ]; then
-                       valid=true
+                       VALID=true
                fi
        done
 
-       if ! $valid; then
+       if ! $VALID; then
                echo "$EXPECTED is not a valid period for cron.  Not changing."
                return
        fi
@@ -60,8 +64,9 @@
 }
 
 refresh_timer() {
-       PERIOD="$1"
-       SERVICE="$2"
+       local PERIOD="$1"
+       local SERVICE="$2"
+
        echo "Refresh timer $SERVICE for $PERIOD"
 
        case "$PERIOD" in
@@ -74,6 +79,7 @@
                        mkdir -p /etc/systemd/system/"$SERVICE".timer.d/
                        cat << EOF > /etc/systemd/system/"$SERVICE".timer.d/schedule.conf
 [Timer]
+OnCalendar=
 OnCalendar=$PERIOD
 EOF
                        systemctl enable "$SERVICE".timer &> /dev/null
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/btrfsmaintenance-refresh.service new/btrfsmaintenance-0.5.2/btrfsmaintenance-refresh.service
--- old/btrfsmaintenance-0.5/btrfsmaintenance-refresh.service   2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/btrfsmaintenance-refresh.service 2024-07-04 20:15:46.000000000 +0200
@@ -1,5 +1,7 @@
 [Unit]
-Description=Update cron periods from /etc/sysconfig/btrfsmaintenance
+Description=Configure systemd timer schedule according to /etc/sysconfig/btrfsmaintenance
+Documentation="file:/usr/share/doc/btrfsmaintenance/README.man"
+Documentation="file:/usr/share/doc/packages/btrfsmaintenance/README.man"
 
 [Service]
 ExecStart=/usr/share/btrfsmaintenance/btrfsmaintenance-refresh-cron.sh systemd-timer
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/btrfsmaintenance-0.5/btrfsmaintenance.spec new/btrfsmaintenance-0.5.2/btrfsmaintenance.spec
--- old/btrfsmaintenance-0.5/btrfsmaintenance.spec      2020-07-30 14:58:04.000000000 +0200
+++ new/btrfsmaintenance-0.5.2/btrfsmaintenance.spec    2024-07-04 20:15:46.000000000 +0200
@@ -22,7 +22,7 @@
 %endif
 
 Name:           btrfsmaintenance
-Version:        0.5
+Version:        0.5.2
 Release:        0
 Summary:        Scripts for btrfs periodic maintenance tasks
 License:        GPL-2.0-only
@@ -99,7 +99,7 @@
 
 %files
 %license COPYING
-%doc README.md
+%doc README.md README.man
 %{_fillupdir}/sysconfig.btrfsmaintenance
 %dir %{_datadir}/%{name}
 %{_datadir}/%{name}/*
