On 07/09/2014 09:05 AM, LEIBOVICI Thomas wrote:
> File classes are refreshed at scan time or at policy application time.

Mmm, OK. A scan has just finished, and I still don't have the new class listed
in the report. Is there something wrong in my configuration file (see
projets.conf) ?

Jean-Baptiste



##########################################
# Robinhood configuration file template  #
##########################################

# Global configuration
General
{
    # filesystem to be monitored
    fs_path = "/projets" ;

    # filesystem type (as returned by 'df' or 'mount' commands)
    fs_type = "nfs" ;

    # filesystem property used as FS key: fsname, devid or fsid
    # (fsid NOT recommended)
    fs_key = fsname ;

    # file for suspending all actions
    lock_file = "/var/locks/robinhood.lock" ;

    # check that objects are in the same device as 'fs_path',
    # so it will not traverse mount points
    stay_in_fs = TRUE ;

    # check that the filesystem is mounted
    check_mounted = TRUE ;
}

# Log configuration
Log
{
    # Log verbosity level
    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL
    debug_level = EVENT ;

    # Log file
    log_file = "/var/log/robinhood.log" ;

    # File for reporting purge events
    report_file = "/var/log/robinhood_reports.log" ;

    # set alert_file, alert_mail or both depending on the alert method you wish
    alert_file = "/var/log/robinhood_alerts.log" ;
    alert_mail = "[email protected]" ;

    # Interval for dumping stats (to logfile)
    stats_interval = 5min ;

    # Alert batching (to send a digest instead of 1 alert per file)
    # 0: unlimited batch size, 1: no batching (1 alert per file),
    # N>1: batch N alerts per digest
    batch_alert_max = 5000 ;
    # Give the detail of entry attributes for each alert?
    alert_show_attrs = FALSE ;
}

# List Manager configuration
ListManager
{
    # Method for committing information to database.
    # Possible values are:
    # - "autocommit": weak transactions (more efficient, but database
    #   inconsistencies may occur)
    # - "transaction": manage operations in transactions (best consistency,
    #   lower performance)
    # - "periodic(<nb_transaction>)": periodically commit (every <n>
    #   transactions).
    #commit_behavior = transaction ;
    commit_behavior = periodic(1000) ;

    # Minimum time (in seconds) to wait before trying to reestablish a lost
    # connection.
    # Then this time is multiplied by 2 until reaching
    # connect_retry_interval_max.
    connect_retry_interval_min = 1 ;
    connect_retry_interval_max = 30 ;
    # disable the following options if you are not interested in
    # user or group stats (to speed up scan)
    user_acct  = enabled ;
    group_acct = enabled ;

    MySQL
    {
        server = "localhost" ;
        db     = "robinhood_projets" ;
        user   = "robinhood" ;
        password_file = "/etc/robinhood.d/.dbpassword" ;
        # port   = 3306 ;
        # socket = "/tmp/mysql.sock" ;
        innodb = enabled ;
    }
}

# Policies configuration

Filesets
{
    FileClass   between_0m_and_1m
    {
        definition
        {
                last_access <= 30day
        }
    }

    FileClass   between_1m_and_3m
    {
        definition 
        {
            last_access > 30day
            and
            last_access <= 90day
        }
    }

    FileClass   between_3m_and_6m
    {
        definition
        {
            last_access > 90day
            and
            last_access <= 180day
        }
    }

    FileClass   more_than_6m
    {
        definition
        {
            last_access > 180day
        }
    }

    FileClass   older_than_2m_and_greater_than_10G      
    {
        definition
        {
            last_access > 60day
            and
            size > 10G
        }
    }
}

purge_policies {
    ignore_fileclass = between_0m_and_1m;
    ignore_fileclass = between_1m_and_3m;
    ignore_fileclass = between_3m_and_6m;
    ignore_fileclass = more_than_6m;
    ignore_fileclass = older_than_2m_and_greater_than_10G;
}

db_update_policy
{
    # possible policies for refreshing metadata and path in database:
    #   never: get the information once, then never refresh it
    #   always: always update entry info when processing it
    #   on_event: only update on related event
    #   periodic(interval): only update periodically
    #   on_event_periodic(min_interval,max_interval)= on_event + periodic

    # Updating of file metadata
    md_update = always ;
    # File classes matching
    fileclass_update = always ;
}

# Entry Processor configuration
EntryProcessor
{
    # Raise alerts for directories with too many entries
    Alert       More_than_20000_files_in_directory
    {
        type == directory
        and
        dircount > 20000
    }

    # Raise alerts for large files
    Alert       More_than_500GB_file
    {
        type == file
        and
        size > 500GB
    }

    # nbr of worker threads for processing pipeline tasks
    nb_threads = 8 ;

    # Max number of operations in the Entry Processor pipeline.
    # If the number of pending operations exceeds this limit, 
    # info collectors are suspended until this count decreases
    max_pending_operations = 10000 ;

    # max batched DB operations (1=no batching)
    max_batch_size = 1000;

    # Optionally specify a maximum thread count for each stage of the pipeline:
    # <stagename>_threads_max = <n> (0: use default)
    # STAGE_GET_FID_threads_max = 4 ;
    # STAGE_GET_INFO_DB_threads_max     = 4 ;
    # STAGE_GET_INFO_FS_threads_max     = 4 ;
    # STAGE_REPORTING_threads_max       = 4 ;
    # STAGE_PRE_APPLY_threads_max       = 4 ;
    # Disable batching (max_batch_size=1) to allow parallelizing the following
    # step:
    # STAGE_DB_APPLY_threads_max        = 4 ;

    # if set to FALSE, classes will only be matched
    # at policy application time (not during a scan or reading changelog)
    match_classes = TRUE;

    # Faking mtime to an old time causes the file to be migrated
    # with top priority. Enabling this parameter detects this behavior
    # and does not allow mtime < creation_time.
    detect_fake_mtime = FALSE;
}

# FS Scan configuration
FS_Scan
{
    # simple scan interval (fixed)
    scan_interval      =   30d ;

    # min/max for adaptive scan interval:
    # the more the filesystem is full, the more frequently it is scanned.
    #min_scan_interval      =    2h ;
    #max_scan_interval      =   12h ;

    # number of threads used for scanning the filesystem
    nb_threads_scan        =     8 ;

    # when a scan fails, this is the delay before retrying
    scan_retry_delay       =    1h ;

    # timeout for operations on the filesystem
    scan_op_timeout        =    1h ;
    # exit if operation timeout is reached?
    exit_on_timeout        =    TRUE ;
    # external command called on scan termination
    # special arguments can be specified: {cfg} = config file path,
    # {fspath} = path to managed filesystem
    #completion_command     =    "/path/to/my/script.sh -f {cfg} -p {fspath}" ;

    # Internal scheduler granularity (for detecting end of scan, hangs, ...)
    spooler_check_interval =  1min ;

    # Memory preallocation parameters
    nb_prealloc_tasks      =   256 ;

    Ignore
    {
        # ignore ".snapshot" and ".snapdir" directories (don't scan them)
        type == directory
        and
        ( name == ".snapdir" or name == ".snapshot" )
    }
}
------------------------------------------------------------------------------
Open source business process management suite built on Java and Eclipse
Turn processes into business applications with Bonita BPM Community Edition
Quickly connect people, data, and systems into organized workflows
Winner of BOSSIE, CODIE, OW2 and Gartner awards
http://p.sf.net/sfu/Bonitasoft
_______________________________________________
robinhood-support mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/robinhood-support

Reply via email to