Hi all,

We have an existing Nagios server that's monitoring approximately 375 
hosts and 4500 services.  We're testing migrating to Icinga set up in a 
failover configuration, and I just started noticing something odd with 
some of our service checks.  We're taking the configuration files from 
our Nagios server and loading them into Icinga, only changing a few paths 
and setting up the ocsp & ochp options.  We're not making any changes to 
any of our host/service definitions.  A handful of services that are 
actively checked under Nagios 3.1.2 are not being checked under Icinga 
1.0.2.  On the Service Information page the check type is active on 
Nagios but passive on Icinga, and I can't figure out how to get it to 
become an active check under Icinga...

Here's how the service is defined on both the Nagios & Icinga servers:

define service{
    name                            service-base
    active_checks_enabled           1
    check_period                    24x7
    contact_groups                  null-mail
    max_check_attempts              4
    normal_check_interval           5
    notification_interval           15
    notification_options            c,r,u,w
    notification_period             24x7
    notifications_enabled           1
    parallelize_check               1
    process_perf_data               0
    register                        0
    retain_nonstatus_information    1
    retain_status_information       1
    retry_check_interval            1
}

define service {
    check_command           nrpe!USG_check_linux_nfs_mounts
    host_name               cobalt
    hostgroup_name          hpc02_compute
    service_description     NFS mountpoints
    use                     service-base
    max_check_attempts      8
}

Here's how the service shows up in the Nagios status.dat for one of our 
hosts:

servicestatus {
        host_name=contrib02
        service_description=NFS mountpoints
        modified_attributes=2
        check_command=nrpe!check_nfs_mounts
        check_period=24x7
        notification_period=24x7
        check_interval=5.000000
        retry_interval=1.000000
        event_handler=
        has_been_checked=1
        should_be_scheduled=1
        check_execution_time=0.549
        check_latency=568.154
        check_type=0
        current_state=0
        last_hard_state=0
        last_event_id=117668
        current_event_id=117684
        current_problem_id=0
        last_problem_id=56684
        current_attempt=1
        max_attempts=8
        state_type=1
        last_state_change=1279492095
        last_hard_state_change=1279492095
        last_time_ok=1279633931
        last_time_warning=0
        last_time_unknown=0
        last_time_critical=1279488700
        plugin_output=OK: All NFS mountpoints are healthy
        long_plugin_output=
        performance_data=
        last_check=1279633931
        next_check=1279634231
        check_options=0
        current_notification_number=0
        current_notification_id=100977
        last_notification=0
        next_notification=0
        no_more_notifications=0
        notifications_enabled=1
        active_checks_enabled=1
        passive_checks_enabled=1
        event_handler_enabled=1       
        problem_has_been_acknowledged=0
        acknowledgement_type=0
        flap_detection_enabled=1
        failure_prediction_enabled=1
        process_performance_data=0
        obsess_over_service=1
        last_update=1279634359
        is_flapping=0
        percent_state_change=0.00
        scheduled_downtime_depth=0
        }


And here's how the same service for the same host shows up in the Icinga 
status.dat file:

servicestatus {
        host_name=contrib02
        service_description=NFS mountpoints
        modified_attributes=2
        check_command=nrpe!USG_check_linux_nfs_mounts
        check_period=24x7
        notification_period=24x7
        check_interval=5.000000
        retry_interval=1.000000
        event_handler=
        has_been_checked=1
        should_be_scheduled=1
        check_execution_time=0.000
        check_latency=3.441
        check_type=1
        current_state=3
        last_hard_state=3
        last_event_id=0
        current_event_id=1247
        current_problem_id=1210
        last_problem_id=0
        current_attempt=8
        max_attempts=8
        state_type=1
        last_state_change=1279053247
        last_hard_state_change=1279053247
        last_time_ok=0
        last_time_warning=0
        last_time_unknown=1279288109
        last_time_critical=0
        plugin_output=UNKNOWN - manually cleared
        long_plugin_output=
        performance_data=
        last_check=1279288109
        next_check=1279634872
        check_options=0
        current_notification_number=0
        current_notification_id=0
        last_notification=0
        next_notification=0
        no_more_notifications=0
        notifications_enabled=1
        active_checks_enabled=1
        passive_checks_enabled=1
        event_handler_enabled=1
        problem_has_been_acknowledged=0
        acknowledgement_type=0
        flap_detection_enabled=1
        failure_prediction_enabled=1
        process_performance_data=0
        obsess_over_service=1
        last_update=1279634428
        is_flapping=0
        percent_state_change=5.99
        scheduled_downtime_depth=0
        }

Any ideas why this exact same service definition has an active check 
type in Nagios but a passive check type in Icinga?

-Bruce


------------------------------------------------------------------------------
This SF.net email is sponsored by Sprint
What will you do first with EVO, the first 4G phone?
Visit sprint.com/first -- http://p.sf.net/sfu/sprint-com-first
_______________________________________________
icinga-users mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/icinga-users

Reply via email to