More work on BackupPC's monitoring scripts

tags/zabbix-agent-addons-0.2.73-1
Daniel Berteaud 6 years ago
parent 2545758742
commit 14164177cb
3 changed files:
  1. zabbix-agent-addons.spec (1 line changed)
  2. zabbix_scripts/check_backuppc_sudo (85 lines changed)
  3. zabbix_scripts/disco_backuppc_sudo (32 lines changed)

zabbix-agent-addons.spec

@@ -22,6 +22,7 @@ Requires: perl(POSIX)
 Requires: perl(MIME::Base64)
 Requires: perl(File::Which)
 Requires: perl(Config::Simple)
+Requires: perl(Statistics::Descriptive)
 Requires: fping
 %if ! 0%{?_without_selinux}
 Requires: policycoreutils

zabbix_scripts/check_backuppc_sudo

@@ -6,6 +6,7 @@ use BackupPC::CGI::Lib;
 use POSIX;
 use JSON;
 use Getopt::Long;
+use Statistics::Descriptive;
 use Data::Dumper;

 my $host = undef;
@@ -33,35 +34,61 @@ if ( $host ) {
   my $hostConf = $bpc->ConfigDataRead($host);
   my $conf = { %$mainConf, %$hostConf };
   my $age = -1;
+  $json = {
+    bkp             => 0,
+    full_size       => 0,
+    errors          => 0,
+    new_size        => 0,
+    new_size_avg    => 0,
+    new_size_median => 0,
+    new_size_q1     => 0,
+    new_size_q3     => 0,
+    duration        => 0,
+    comp_ratio      => 0
+  };
-  # Backup frequency
-  my $freq = ($conf->{FullPeriod} > $conf->{IncrPeriod}) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
   my $lastXferErrors = 0;
   my $maxErrors = 0;
   my $new_size_of_last_full = 0;
+  my @bpc_info = $bpc->BackupInfoRead($host);
+  my $sizes = new Statistics::Descriptive::Full;
-  foreach my $backup ( $bpc->BackupInfoRead($host) ) {
-    # Skip partial or active backups
-    next if ( $backup->{type} !~ m/^full|incr$/ );
-    if ( $backup->{type} eq "full" ) {
-      $json->{full_size} = $backup->{size};
-      $new_size_of_last_full = $backup->{sizeNew};
-    }
-    $json->{last_errors} = $backup->{xferErrs};
-    $json->{new_size} = $backup->{sizeNew};
-    $json->{total_size} += $backup->{sizeNew};
-    $json->{duration} = $backup->{endTime} - $backup->{startTime};
-    $json->{type} = $backup->{type};
-    $json->{ratio} = ( $backup->{sizeNew} > 0 ) ? sprintf( "%.2f", 100 - ( $backup->{sizeNewComp} * 100 / $backup->{sizeNew} ) ) : 0;
-    $age = $backup->{startTime};
-  }
-  $json->{enabled} = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
-  $json->{total_size} += $json->{full_size} - 2 * $new_size_of_last_full;
-  $json->{age} = time - $age;
-  $json->{max_errors} = $conf->{MaxXferError} || 0;
+  if ( scalar( @bpc_info ) ){
+    foreach my $backup ( @bpc_info ) {
+      # Skip partial or active backups
+      next if ( $backup->{type} !~ m/^full|incr$/ );
+      if ( $backup->{type} eq "full" ) {
+        $json->{full_size} = $backup->{size};
+        $new_size_of_last_full = $backup->{sizeNew};
+      }
+      # Push all the sizes in our data set to compute avg sizes
+      # Exclude backup N°0 as it'll always have much more new data than normal backups
+      $sizes->add_data($backup->{sizeNew}) unless ( $backup->{num} == 0 );
+      $json->{bkp}++;
+    }
+    # Ignore the last backup if it's not full or incr (which means it's either partial or active)
+    my $i = ( $bpc_info[-1]->{type} =~ m/^full|incr$/ ) ? -1 : -2;
+    $json->{errors} = $bpc_info[$i]->{xferErrs};
+    $json->{new_size} = $bpc_info[$i]->{sizeNew};
+    $json->{duration} = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
+    $json->{type} = $bpc_info[$i]->{type};
+    $json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 ) ?
+      sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
+      :
+      0;
+    $json->{new_size_avg} = int $sizes->mean;
+    $json->{new_size_median} = int $sizes->median;
+    $json->{new_size_q1} = int $sizes->quantile(1);
+    $json->{new_size_q3} = int $sizes->quantile(3);
+    $json->{enabled} = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
+    $json->{total_size} = $sizes->sum + $json->{full_size} - 2 * $new_size_of_last_full;
+    $json->{age} = time - $bpc_info[$i]->{startTime};
+    $json->{max_errors} = $conf->{MaxXferError} || 0;
+  }
 } elsif ( $entity ) {
   $json = {
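The per-host hunk above leans on Statistics::Descriptive to derive the new new_size_avg, new_size_median and quartile items. A minimal standalone sketch of that usage, with hypothetical sizeNew values in bytes (backup #0 excluded, as in the script):

#!/usr/bin/perl
# Minimal sketch of the Statistics::Descriptive usage above; sizes are made up.
use strict;
use warnings;
use Statistics::Descriptive;

my $sizes = Statistics::Descriptive::Full->new();
$sizes->add_data( 950_000, 1_000_000, 1_050_000, 1_200_000, 4_800_000 );

printf "avg=%d median=%d q1=%d q3=%d sum=%d\n",
    $sizes->mean, $sizes->median,
    $sizes->quantile(1), $sizes->quantile(3),
    $sizes->sum;

The quartiles give a robust baseline for "normal" backup sizes, so a template trigger can presumably flag a new_size far above q3 or far below q1, scaled by the factors the discovery script exports below.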
@@ -90,12 +117,13 @@ if ( $host ) {
   foreach my $backup ( $bpc->BackupInfoRead( $host ) ) {
     next if ( $backup->{type} !~ m/^full|incr$/ );
-    # For the last full backup of this host, we do not count
-    # the new file size, but the total size
+    # Save the total size of the last full backup
     if ( $backup->{type} eq 'full' ) {
       $full_size = $backup->{size};
       $new_size_of_last_full = $backup->{sizeNew};
     }
     $json->{size} += $backup->{sizeNew};
     $total_new += $backup->{sizeNew};
     $total_comp += $backup->{sizeNewComp};
@@ -106,6 +134,10 @@ if ( $host ) {
     # Compute the average cost as the number of hours per day spent
     # to backup this host
     $json->{perf} += ( $bkp_num > 0 ) ? $duration / ( 3600 * $bkp_num * $freq ) : 0;
+    # $json->{size} represents the total size used by this host,
+    # but we want to subtract the new size of the last full backup: for that one we
+    # count size rather than sizeNew, and as we've already added sizeNew we must subtract it twice
     $json->{size} += $full_size - 2 * $new_size_of_last_full;
   }
   $json->{ratio} = ( $total_new > 0 ) ? 100 - ( $total_comp * 100 / $total_new ) : 0;
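A quick worked example of that perf metric (hours of backup per day), using hypothetical figures:

#!/usr/bin/perl
# Worked example of the perf metric above; the figures are made up.
# 10 retained backups totalling 5 hours of transfer time, one backup
# scheduled per day ($freq = 1).
use strict;
use warnings;

my ( $duration, $bkp_num, $freq ) = ( 5 * 3600, 10, 1 );
my $perf = ( $bkp_num > 0 ) ? $duration / ( 3600 * $bkp_num * $freq ) : 0;
printf "perf = %.2f hours/day\n", $perf;    # prints: perf = 0.50 hours/day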
@@ -114,6 +146,7 @@ if ( $host ) {
   foreach my $key ( qw(ratio perf) ) {
     $json->{$key} = sprintf( "%.2f", $json->{$key} );
   }
 } else {
   print<<"EOF";

zabbix_scripts/disco_backuppc_sudo

@@ -7,12 +7,14 @@ use POSIX;
 use JSON;
 use Getopt::Long;
 my $hosts = 1;
 my $entities = 0;
+my $pretty = 0;
 GetOptions(
   "hosts"    => \$hosts,
-  "entities" => \$entities
+  "entities" => \$entities,
+  "pretty"   => \$pretty
 );
 # We need to switch to backuppc UID/GID
@@ -39,20 +41,24 @@ if ($entities) {
   push @{$json->{data}}, { '{#BPC_ENTITY}' => $_ } foreach ( keys %entities );
 } elsif ($hosts){
   foreach my $host ( keys %$hosts ){
     my $hostConf = $bpc->ConfigDataRead($host);
     my $conf = { %$mainConf, %$hostConf };
     my $warning = $conf->{EMailNotifyOldBackupDays};
     my $errors = ( defined $conf->{MaxXferError} ) ? $conf->{MaxXferError} : '0';
     my $monitoring = $conf->{ZabbixMonitoring} || 1;
+    my $sizeTooBigFactor = $conf->{ZabbixSizeTooBigFactor} || 6;
+    my $sizeTooSmallFactor = $conf->{ZabbixSizeTooSmallFactor} || 3;
     my $status = ( $conf->{BackupsDisable} gt 0 or $monitoring eq '0' ) ? '0' : '1';
     push @{$json->{data}},
       {
         "{#BPCHOST}" => $host,
         "{#BPCNOBACKUPWARNING}" => $warning,
         "{#BPCMAXERROR}" => $errors,
         "{#BPCSTATUS}" => $status,
+        "{#BPC_TOO_BIG_FACTOR}" => $sizeTooBigFactor,
+        "{#BPC_TOO_SMALL_FACTOR}" => $sizeTooSmallFactor,
       };
   }
 }
-print to_json($json);
+print to_json( $json, { pretty => $pretty } );
 exit(0);
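For reference, a small sketch of what the new pretty flag changes in the discovery output; the one-host payload and its macro values are hypothetical:

#!/usr/bin/perl
# Sketch of the pretty toggle added above; the payload is made up.
use strict;
use warnings;
use JSON;

my $json = { data => [ {
    '{#BPCHOST}'              => 'web01',
    '{#BPCSTATUS}'            => '1',
    '{#BPC_TOO_BIG_FACTOR}'   => 6,
    '{#BPC_TOO_SMALL_FACTOR}' => 3,
} ] };

print to_json($json) . "\n";           # compact, the old behaviour
print to_json( $json, { pretty => 1 } );  # indented, easier to read when debugging

Zabbix parses both forms the same way, so the flag only matters when inspecting the payload by hand.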
