From 0a154335a6468b176ec349591acd7fa7c69fa8d5 Mon Sep 17 00:00:00 2001
From: Heuzef
Date: Wed, 25 Sep 2019 10:02:19 +0200
Subject: [PATCH] add backuppc_check.pl

---
 backuppc_check.pl | 185 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 185 insertions(+)
 create mode 100644 backuppc_check.pl

diff --git a/backuppc_check.pl b/backuppc_check.pl
new file mode 100644
index 0000000..2ba9b15
--- /dev/null
+++ b/backuppc_check.pl
@@ -0,0 +1,185 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use lib "/usr/share/BackupPC/lib";
+use BackupPC::Lib;
+use POSIX;
+use JSON;
+use Getopt::Long;
+use Statistics::Descriptive;
+
+# The usage message advertises --host and --entity, so parse them with Getopt::Long
+my ( $host, $entity );
+GetOptions(
+    "host=s"   => \$host,
+    "entity=s" => \$entity,
+);
+
+# We need to switch to the backuppc UID/GID
+my $uid = getuid();
+my $gid = getgid();
+my ( undef, undef, $bkpuid, $bkpgid ) = getpwnam('backuppc');
+# Drop the group first: setgid() would fail once setuid() has dropped root
+setgid($bkpgid) if ( $gid != $bkpgid );
+setuid($bkpuid) if ( $uid != $bkpuid );
+
+my $bpc      = BackupPC::Lib->new();
+my $mainConf = $bpc->ConfigDataRead();
+my $json     = {};
+
+if ($host) {
+    my $hostConf = $bpc->ConfigDataRead($host);
+    my $conf     = { %$mainConf, %$hostConf };
+    $json = {
+        bkp             => 0,
+        full_size       => 0,
+        errors          => 0,
+        new_size        => 0,
+        new_size_avg    => 0,
+        new_size_median => 0,
+        new_size_q1     => 0,
+        new_size_q3     => 0,
+        duration        => 0,
+        comp_ratio      => 0
+    };
+
+    my $new_size_of_last_full = 0;
+    my @bpc_info = $bpc->BackupInfoRead($host);
+    my $sizes    = Statistics::Descriptive::Full->new();
+
+    if ( scalar(@bpc_info) ) {
+        foreach my $backup (@bpc_info) {
+            # Skip partial or active backups
+            next if ( $backup->{type} !~ m/^(?:full|incr)$/ );
+            if ( $backup->{type} eq "full" ) {
+                $json->{full_size}     = $backup->{size};
+                $new_size_of_last_full = $backup->{sizeNew};
+            }
+            # Push all the sizes into our data set to compute average sizes.
+            # Exclude backup N°0 as it'll always have much more new data than normal backups
+            $sizes->add_data( $backup->{sizeNew} ) unless ( $backup->{num} == 0 );
+            $json->{bkp}++;
+        }
+
+        # Ignore the last backup if it's neither full nor incr (which means it's either partial or active)
+        my $i = ( $bpc_info[-1]->{type} =~ m/^(?:full|incr)$/ ) ? -1 : -2;
+
+        $json->{errors}     = $bpc_info[$i]->{xferErrs};
+        $json->{new_size}   = $bpc_info[$i]->{sizeNew};
+        $json->{duration}   = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
+        $json->{type}       = $bpc_info[$i]->{type};
+        $json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 )
+          ? sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
+          : 0;
+
+        $json->{new_size_avg}    = int $sizes->mean;
+        $json->{new_size_median} = int $sizes->median;
+        # Some old versions of Statistics::Descriptive (eg, on el5) do not support quantile
+        $json->{new_size_q1} = eval { int $sizes->quantile(1) } || 0;
+        $json->{new_size_q3} = eval { int $sizes->quantile(3) } || 0;
+        $json->{enabled}     = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
+        $json->{total_size}  = $sizes->sum + $json->{full_size} - 2 * $new_size_of_last_full;
+        $json->{age}         = time - $bpc_info[$i]->{startTime};
+
+        $json->{max_errors}  = $conf->{MaxXferError} || 0;
+    }
+} elsif ($entity) {
+
+    $json = {
+        perf  => 0,
+        size  => 0,
+        hosts => 0,
+        bkp   => 0,
+        ratio => 0
+    };
+
+    my $total_new  = 0;
+    my $total_comp = 0;
+
+    foreach my $host ( keys %{ $bpc->HostInfoRead } ) {
+        next unless $host =~ m/^(vm_)?\Q$entity\E_.*/;
+        my $full_size = 0;
+
+        $json->{hosts}++;
+
+        my $hostConf = $bpc->ConfigDataRead($host);
+        my $conf     = { %$mainConf, %$hostConf };
+        my $freq     = ( $conf->{FullPeriod} > $conf->{IncrPeriod} ) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
+        my $duration = 0;
+        my $bkp_num  = 0;
+        my $new_size_of_last_full = 0;
+
+        foreach my $backup ( $bpc->BackupInfoRead($host) ) {
+            next if ( $backup->{type} !~ m/^(?:full|incr)$/ );
+
+            # Save the total size of the last full backup
+            if ( $backup->{type} eq 'full' ) {
+                $full_size             = $backup->{size};
+                $new_size_of_last_full = $backup->{sizeNew};
+            }
+
+            $json->{size} += $backup->{sizeNew};
+            $total_new    += $backup->{sizeNew};
+            $total_comp   += $backup->{sizeNewComp};
+            $duration     += $backup->{endTime} - $backup->{startTime};
+            $bkp_num++;
+            $json->{bkp}++;
+        }
+        # Compute the average cost as the number of hours per day spent
+        # backing up this host
+        $json->{perf} += ( $bkp_num > 0 ) ? $duration / ( 3600 * $bkp_num * $freq ) : 0;
+
+        # $json->{size} represents the total size used by this host. For the last
+        # full we count size rather than sizeNew, and as we've already added its
+        # sizeNew above we need to subtract it twice
+        $json->{size} += $full_size - 2 * $new_size_of_last_full;
+    }
+    $json->{ratio} = ( $total_new > 0 ) ? 100 - ( $total_comp * 100 / $total_new ) : 0;
+
+    # Round some values
+    foreach my $key (qw(ratio perf)) {
+        $json->{$key} = sprintf( "%.2f", $json->{$key} );
+    }
+
+} else {
+    print <<"EOF";
+
+Usage: $0 --host=<host> or --entity=<entity>
+
+EOF
+    exit(1);
+}
+
+# Print result
+if ($host) {
+    print("\n----------------\n");
+    print("new_size_median : $json->{new_size_median}\n");
+    print("new_size_avg    : $json->{new_size_avg}\n");
+    print("new_size_q3     : $json->{new_size_q3}\n");
+    print("new_size        : $json->{new_size}\n");
+    print("new_size_q1     : $json->{new_size_q1}\n");
+    print("full_size       : $json->{full_size}\n");
+    print("----------------\n");
+    print("Size Consistency : ");
+
+    # Flag the last backup as anomalous when its amount of new data falls
+    # outside the Tukey fences (1.5 * IQR beyond Q1/Q3) and is also far
+    # away from the average
+    my $iqr       = $json->{new_size_q3} - $json->{new_size_q1};
+    my $too_big   = ( $json->{new_size} > $json->{new_size_q3} + 1.5 * $iqr
+          && $json->{new_size} > $json->{new_size_avg} * 6 );
+    my $too_small = ( $json->{new_size} < $json->{new_size_q1} - 1.5 * $iqr
+          && $json->{new_size} < $json->{new_size_avg} / 3 );
+
+    if ( !$too_small && !$too_big ) {
+        print("Normal\n");
+    } else {
+        print("ANOMALOUS\n");
+    }
+} else {
+    # Entity mode: dump the aggregated counters as JSON
+    print to_json( $json, { pretty => 1 } );
+}
+
+exit(0);
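
Reviewer note (not part of the patch): a minimal standalone sketch of the size-consistency rule implemented above, for anyone who wants to check the thresholds in isolation. The size_consistency() helper and the sample quartile/average values are made up for illustration; only the fence formula mirrors the patch.

#!/usr/bin/perl
# Sketch of the anomaly rule: a backup's new data size is flagged when it
# falls outside the Tukey fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR) AND is also
# far from the average (x6 above, or /3 below).
use strict;
use warnings;

sub size_consistency {
    my ( $new_size, $q1, $q3, $avg ) = @_;
    my $iqr       = $q3 - $q1;
    my $too_big   = ( $new_size > $q3 + 1.5 * $iqr && $new_size > $avg * 6 );
    my $too_small = ( $new_size < $q1 - 1.5 * $iqr && $new_size < $avg / 3 );
    return ( $too_big || $too_small ) ? "ANOMALOUS" : "Normal";
}

# Hypothetical sizes in bytes: Q1 = 180 MB, Q3 = 220 MB, avg = 200 MB
print size_consistency( 210e6, 180e6, 220e6, 200e6 ), "\n";    # Normal
print size_consistency( 1.5e9, 180e6, 220e6, 200e6 ), "\n";    # ANOMALOUS (too big)
print size_consistency( 10e6,  180e6, 220e6, 200e6 ), "\n";    # ANOMALOUS (too small)

Requiring both the fence and the average test to fail keeps the check quiet on hosts whose backup sizes are tightly clustered, where the IQR alone would make the fences overly narrow.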