|
|
@@ -27,176 +27,176 @@ use Getopt::Long;
|
|
|
use Statistics::Descriptive;
use Data::Dumper;

sub size_consistency {
    my $host   = undef;
    my $entity = undef;
    my $pretty = 0;

    GetOptions(
        "host=s"   => \$host,
        "entity=s" => \$entity,
        "pretty"   => \$pretty
    );
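    # Illustrative invocations (the wrapper script name is assumed here, it is not
    # part of this hunk):
    #   backuppc_stats.pl --host www1 --pretty    -> per-host statistics
    #   backuppc_stats.pl --entity acme           -> aggregated statistics for an entity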
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    # We need to switch to the backuppc UID/GID
    my $uid = getuid();
    my $gid = getgid();
    my (undef, undef, $bkpuid, $bkpgid) = getpwnam('backuppc');
    # Drop the group first: once the UID has been changed, setgid() would fail
    setgid($bkpgid) if ( $gid != $bkpgid );
    setuid($bkpuid) if ( $uid != $bkpuid );

    my $bpc = BackupPC::Lib->new();
    my $mainConf = $bpc->ConfigDataRead();
    my $json = {};

    if ( $host ) {
        my $hostConf = $bpc->ConfigDataRead($host);
        my $conf = { %$mainConf, %$hostConf };
        my $age = -1;
        $json = {
            bkp             => 0,
            full_size       => 0,
            errors          => 0,
            new_size        => 0,
            new_size_avg    => 0,
            new_size_median => 0,
            new_size_q1     => 0,
            new_size_q3     => 0,
            duration        => 0,
            comp_ratio      => 0
        };

        my $lastXferErrors = 0;
        my $maxErrors = 0;
        my $new_size_of_last_full = 0;
        my @bpc_info = $bpc->BackupInfoRead($host);
        my $sizes = Statistics::Descriptive::Full->new();

        if ( scalar( @bpc_info ) ) {
            foreach my $backup ( @bpc_info ) {
                # Skip partial or active backups
                next if ( $backup->{type} !~ m/^(?:full|incr)$/ );
                if ( $backup->{type} eq "full" ) {
                    $json->{full_size} = $backup->{size};
                    $new_size_of_last_full = $backup->{sizeNew};
                }
                # Push all the sizes in our data set to compute avg sizes
                # Exclude backup N°0 as it'll always have much more new data than normal backups
                $sizes->add_data($backup->{sizeNew}) unless ( $backup->{num} == 0 );
                $json->{bkp}++;
            }

            # Ignore the last backup if it's not full or incr (which means it's either partial or active)
            my $i = ( $bpc_info[-1]->{type} =~ m/^(?:full|incr)$/ ) ? -1 : -2;

            $json->{errors}     = $bpc_info[$i]->{xferErrs};
            $json->{new_size}   = $bpc_info[$i]->{sizeNew};
            $json->{duration}   = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
            $json->{type}       = $bpc_info[$i]->{type};
            $json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 ) ?
                sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
                :
                0;
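            # e.g. with sizeNew = 1000 and sizeNewComp = 400 this yields
            # 100 - ( 400 * 100 / 1000 ) = 60.00, i.e. roughly 60% saved by compression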
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
            $json->{new_size_avg}    = int $sizes->mean;
            $json->{new_size_median} = int $sizes->median;
            # Some old versions of Statistics::Descriptive (eg, on el5) do not support quantile
            $json->{new_size_q1}     = eval { int $sizes->quantile(1) } || 0;
            $json->{new_size_q3}     = eval { int $sizes->quantile(3) } || 0;
            $json->{enabled}         = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
            $json->{total_size}      = $sizes->sum + $json->{full_size} - 2 * $new_size_of_last_full;
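            # As in the per-entity computation further down: the last full is counted with
            # its full size rather than its sizeNew, and since its sizeNew was already added
            # through $sizes->sum it has to be subtracted twice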
|
|
|
|
|
|
|
            $json->{age} = time - $bpc_info[$i]->{startTime};

            $json->{max_errors} = $conf->{MaxXferError} || 0;
        }
    } elsif ( $entity ) {
        $json = {
            perf  => 0,
            size  => 0,
            hosts => 0,
            bkp   => 0,
            ratio => 0
        };

        my $total_new  = 0;
        my $total_comp = 0;

        foreach my $host ( keys %{ $bpc->HostInfoRead } ) {
            next unless $host =~ m/^(vm_)?\Q$entity\E_.*/;
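            # e.g. --entity acme matches hosts named acme_www1 as well as vm_acme_db1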
|
|
|
|
|
|
|
            my $full_size;

            $json->{hosts}++;

            my $hostConf = $bpc->ConfigDataRead($host);
            my $conf = { %$mainConf, %$hostConf };
            my $freq = ( $conf->{FullPeriod} > $conf->{IncrPeriod} ) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
            my $duration = 0;
            my $bkp_num = 0;
            my $new_size_of_last_full = 0;

            foreach my $backup ( $bpc->BackupInfoRead( $host ) ) {
                next if ( $backup->{type} !~ m/^(?:full|incr)$/ );

                # Save the total size of the last full backup
                if ( $backup->{type} eq 'full' ) {
                    $full_size = $backup->{size};
                    $new_size_of_last_full = $backup->{sizeNew};
                }

                $json->{size} += $backup->{sizeNew};
                $total_new    += $backup->{sizeNew};
                $total_comp   += $backup->{sizeNewComp};
                $duration     += $backup->{endTime} - $backup->{startTime};
                $bkp_num++;
                $json->{bkp}++;
            }

            # Compute the average cost as the number of hours per day spent
            # backing up this host
            $json->{perf} += ( $bkp_num > 0 ) ? $duration / ( 3600 * $bkp_num * $freq ) : 0;
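            # For example (illustrative numbers): 4 hours of cumulated backup time over
            # 8 backups with a one-day period gives 14400 / ( 3600 * 8 * 1 ) = 0.5,
            # i.e. half an hour of backup per day for this host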
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
            # $json->{size} represents the total size used by this host.
            # But we want to subtract the new size of the last full, as for this one we
            # count size rather than sizeNew. As we've already added sizeNew we need to subtract it twice.
            $json->{size} += $full_size - 2 * $new_size_of_last_full;
        }

        $json->{ratio} = ( $total_new > 0 ) ? 100 - ( $total_comp * 100 / $total_new ) : 0;

        # Round some values
        foreach my $key ( qw(ratio perf) ) {
            $json->{$key} = sprintf( "%.2f", $json->{$key} );
        }
    } else {
        print("Error : Wrong usage");
    }
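
    # The checks below flag the new size of the last backup with a Tukey-style
    # interquartile test combined with a coarse ratio against the average; both
    # conditions of a check must hold. The grouping of the IQR term is a
    # reconstruction of the original expression. Illustrative numbers: with
    # q1 = 800, q3 = 1000 and avg = 900, "too big" needs
    # new_size > 1000 + ( 1000 - 800 ) * 1.5 = 1300 and > 900 * 6 = 5400 (so > 5400),
    # while "too small" needs new_size < 800 - ( 1000 - 800 ) * 1.5 = 500 and
    # < 900 / 3 = 300 (so < 300).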
|
|
|
    # TOO SMALL ?
    my $toosmall = "false";
    if ( $json->{new_size} < $json->{new_size_q1} - ( $json->{new_size_q3} - $json->{new_size_q1} ) * 1.5
         && $json->{new_size} < $json->{new_size_avg} / 3 ) {
        $toosmall = "true";
    }

    # TOO BIG ?
    my $toobig = "false";
    if ( $json->{new_size} > $json->{new_size_q3} + ( $json->{new_size_q3} - $json->{new_size_q1} ) * 1.5
         && $json->{new_size} > $json->{new_size_avg} * 6 ) {
        $toobig = "true";
    }

    # Print result
    if ( $toosmall eq "false" && $toobig eq "false" ) {
        print("Normal");
    }
    else {
        print("ANOMALOUS");
    }

    exit(0);
}

sub action