master
Heuzef 5 years ago
parent e2adb844a4
commit 125b66f477
Check.pm

@@ -109,7 +109,7 @@ sub action
        } else {
            $lastAgeColor = "Tomato";
        }
        $reasonHilite = $Conf{CgiStatusHilightColor}{$Status{$host}{reason}}
                        || $Conf{CgiStatusHilightColor}{$Status{$host}{state}};
        if ( $Conf{BackupsDisable} == 1 ) {
            if ( $Status{$host}{state} ne "Status_backup_in_progress"
@@ -153,6 +153,7 @@ EOF
            $strGood .= $str;
        }
    }
    $fullSizeTot = sprintf("%.2f", $fullSizeTot / 1024);
    $incrSizeTot = sprintf("%.2f", $incrSizeTot / 1024);
    my $now = timeStamp2(time);
@@ -165,160 +166,4 @@ EOF
    Trailer();
}
sub size_consistency
{
    my $bpc = BackupPC::Lib->new();
    my $mainConf = $bpc->ConfigDataRead();
    my $json = {};
    if ( $host ) {
        my $hostConf = $bpc->ConfigDataRead($host);
        my $conf = { %$mainConf, %$hostConf };
        my $age = -1;
        $json = {
            bkp             => 0,
            full_size       => 0,
            errors          => 0,
            new_size        => 0,
            new_size_avg    => 0,
            new_size_median => 0,
            new_size_q1     => 0,
            new_size_q3     => 0,
            duration        => 0,
            comp_ratio      => 0
        };
        my $lastXferErrors = 0;
        my $maxErrors = 0;
        my $new_size_of_last_full = 0;
        my @bpc_info = $bpc->BackupInfoRead($host);
        my $sizes = Statistics::Descriptive::Full->new();
        if ( scalar( @bpc_info ) ) {
            foreach my $backup ( @bpc_info ) {
                # Skip partial or active backups
                next if ( $backup->{type} !~ m/^(?:full|incr)$/ );
                if ( $backup->{type} eq "full" ) {
                    $json->{full_size} = $backup->{size};
                    $new_size_of_last_full = $backup->{sizeNew};
                }
                # Push all the sizes in our data set to compute avg sizes
                # Exclude backup N°0 as it'll always have much more new data than normal backups
                $sizes->add_data($backup->{sizeNew}) unless ( $backup->{num} == 0 );
                $json->{bkp}++;
            }
            # Ignore the last backup if it's not full or incr (which means it's either partial or active)
            my $i = ( $bpc_info[-1]->{type} =~ m/^(?:full|incr)$/ ) ? -1 : -2;
            $json->{errors} = $bpc_info[$i]->{xferErrs};
            $json->{new_size} = $bpc_info[$i]->{sizeNew};
            $json->{duration} = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
            $json->{type} = $bpc_info[$i]->{type};
            $json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 ) ?
                sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
                : 0;
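            # comp_ratio is the percentage of the last backup's new data saved by
            # compression: e.g. sizeNew = 10000 and sizeNewComp = 4000 gives
            # 100 - ( 4000 * 100 / 10000 ) = 60 (%)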
            $json->{new_size_avg} = int $sizes->mean;
            $json->{new_size_median} = int $sizes->median;
            # Some old versions of Statistics::Descriptive (e.g. on el5) do not support quantile
            $json->{new_size_q1} = eval { int $sizes->quantile(1) } || 0;
            $json->{new_size_q3} = eval { int $sizes->quantile(3) } || 0;
            $json->{enabled} = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
            $json->{total_size} = $sizes->sum + $json->{full_size} - 2 * $new_size_of_last_full;
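            # total_size counts every backup's sizeNew plus the full size of the last
            # full backup; the sizeNew of that full is subtracted twice for the same
            # reason explained in the per-entity branch below (it is counted as size
            # rather than sizeNew, and was already added once into the sum)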
            $json->{age} = time - $bpc_info[$i]->{startTime};
            $json->{max_errors} = $conf->{MaxXferError} || 0;
        }
    } elsif ( $entity ) {
        $json = {
            perf  => 0,
            size  => 0,
            hosts => 0,
            bkp   => 0,
            ratio => 0
        };
        my $total_new = 0;
        my $total_comp = 0;
        foreach my $host ( keys %{ $bpc->HostInfoRead } ) {
            next unless $host =~ m/^(vm_)?\Q$entity\E_.*/;
            my $full_size;
            $json->{hosts}++;
            my $hostConf = $bpc->ConfigDataRead($host);
            my $conf = { %$mainConf, %$hostConf };
            my $freq = ( $conf->{FullPeriod} > $conf->{IncrPeriod} ) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
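            # i.e. $freq is the expected number of days between two backups of this
            # host: the shorter of FullPeriod and IncrPeriod (both expressed in days)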
            my $duration = 0;
            my $bkp_num = 0;
            my $new_size_of_last_full = 0;
            foreach my $backup ( $bpc->BackupInfoRead( $host ) ) {
                next if ( $backup->{type} !~ m/^(?:full|incr)$/ );
                # Save the total size of the last full backup
                if ( $backup->{type} eq 'full' ) {
                    $full_size = $backup->{size};
                    $new_size_of_last_full = $backup->{sizeNew};
                }
                $json->{size} += $backup->{sizeNew};
                $total_new += $backup->{sizeNew};
                $total_comp += $backup->{sizeNewComp};
                $duration += $backup->{endTime} - $backup->{startTime};
                $bkp_num++;
                $json->{bkp}++;
            }
            # Compute the average cost as the number of hours per day spent
            # backing up this host
            $json->{perf} += ( $bkp_num > 0 ) ? $duration / ( 3600 * $bkp_num * $freq ) : 0;
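            # For example, 3 backups totalling 21600 s of run time on a host with
            # $freq = 1 day contribute 21600 / ( 3600 * 3 * 1 ) = 2 hours per day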
            # $json->{size} represents the total size used by this host.
            # But we want to subtract the new size of the last full, as for this one we
            # do not count sizeNew but size. As we've already added sizeNew, we need to subtract it twice.
            $json->{size} += $full_size - 2 * $new_size_of_last_full;
        }
        $json->{ratio} = ( $total_new > 0 ) ? 100 - ( $total_comp * 100 / $total_new ) : 0;
        # Round some values
        foreach my $key ( qw(ratio perf) ) {
            $json->{$key} = sprintf( "%.2f", $json->{$key} );
        }
    } else {
        print("Error: wrong usage");
    }
    # TOO SMALL ?
    # The last backup looks anomalously small if its new size is below
    # Q1 - 1.5 * ( Q3 - Q1 ) and under a third of the average new size
    my $toosmall = "false";
    if ( $json->{new_size} < $json->{new_size_q1} - ( $json->{new_size_q3} - $json->{new_size_q1} ) * 1.5
         && $json->{new_size} < $json->{new_size_avg} / 3 ) {
        $toosmall = "true";
    }
    # TOO BIG ?
    # The last backup looks anomalously big if its new size is above
    # Q3 + 1.5 * ( Q3 - Q1 ) and over 6 times the average new size
    my $toobig = "false";
    if ( $json->{new_size} > $json->{new_size_q3} + ( $json->{new_size_q3} - $json->{new_size_q1} ) * 1.5
         && $json->{new_size} > $json->{new_size_avg} * 6 ) {
        $toobig = "true";
    }
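    # For example, with new_size_q1 = 400 and new_size_q3 = 500 (IQR = 100), the last
    # backup is flagged too big above 500 + 150 = 650 (if also over 6 x the average),
    # and too small below 400 - 150 = 250 (if also under a third of the average)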
    # Print result
    my $sizeConsistency;
    if ( $toosmall eq "false" && $toobig eq "false" ) {
        $sizeConsistency = "Normal";
    }
    else {
        $sizeConsistency = "ANOMALOUS";
    }
    print($sizeConsistency);
    return $sizeConsistency;
}
1;
