#!/usr/bin/perl

use strict;
use warnings;
use lib "/usr/share/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::CGI::Lib;
use POSIX;
use JSON;
use Getopt::Long;
use Statistics::Descriptive;
use Data::Dumper;

my $host = undef;
my $entity = undef;
my $pretty = 0;

GetOptions(
  "host=s"   => \$host,
  "entity=s" => \$entity,
  "pretty"   => \$pretty
);
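
# Example invocations (host and entity names are illustrative):
#   $0 --host=server1.example.com --pretty
#   $0 --entity=customer1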

# We need to switch to the backuppc UID/GID. Drop the GID before the UID:
# once setuid has dropped root privileges, setgid would fail.
my $uid = getuid();
my $gid = getgid();
my (undef,undef,$bkpuid,$bkpgid) = getpwnam('backuppc');
setgid($bkpgid) if ($gid != $bkpgid);
setuid($bkpuid) if ($uid != $bkpuid);

my $bpc = BackupPC::Lib->new();
my $mainConf = $bpc->ConfigDataRead();
my $json = {};

if ( $host ) {
  my $hostConf = $bpc->ConfigDataRead($host);
  my $conf = { %$mainConf, %$hostConf };
  $json = {
    bkp             => 0,
    full_size       => 0,
    total_size      => 0,
    history_size    => 0,
    errors          => 0,
    new_size        => 0,
    new_size_avg    => 0,
    new_size_median => 0,
    new_size_q1     => 0,
    new_size_q3     => 0,
    duration        => 0,
    comp_ratio      => 0,
    enabled         => 0,
    max_errors      => 0,
    age             => 0,
    type            => 'none'
  };

  my $new_size_of_last_full = 0;
  my @bpc_info = $bpc->BackupInfoRead($host);
  my $sizes = Statistics::Descriptive::Full->new;

  if ( scalar( @bpc_info ) ){
    foreach my $backup ( @bpc_info ) {
      # Skip partial or active backups
      next if ( $backup->{type} !~ m/^(?:full|incr)$/ );
      if ( $backup->{type} eq "full" ) {
        $json->{full_size} = $backup->{size};
        $new_size_of_last_full = $backup->{sizeNew} if $backup->{num} > 0;
      }
      # Push all the sizes into our data set to compute average sizes.
      # Exclude backup #0 as it will always have much more new data than
      # normal backups. Also exclude backups whose size is not defined:
      # this can happen in BackupPC v3 when the BackupPC_link process is
      # waiting for the nightly to finish
      $sizes->add_data($backup->{sizeNew}) if ( $backup->{num} > 0 && $backup->{sizeNew} );
      $json->{bkp}++;
    }

    # Ignore the last backup if it's not full or incr (which means it's either partial or active)
    my $i = ( $bpc_info[-1]->{type} =~ m/^(?:full|incr)$/ ) ? -1 : -2;

    $json->{errors} = $bpc_info[$i]->{xferErrs};
    $json->{duration} = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
    $json->{type} = $bpc_info[$i]->{type};

    $json->{new_size_avg} = int $sizes->mean;
    $json->{new_size_median} = int $sizes->median;

    # Some old versions of Statistics::Descriptive (eg, on el5) do not support quantile
    $json->{new_size_q1} = eval { int $sizes->quantile(1) } || 0;
    $json->{new_size_q3} = eval { int $sizes->quantile(3) } || 0;
    $json->{enabled} = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
    # Total size: the new data of every backup plus the size of the last
    # full, minus the last full's new size (already included in the sum)
    $json->{total_size} = $sizes->sum + $json->{full_size} - $new_size_of_last_full;
    $json->{history_size} = $json->{total_size} - $json->{full_size};
    $json->{age} = time - $bpc_info[$i]->{startTime};

    # For newSize, we need to wait for BackupPC_link to run, which can be
    # delayed if a nightly process is running. In that case, use the stats
    # from the previous backup, except when we have a single backup, in
    # which case we read the stats of this only backup
    $i = ( $bpc_info[-1]->{sizeNew} || scalar @bpc_info == 1 ) ? -1 : -2;
    $json->{new_size} = $bpc_info[$i]->{sizeNew};
    $json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 ) ?
      sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
      :
      0;
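    # e.g. 3 GB of new data stored as 1 GB once compressed gives
    # comp_ratio = 100 - ( 1 * 100 / 3 ) = 66.67 (percent saved)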

    $json->{max_errors} = $conf->{MaxXferError} || 0;
  }
} elsif ( $entity ) {

  $json = {
    perf         => 0,
    size         => 0,
    full_size    => 0,
    total_size   => 0,
    history_size => 0,
    hosts        => 0,
    bkp          => 0,
    ratio        => 0
  };

  my $entity_total_new = 0;
  my $entity_total_comp = 0;

  foreach my $host ( keys %{ $bpc->HostInfoRead } ) {
    next unless $host =~ m/^(vm_)?\Q$entity\E_.*/;
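    # e.g. --entity=customer1 keeps hosts like "customer1_www" or
    # "vm_customer1_db" (host names are illustrative)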
    my $full_size;

    $json->{hosts}++;

    my $hostConf = $bpc->ConfigDataRead($host);
    my $conf = { %$mainConf, %$hostConf };
    # The effective backup frequency is the shorter of FullPeriod and IncrPeriod (in days)
    my $freq = ( $conf->{FullPeriod} > $conf->{IncrPeriod} ) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
    my $host_duration = 0;
    my $host_bkp_num = 0;
    my $host_new_size = 0;
    my $host_full_size = 0;
    my $host_new_size_of_last_full = 0;

    foreach my $backup ( $bpc->BackupInfoRead( $host ) ) {
      next if ( $backup->{type} !~ m/^(?:full|incr)$/ );

      # Save the total size of the last full backup
      if ( $backup->{type} eq 'full' ) {
        $host_full_size = $backup->{size};
        $host_new_size_of_last_full = $backup->{sizeNew} if $backup->{num} > 0;
      }
      $host_new_size += $backup->{sizeNew} if ( $backup->{num} > 0 && $backup->{sizeNew} );

      # sizeNew can be undefined while BackupPC_link is pending (see the
      # host branch above), so default to 0
      $entity_total_new += $backup->{sizeNew} || 0;
      $entity_total_comp += $backup->{sizeNewComp} || 0;
      $host_duration += $backup->{endTime} - $backup->{startTime};
      $host_bkp_num++;
      $json->{bkp}++;
    }
    # Compute the average cost as the number of hours per day spent
    # backing up this host
    $json->{perf} += ( $host_bkp_num > 0 ) ? $host_duration / ( 3600 * $host_bkp_num * $freq ) : 0;
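    # e.g. 10 backups totalling 5h of runtime (18000s) on a host backed up
    # daily ($freq = 1) gives 18000 / (3600 * 10 * 1) = 0.5 hours/day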

    # $host_total_size represents the total size used by this host, but we
    # must subtract the new size of the last full: for that one we count
    # its full size rather than its sizeNew
    my $host_total_size = $host_new_size + $host_full_size - $host_new_size_of_last_full;

    # size is kept just for compatibility. The new Zabbix template will use total_size
    $json->{size} += $host_total_size;
    $json->{total_size} += $host_total_size;
    $json->{full_size} += $host_full_size;
    $json->{history_size} += $host_total_size - $host_full_size;
  }
  $json->{ratio} = ( $entity_total_new > 0 ) ? 100 - ( $entity_total_comp * 100 / $entity_total_new ) : 0;

  # Round some values
  foreach my $key ( qw(ratio perf) ) {
    $json->{$key} = sprintf( "%.2f", $json->{$key} );
  }

} else {
  print <<"EOF";

Usage: $0 --host=<host> or --entity=<entity>

EOF
}
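
# Sample --pretty output for a --host query (values are illustrative):
# {
#    "bkp" : 12,
#    "errors" : 0,
#    "type" : "incr",
#    ...
# }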

print to_json( $json, { pretty => $pretty } );
exit(0);