#!/usr/bin/perl -w

use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use File::Path qw(make_path);
use File::Basename;
use Data::Dumper;

my $pvesh = which('pvesh');
my $json = {};
my $pretty = 0;
my ($cluster, $guest, $node, $storage, $pool);

# Max age (in seconds) of cached values
my $cache = 60;
my $cache_dir = '/tmp/zbx_pve_cache/';

GetOptions(
  'cluster'     => \$cluster,
  'guest=i'     => \$guest,
  'node=s'      => \$node,
  'storage=s'   => \$storage,
  'pool=s'      => \$pool,
  'pretty'      => \$pretty,
  'cache=i'     => \$cache,
  'cache-dir=s' => \$cache_dir
);
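
# Typical invocations (illustrative; the actual script name and install path may differ):
#   ./pve_zabbix.pl --cluster --pretty
#   ./pve_zabbix.pl --node pve1
#   ./pve_zabbix.pl --guest 101
#   ./pve_zabbix.pl --storage local-lvm
#   ./pve_zabbix.pl --pool customer1 --cache 120 --cache-dir /var/tmp/pve_cache/
# It is usually wired to the Zabbix agent through UserParameter entries, e.g.:
#   UserParameter=pve.cluster,/path/to/pve_zabbix.pl --cluster
#   UserParameter=pve.guest[*],/path/to/pve_zabbix.pl --guest $1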

# Old versions do not support, nor need, --output-format=json
my $pvesh_opt = (system("$pvesh ls / --output-format=json > /dev/null 2>&1") == 0) ? '--output-format=json' : '';
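# When supported, this flag is appended to every pvesh call below, e.g. (illustrative):
#   pvesh get /cluster/status --output-format=json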

if ($cluster){

  my $cluster = get_api_data('/cluster/status');

  # Set default values so monitoring works for a single node, without a cluster setup
  $json->{status} = {
    all_online => 1,
    quorate    => 1,
    nodes      => 1,
    name       => 'default',
    version    => 1
  };

  # Set default global stats
  $json->{network} = {
    in  => 0,
    out => 0
  };
  $json->{disk} = {
    read  => 0,
    write => 0
  };

  my @nodes = ();
  foreach my $item (@{$cluster}){
    if ($item->{type} eq 'cluster'){
      $json->{status}->{$_} = $item->{$_} foreach (qw(quorate nodes name version));
    } elsif ($item->{type} eq 'node' and $item->{online}){
      push @nodes, $item->{name};
    } elsif ($item->{type} eq 'node'){
      $json->{status}->{all_online} = 0;
    }
  }

  foreach my $node (@nodes){
    my $n = get_api_data("/nodes/$node/status");
    # Gather (and sum) info from each node to get the total number of
    # CPUs, the amount of memory, etc.
    $json->{memory}->{$_}  += $n->{memory}->{$_}  foreach (qw(free total used));
    $json->{ksm}->{$_}     += $n->{ksm}->{$_}     foreach (qw(shared));
    $json->{cpuinfo}->{$_} += $n->{cpuinfo}->{$_} foreach (qw(cpus sockets));
    $json->{loadavg}[$_]   += $n->{loadavg}[$_]   foreach (0..2);
  }

  # We want the average load of the cluster, not the sum of the individual loads
  $json->{loadavg}[$_] = sprintf "%.2f", $json->{loadavg}[$_] / $json->{status}->{nodes} foreach (0..2);
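  # For example (illustrative), two nodes with 1-minute loads of 0.50 and 1.50
  # are reported as a cluster load of "1.00".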

  # Sum per-guest network and disk I/O counters to get cluster-wide totals
  my $guests = get_api_data('/cluster/resources', '--type=vm');
  foreach my $guest (@{$guests}){
    $json->{network}->{in}  += $guest->{netin}     || 0;
    $json->{network}->{out} += $guest->{netout}    || 0;
    $json->{disk}->{read}   += $guest->{diskread}  || 0;
    $json->{disk}->{write}  += $guest->{diskwrite} || 0;
  }

} elsif ($node){

  # Status, PVE version and subscription details of a single node
  foreach my $item (qw(status version subscription)){
    $json->{$item} = get_api_data("/nodes/$node/$item");
  }

} elsif ($guest){

  # Look the requested guest up (by vmid) in the cluster resource list
  my $guests = get_api_data('/cluster/resources', '--type=vm');
  foreach my $g (@{$guests}){
    if ($g->{vmid} eq $guest){
      $json = $g;
      last;
    }
  }

} elsif ($pool){

  my $pool = get_api_data("/pools/$pool");
  $json->{comment} = $pool->{comment};

  # Initialize per-type counters for guests and templates
  foreach my $type (qw(qemu lxc)){
    $json->{$_}->{$type} = 0 foreach (qw(guests templates));
  }

  foreach my $item (@{$pool->{members}}){
    # Regular guests: sum their resources and I/O counters
    if ($item->{type} =~ m/^(qemu|lxc)$/ and !$item->{template}){
      $json->{guests}->{$_} += $item->{$_} foreach (qw(maxcpu diskread diskwrite maxdisk mem maxmem netin netout));
      $json->{guests}->{used_cpu} += $item->{cpu} * $item->{maxcpu};
      $json->{guests}->{$item->{type}}++;
    }
    # Templates: only their disk footprint is relevant
    if ($item->{type} =~ m/^(qemu|lxc)$/ and $item->{template}){
      $json->{templates}->{$_} += $item->{$_} foreach (qw(maxdisk));
      $json->{templates}->{$item->{type}}++;
    }
  }

  # Make sure every expected key exists, even for an empty pool
  $json->{guests}->{$_} //= 0 foreach (qw(cpu maxcpu diskread diskwrite maxdisk mem maxmem netin netout));
  $json->{templates}->{$_} //= 0 foreach (qw(maxdisk));

  # Average CPU usage of the pool, weighted by the number of cores allocated to each guest
  $json->{guests}->{cpu} = ($json->{guests}->{maxcpu} == 0) ? 0 : $json->{guests}->{used_cpu} / $json->{guests}->{maxcpu};
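  # Worked example (illustrative): a guest with maxcpu=4 at 50% and one with
  # maxcpu=2 at 10% give used_cpu = 4*0.5 + 2*0.1 = 2.2, so cpu = 2.2 / 6 ≈ 0.37.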

} elsif ($storage){

  # Total and used space of the requested storage
  my $stores = get_api_data('/cluster/resources', '--type=storage');
  foreach my $s (@{$stores}){
    if ($s->{storage} eq $storage){
      $json->{maxdisk} = $s->{maxdisk};
      $json->{disk}    = $s->{disk};
      last;
    }
  }
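  # The usage percentage can then be computed on the Zabbix side,
  # e.g. (illustrative) as disk / maxdisk * 100.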

} else{

  # No (or unknown) mode requested: tell Zabbix this item is not supported
  print 'ZBX_NOTSUPPORTED';
  exit 0;
}

print to_json($json, { pretty => $pretty }) . "\n";
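
# For --cluster, the printed JSON has roughly the following shape (illustrative values):
#   {"status":{"name":"default","quorate":1,"nodes":2,"version":1,"all_online":1},
#    "memory":{...},"ksm":{...},"cpuinfo":{...},"loadavg":["0.10","0.20","0.30"],
#    "network":{"in":0,"out":0},"disk":{"read":0,"write":0}}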

# Helper which will either get data from the cache if it's fresh enough,
# or query the API and save the result in the cache for later
sub get_api_data {
  my ($path, $query_opt) = @_;
  $query_opt ||= '';

  # Build a filename-friendly suffix from the query options (e.g. --type=vm becomes __type_vm)
  my $opt_filename = $query_opt;
  $opt_filename =~ s/[\-=]/_/g;

  my $res;
  # Does the cache file exist, and is it fresh enough?
  if (-f $cache_dir . $path . $opt_filename and int((-M $cache_dir . $path . $opt_filename)*60*60*24) < $cache){
    {
      local $/; # Enable slurp mode
      open my $fh, "<", $cache_dir . $path . $opt_filename;
      $res = <$fh>;
      close $fh;
    }
  } else {
    $res = qx($pvesh get $path $query_opt $pvesh_opt 2>/dev/null);
    # Save the result in the cache for later retrieval
    eval{
      my $dir = (fileparse($path))[1];
      make_path($cache_dir . $dir, { chmod => 0700 });
    };
    open my $fh, ">", $cache_dir . $path . $opt_filename;
    print $fh $res;
    close $fh;
  }
  return from_json($res);
}
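
# Cache layout example (illustrative, with the default --cache-dir):
#   get_api_data('/cluster/resources', '--type=vm')
# reads from / writes to /tmp/zbx_pve_cache//cluster/resources__type_vm
# (the doubled slash is harmless).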