buildsys: add top-level make tidy target

See pve-common's commit 5ae1f2e ("buildsys: add tidy make target")
for details about the chosen xargs parameters.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
author Thomas Lamprecht 2025-06-01 15:33:04 +02:00
parent 96566527b3
commit 05ad6c9c11
20 changed files with 2362 additions and 2228 deletions
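Note: the Makefile hunk that actually introduces the tidy target is not reproduced in this excerpt, and the commit message above defers to pve-common for the reasoning behind the chosen xargs parameters. As a rough, illustrative sketch only — the file selection, the formatter invocation, and the -n/-P batching values below are assumptions, not the committed ones — such a top-level tidy target typically looks like this:

    # hypothetical sketch of a top-level "make tidy" target, not the committed Makefile;
    # the real target may use a project-specific perltidy wrapper/profile instead
    tidy:
    	git ls-files '*.pm' '*.pl' | xargs -n4 -P0 perltidy -b -bext='/'

This formats all tracked Perl sources in place, with xargs splitting the file list into small batches (-n4) and running as many perltidy processes in parallel as possible (-P0); perltidy's -bext='/' discards the .bak backup files it would otherwise leave behind.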

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -25,10 +25,10 @@ use PVE::Cluster::IPCConst;
use base 'Exporter';
our @EXPORT_OK = qw(
cfs_read_file
cfs_write_file
cfs_register_file
cfs_lock_file);
cfs_read_file
cfs_write_file
cfs_register_file
cfs_lock_file);
# x509 certificate utils
@ -94,16 +94,16 @@ my $observed = {
sub prepare_observed_file_basedirs {
if (!check_cfs_is_mounted(1)) {
warn "pmxcfs isn't mounted (/etc/pve), chickening out..\n";
return;
warn "pmxcfs isn't mounted (/etc/pve), chickening out..\n";
return;
}
for my $f (sort keys %$observed) {
next if $f !~ m!^(.*)/[^/]+$!;
my $dir = "$basedir/$1";
next if -e $dir; # can also be a link, so just use -e xist check
print "creating directory '$dir' for observed files\n";
make_path($dir);
next if $f !~ m!^(.*)/[^/]+$!;
my $dir = "$basedir/$1";
next if -e $dir; # can also be a link, so just use -e xist check
print "creating directory '$dir' for observed files\n";
make_path($dir);
}
}
@ -168,11 +168,11 @@ my $ipcc_get_config = sub {
my $bindata = pack "Z*", $path;
my $res = PVE::IPCC::ipcc_send_rec(CFS_IPC_GET_CONFIG, $bindata);
if (!defined($res)) {
if ($! != 0) {
return undef if $! == ENOENT;
die "$!\n";
}
return '';
if ($! != 0) {
return undef if $! == ENOENT;
die "$!\n";
}
return '';
}
return $res;
@ -207,7 +207,7 @@ my $ipcc_log = sub {
my ($priority, $ident, $tag, $msg) = @_;
my $bindata = pack "CCCZ*Z*Z*", $priority, bytes::length($ident) + 1,
bytes::length($tag) + 1, $ident, $tag, $msg;
bytes::length($tag) + 1, $ident, $tag, $msg;
return &$ipcc_send_rec(CFS_IPC_LOG_CLUSTER_MSG, $bindata);
};
@ -238,53 +238,56 @@ my $ccache = {};
sub cfs_update {
my ($fail) = @_;
eval {
my $res = &$ipcc_send_rec_json(CFS_IPC_GET_FS_VERSION);
die "no starttime\n" if !$res->{starttime};
my $res = &$ipcc_send_rec_json(CFS_IPC_GET_FS_VERSION);
die "no starttime\n" if !$res->{starttime};
if (!$res->{starttime} || !$versions->{starttime} ||
$res->{starttime} != $versions->{starttime}) {
#print "detected changed starttime\n";
$vmlist = {};
$clinfo = {};
$ccache = {};
}
if (
!$res->{starttime}
|| !$versions->{starttime}
|| $res->{starttime} != $versions->{starttime}
) {
#print "detected changed starttime\n";
$vmlist = {};
$clinfo = {};
$ccache = {};
}
$versions = $res;
$versions = $res;
};
my $err = $@;
if ($err) {
$versions = {};
$vmlist = {};
$clinfo = {};
$ccache = {};
die $err if $fail;
warn $err;
$versions = {};
$vmlist = {};
$clinfo = {};
$ccache = {};
die $err if $fail;
warn $err;
}
eval {
if (!$clinfo->{version} || $clinfo->{version} != $versions->{clinfo}) {
#warn "detected new clinfo\n";
$clinfo = &$ipcc_send_rec_json(CFS_IPC_GET_CLUSTER_INFO);
}
if (!$clinfo->{version} || $clinfo->{version} != $versions->{clinfo}) {
#warn "detected new clinfo\n";
$clinfo = &$ipcc_send_rec_json(CFS_IPC_GET_CLUSTER_INFO);
}
};
$err = $@;
if ($err) {
$clinfo = {};
die $err if $fail;
warn $err;
$clinfo = {};
die $err if $fail;
warn $err;
}
eval {
if (!$vmlist->{version} || $vmlist->{version} != $versions->{vmlist}) {
#warn "detected new vmlist1\n";
$vmlist = &$ipcc_send_rec_json(CFS_IPC_GET_GUEST_LIST);
}
if (!$vmlist->{version} || $vmlist->{version} != $versions->{vmlist}) {
#warn "detected new vmlist1\n";
$vmlist = &$ipcc_send_rec_json(CFS_IPC_GET_GUEST_LIST);
}
};
$err = $@;
if ($err) {
$vmlist = {};
die $err if $fail;
warn $err;
$vmlist = {};
die $err if $fail;
warn $err;
}
}
@ -306,10 +309,10 @@ sub get_nodelist {
my $nodename = PVE::INotify::nodename();
if (!$nodelist || !$nodelist->{$nodename}) {
return [ $nodename ];
return [$nodename];
}
return [ keys %$nodelist ];
return [keys %$nodelist];
}
# only stored in a in-memory hashtable inside pmxcfs, local data is gone after
@ -319,13 +322,13 @@ sub broadcast_node_kv {
my ($key, $data) = @_;
if (!defined($data)) {
eval { $ipcc_remove_status->("kv/$key") };
eval { $ipcc_remove_status->("kv/$key") };
} else {
die "cannot send a reference\n" if ref($data);
my $size = length($data);
die "data for '$key' too big\n" if $size >= (32 * 1024); # limit from pmxfs
die "cannot send a reference\n" if ref($data);
my $size = length($data);
die "data for '$key' too big\n" if $size >= (32 * 1024); # limit from pmxfs
eval { $ipcc_update_status->("kv/$key", $data) };
eval { $ipcc_update_status->("kv/$key", $data) };
}
warn $@ if $@;
}
@ -336,17 +339,17 @@ sub get_node_kv {
my $res = {};
my $get_node_data = sub {
my ($node) = @_;
my $raw = $ipcc_get_status->("kv/$key", $node);
$res->{$node} = unpack("Z*", $raw) if $raw;
my ($node) = @_;
my $raw = $ipcc_get_status->("kv/$key", $node);
$res->{$node} = unpack("Z*", $raw) if $raw;
};
if ($nodename) {
$get_node_data->($nodename);
$get_node_data->($nodename);
} else {
for my $node (get_nodelist()->@*) {
$get_node_data->($node);
}
for my $node (get_nodelist()->@*) {
$get_node_data->($node);
}
}
return $res;
@ -369,7 +372,7 @@ sub get_guest_config_properties {
die "only up to 255 properties supported" if $num_props > 255;
my $bindata = pack "VC", $vmid // 0, $num_props;
for my $property (@$properties) {
$bindata .= pack "Z*", $property;
$bindata .= pack "Z*", $property;
}
my $res = $ipcc_send_rec_json->(CFS_IPC_GET_GUEST_CONFIG_PROPERTIES, $bindata);
@ -402,8 +405,8 @@ sub broadcast_tasklist {
# drop older items until we satisfy this constraint
my $size = length(encode_json($data));
while ($size >= (32 * 1024)) { # TODO: update to 128 KiB in PVE 8.x
pop @$data;
$size = length(encode_json($data));
pop @$data;
$size = length(encode_json($data));
}
eval { $ipcc_update_status->("tasklist", $data) };
@ -421,27 +424,27 @@ sub get_tasklist {
my $res = [];
foreach my $node (@$nodelist) {
next if $nodename && ($nodename ne $node);
eval {
my $ver = exists $kvstore->{$node} ? $kvstore->{$node}->{tasklist} : undef;
my $cache = $tasklistcache->{$node};
if (!$cache || !$ver || !$cache->{version} || ($cache->{version} != $ver)) {
my $tasks = [];
if (my $raw = $ipcc_get_status->("tasklist", $node)) {
my $json_str = unpack("Z*", $raw);
$tasks = decode_json($json_str);
}
push @$res, @$tasks;
$tasklistcache->{$node} = {
data => $tasks,
version => $ver,
};
} elsif ($cache && $cache->{data}) {
push @$res, $cache->{data}->@*;
}
};
my $err = $@;
syslog('err', $err) if $err;
next if $nodename && ($nodename ne $node);
eval {
my $ver = exists $kvstore->{$node} ? $kvstore->{$node}->{tasklist} : undef;
my $cache = $tasklistcache->{$node};
if (!$cache || !$ver || !$cache->{version} || ($cache->{version} != $ver)) {
my $tasks = [];
if (my $raw = $ipcc_get_status->("tasklist", $node)) {
my $json_str = unpack("Z*", $raw);
$tasks = decode_json($json_str);
}
push @$res, @$tasks;
$tasklistcache->{$node} = {
data => $tasks,
version => $ver,
};
} elsif ($cache && $cache->{data}) {
push @$res, $cache->{data}->@*;
}
};
my $err = $@;
syslog('err', $err) if $err;
}
return $res;
@ -450,9 +453,7 @@ sub get_tasklist {
sub broadcast_rrd {
my ($rrdid, $data) = @_;
eval {
&$ipcc_update_status("rrd/$rrdid", $data);
};
eval { &$ipcc_update_status("rrd/$rrdid", $data); };
my $err = $@;
warn $err if $err;
@ -467,29 +468,27 @@ sub rrd_dump {
my $diff = $ctime - $last_rrd_dump;
if ($diff < 2) {
return $last_rrd_data;
return $last_rrd_data;
}
my $raw;
eval {
$raw = &$ipcc_send_rec(CFS_IPC_GET_RRD_DUMP);
};
eval { $raw = &$ipcc_send_rec(CFS_IPC_GET_RRD_DUMP); };
my $err = $@;
if ($err) {
warn $err;
return {};
warn $err;
return {};
}
my $res = {};
if ($raw) {
while ($raw =~ s/^(.*)\n//) {
my ($key, @ela) = split(/:/, $1);
next if !$key;
next if !(scalar(@ela) > 1);
$res->{$key} = [ map { $_ eq 'U' ? undef : $_ } @ela ];
}
while ($raw =~ s/^(.*)\n//) {
my ($key, @ela) = split(/:/, $1);
next if !$key;
next if !(scalar(@ela) > 1);
$res->{$key} = [map { $_ eq 'U' ? undef : $_ } @ela];
}
}
$last_rrd_dump = $ctime;
@ -498,7 +497,6 @@ sub rrd_dump {
return $res;
}
# a fast way to read files (avoid fuse overhead)
sub get_config {
my ($path) = @_;
@ -528,8 +526,8 @@ sub cfs_register_file {
die "file '$filename' already registered" if $file_info->{$filename};
$file_info->{$filename} = {
parser => $parser,
writer => $writer,
parser => $parser,
writer => $writer,
};
}
@ -541,11 +539,11 @@ my $ccache_read = sub {
my $ci = $ccache->{$filename};
if (!$ci->{version} || !$version || $ci->{version} != $version) {
# we always call the parser, even when the file does not exist
# (in that case $data is undef)
my $data = get_config($filename);
$ci->{data} = &$parser("/etc/pve/$filename", $data);
$ci->{version} = $version;
# we always call the parser, even when the file does not exist
# (in that case $data is undef)
my $data = get_config($filename);
$ci->{data} = &$parser("/etc/pve/$filename", $data);
$ci->{version} = $version;
}
my $res = ref($ci->{data}) ? dclone($ci->{data}) : $ci->{data};
@ -559,18 +557,18 @@ sub cfs_file_version {
my $version;
my $infotag;
if ($filename =~ m!^nodes/[^/]+/(openvz|lxc|qemu-server)/(\d+)\.conf$!) {
my ($type, $vmid) = ($1, $2);
if ($vmlist && $vmlist->{ids} && $vmlist->{ids}->{$vmid}) {
$version = $vmlist->{ids}->{$vmid}->{version};
}
$infotag = "/$type/";
my ($type, $vmid) = ($1, $2);
if ($vmlist && $vmlist->{ids} && $vmlist->{ids}->{$vmid}) {
$version = $vmlist->{ids}->{$vmid}->{version};
}
$infotag = "/$type/";
} else {
$infotag = $filename;
$version = $versions->{$filename};
$infotag = $filename;
$version = $versions->{$filename};
}
my $info = $file_info->{$infotag} ||
die "unknown file type '$filename'\n";
my $info = $file_info->{$infotag}
|| die "unknown file type '$filename'\n";
return wantarray ? ($version, $info) : $version;
}
@ -596,7 +594,7 @@ sub cfs_write_file {
my $raw = &$writer($fsname, $data);
if (my $ci = $ccache->{$filename}) {
$ci->{version} = undef;
$ci->{version} = undef;
}
PVE::Tools::file_set_contents($fsname, $raw, undef, $force_utf8);
@ -618,41 +616,41 @@ my $cfs_lock = sub {
my $is_code_err = 0;
eval {
mkdir $lockdir;
mkdir $lockdir;
if (! -d $lockdir) {
die "pve cluster filesystem not online.\n";
}
if (!-d $lockdir) {
die "pve cluster filesystem not online.\n";
}
my $timeout_err = sub { die "got lock request timeout\n"; };
local $SIG{ALRM} = $timeout_err;
my $timeout_err = sub { die "got lock request timeout\n"; };
local $SIG{ALRM} = $timeout_err;
while (1) {
alarm ($timeout);
$got_lock = mkdir($filename);
$timeout = alarm(0) - 1; # we'll sleep for 1s, see down below
while (1) {
alarm($timeout);
$got_lock = mkdir($filename);
$timeout = alarm(0) - 1; # we'll sleep for 1s, see down below
last if $got_lock;
last if $got_lock;
$timeout_err->() if $timeout <= 0;
$timeout_err->() if $timeout <= 0;
print STDERR "trying to acquire cfs lock '$lockid' ...\n";
utime (0, 0, $filename); # cfs unlock request
sleep(1);
}
print STDERR "trying to acquire cfs lock '$lockid' ...\n";
utime(0, 0, $filename); # cfs unlock request
sleep(1);
}
# fixed command timeout: cfs locks have a timeout of 120
# using 60 gives us another 60 seconds to abort the task
local $SIG{ALRM} = sub { die "'$lockid'-locked command timed out - aborting\n"; };
alarm(60);
# fixed command timeout: cfs locks have a timeout of 120
# using 60 gives us another 60 seconds to abort the task
local $SIG{ALRM} = sub { die "'$lockid'-locked command timed out - aborting\n"; };
alarm(60);
cfs_update(); # make sure we read latest versions inside code()
cfs_update(); # make sure we read latest versions inside code()
$is_code_err = 1; # allows to differ between locking and actual-work errors
$is_code_err = 1; # allows to differ between locking and actual-work errors
$res = &$code(@param);
$res = &$code(@param);
alarm(0);
alarm(0);
};
my $err = $@;
@ -664,13 +662,13 @@ my $cfs_lock = sub {
alarm($prev_alarm);
if ($err) {
if (ref($err) eq 'PVE::Exception' || $is_code_err) {
# re-raise defined exceptions
$@ = $err;
} else {
# add lock info for plain errors comming from the locking itself
$@ = "cfs-lock '$lockid' error: $err";
}
if (ref($err) eq 'PVE::Exception' || $is_code_err) {
# re-raise defined exceptions
$@ = $err;
} else {
# add lock info for plain errors comming from the locking itself
$@ = "cfs-lock '$lockid' error: $err";
}
return undef;
}
@ -743,33 +741,32 @@ my $log_levels = {
};
sub log_msg {
my ($priority, $ident, $msg) = @_;
my ($priority, $ident, $msg) = @_;
if (my $tmp = $log_levels->{$priority}) {
$priority = $tmp;
}
if (my $tmp = $log_levels->{$priority}) {
$priority = $tmp;
}
die "need numeric log priority" if $priority !~ /^\d+$/;
die "need numeric log priority" if $priority !~ /^\d+$/;
my $tag = PVE::SafeSyslog::tag();
my $tag = PVE::SafeSyslog::tag();
$msg = "empty message" if !$msg;
$msg = "empty message" if !$msg;
$ident = "" if !$ident;
$ident = encode("ascii", $ident,
sub { sprintf "\\u%04x", shift });
$ident = "" if !$ident;
$ident = encode("ascii", $ident, sub { sprintf "\\u%04x", shift });
my $ascii = encode("ascii", $msg, sub { sprintf "\\u%04x", shift });
my $ascii = encode("ascii", $msg, sub { sprintf "\\u%04x", shift });
if ($ident) {
syslog($priority, "<%s> %s", $ident, $ascii);
} else {
syslog($priority, "%s", $ascii);
}
if ($ident) {
syslog($priority, "<%s> %s", $ident, $ascii);
} else {
syslog($priority, "%s", $ascii);
}
eval { &$ipcc_log($priority, $ident, $tag, $ascii); };
eval { &$ipcc_log($priority, $ident, $tag, $ascii); };
syslog("err", "writing cluster log failed: $@") if $@;
syslog("err", "writing cluster log failed: $@") if $@;
}
sub check_vmid_unused {
@ -782,7 +779,7 @@ sub check_vmid_unused {
return undef if $noerr;
my $vmtypestr = $d->{type} eq 'qemu' ? 'VM' : 'CT';
my $vmtypestr = $d->{type} eq 'qemu' ? 'VM' : 'CT';
die "$vmtypestr $vmid already exists on node '$d->{node}'\n";
}
@ -803,16 +800,15 @@ sub remote_node_ip {
my $nodelist = $clinfo->{nodelist};
if ($nodelist && $nodelist->{$nodename}) {
if (my $ip = $nodelist->{$nodename}->{ip}) {
return $ip if !wantarray;
my $family = $nodelist->{$nodename}->{address_family};
if (!$family) {
$nodelist->{$nodename}->{address_family} =
$family =
PVE::Tools::get_host_address_family($ip);
}
return wantarray ? ($ip, $family) : $ip;
}
if (my $ip = $nodelist->{$nodename}->{ip}) {
return $ip if !wantarray;
my $family = $nodelist->{$nodename}->{address_family};
if (!$family) {
$nodelist->{$nodename}->{address_family} = $family =
PVE::Tools::get_host_address_family($ip);
}
return wantarray ? ($ip, $family) : $ip;
}
}
# fallback: try to get IP by other means
@ -838,7 +834,7 @@ sub complete_next_vmid {
my $idlist = $vmlist->{ids} || {};
for (my $i = 100; $i < 10000; $i++) {
return [$i] if !defined($idlist->{$i});
return [$i] if !defined($idlist->{$i});
}
return [];
@ -849,7 +845,7 @@ sub complete_vmid {
my $vmlist = get_vmlist();
my $ids = $vmlist->{ids} || {};
return [ keys %$ids ];
return [keys %$ids];
}
sub complete_local_vmid {
@ -861,9 +857,9 @@ sub complete_local_vmid {
my $res = [];
foreach my $vmid (keys %$ids) {
my $d = $ids->{$vmid};
next if !$d->{node} || $d->{node} ne $nodename;
push @$res, $vmid;
my $d = $ids->{$vmid};
next if !$d->{node} || $d->{node} ne $nodename;
push @$res, $vmid;
}
return $res;
@ -877,14 +873,13 @@ sub complete_migration_target {
my $nodelist = get_nodelist();
foreach my $node (@$nodelist) {
next if $node eq $nodename;
push @$res, $node;
next if $node eq $nodename;
push @$res, $node;
}
return $res;
}
# NOTE: filesystem must be offline here, no DB changes allowed
sub cfs_backup_database {
mkdir $dbbackupdir;
@ -894,18 +889,18 @@ sub cfs_backup_database {
print "backup old database to '$backup_fn'\n";
my $cmd = [ ['sqlite3', $dbfile, '.dump'], ['gzip', '-', \ ">${backup_fn}"] ];
my $cmd = [['sqlite3', $dbfile, '.dump'], ['gzip', '-', \ ">${backup_fn}"]];
run_command($cmd, 'errmsg' => "cannot backup old database\n");
my $maxfiles = 10; # purge older backup
my $backups = [ sort { $b cmp $a } <$dbbackupdir/config-*.sql.gz> ];
my $backups = [sort { $b cmp $a } <$dbbackupdir/config-*.sql.gz>];
if ((my $count = scalar(@$backups)) > $maxfiles) {
foreach my $f (@$backups[$maxfiles..$count-1]) {
next if $f !~ m/^(\S+)$/; # untaint
print "delete old backup '$1'\n";
unlink $1;
}
foreach my $f (@$backups[$maxfiles .. $count - 1]) {
next if $f !~ m/^(\S+)$/; # untaint
print "delete old backup '$1'\n";
unlink $1;
}
}
return $dbfile;


@ -31,9 +31,10 @@ sub assert_we_can_join_cluster_version {
my ($version) = @_;
my $min_version = JOIN_API_VERSION - JOIN_API_AGE_AS_JOINEE;
return if $version >= $min_version;
die "error: incompatible join API version on cluster ($version), local node"
." has ". JOIN_API_VERSION ." and supports >= $min_version. Make sure"
."all cluster nodes are up-to-date.\n";
die "error: incompatible join API version on cluster ($version), local node" . " has "
. JOIN_API_VERSION
. " and supports >= $min_version. Make sure"
. "all cluster nodes are up-to-date.\n";
}
sub assert_node_can_join_our_version {
@ -41,8 +42,10 @@ sub assert_node_can_join_our_version {
my $min_version = JOIN_API_VERSION - JOIN_API_AGE_AS_CLUSTER;
return if $version >= $min_version;
die "error: unsupported old API version on joining node ($version), cluster"
." node has ". JOIN_API_VERSION ." and supports >= $min_version. Please"
." upgrade node before joining\n";
. " node has "
. JOIN_API_VERSION
. " and supports >= $min_version. Please"
. " upgrade node before joining\n";
}
my $pmxcfs_base_dir = PVE::Cluster::base_dir();
@ -58,8 +61,8 @@ sub run_silent_cmd {
eval { PVE::Tools::run_command($cmd, outfunc => $record, errfunc => $record) };
if (my $err = $@) {
print STDERR $outbuf;
die $err;
print STDERR $outbuf;
die $err;
}
}
@ -98,42 +101,42 @@ sub ssh_merge_keys {
my $data = '';
if (-f $ssh_cluster_authorized_keys) {
$data = PVE::Tools::file_get_contents($ssh_cluster_authorized_keys);
chomp($data);
$data = PVE::Tools::file_get_contents($ssh_cluster_authorized_keys);
chomp($data);
}
my $found_backup;
if (-f $ssh_root_authorized_keys_backup) {
$data .= "\n";
$data .= PVE::Tools::file_get_contents($ssh_root_authorized_keys_backup);
chomp($data);
$found_backup = 1;
$data .= "\n";
$data .= PVE::Tools::file_get_contents($ssh_root_authorized_keys_backup);
chomp($data);
$found_backup = 1;
}
# always add ourself
if (-f $ssh_root_rsa_key_public) {
my $pub = PVE::Tools::file_get_contents($ssh_root_rsa_key_public);
chomp($pub);
$data .= "\n$pub\n";
my $pub = PVE::Tools::file_get_contents($ssh_root_rsa_key_public);
chomp($pub);
$data .= "\n$pub\n";
}
my $newdata = "";
my $vhash = {};
my @lines = split(/\n/, $data);
foreach my $line (@lines) {
if ($line !~ /^#/ && $line =~ m/(^|\s)ssh-(rsa|dsa)\s+(\S+)\s+\S+$/) {
if ($line !~ /^#/ && $line =~ m/(^|\s)ssh-(rsa|dsa)\s+(\S+)\s+\S+$/) {
next if $vhash->{$3}++;
}
$newdata .= "$line\n";
}
$newdata .= "$line\n";
}
PVE::Tools::file_set_contents($ssh_cluster_authorized_keys, $newdata, 0600);
PVE::Tools::file_set_contents($ssh_cluster_authorized_keys, $newdata, 0600);
if ($found_backup && -l $ssh_root_authorized_keys) {
# everything went well, so we can remove the backup
unlink $ssh_root_authorized_keys_backup;
}
if ($found_backup && -l $ssh_root_authorized_keys) {
# everything went well, so we can remove the backup
unlink $ssh_root_authorized_keys_backup;
}
}
sub setup_sshd_config {
my () = @_;
@ -143,8 +146,8 @@ sub setup_sshd_config {
return if $conf =~ m/^PermitRootLogin\s+yes\s*$/m;
if ($conf !~ s/^#?PermitRootLogin.*$/PermitRootLogin yes/m) {
chomp $conf;
$conf .= "\nPermitRootLogin yes\n";
chomp $conf;
$conf .= "\nPermitRootLogin yes\n";
}
PVE::Tools::file_set_contents($ssh_system_server_config, $conf);
@ -155,18 +158,19 @@ sub setup_sshd_config {
sub setup_rootsshconfig {
# create ssh key if it does not exist
if (! -f $ssh_root_rsa_key_public) {
mkdir '/root/.ssh/';
system ("echo|ssh-keygen -t rsa -N '' -b 4096 -f ${ssh_root_rsa_key_private}");
if (!-f $ssh_root_rsa_key_public) {
mkdir '/root/.ssh/';
system("echo|ssh-keygen -t rsa -N '' -b 4096 -f ${ssh_root_rsa_key_private}");
}
# create ssh config if it does not exist
if (! -f $ssh_root_client_config) {
if (!-f $ssh_root_client_config) {
mkdir '/root/.ssh';
if (my $fh = IO::File->new($ssh_root_client_config, O_CREAT|O_WRONLY|O_EXCL, 0640)) {
if (my $fh = IO::File->new($ssh_root_client_config, O_CREAT | O_WRONLY | O_EXCL, 0640)) {
# this is the default ciphers list from Debian's OpenSSH package (OpenSSH_7.4p1 Debian-10, OpenSSL 1.0.2k 26 Jan 2017)
# changed order to put AES before Chacha20 (most hardware has AESNI)
print $fh "Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm\@openssh.com,aes256-gcm\@openssh.com,chacha20-poly1305\@openssh.com\n";
# changed order to put AES before Chacha20 (most hardware has AESNI)
print $fh
"Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm\@openssh.com,aes256-gcm\@openssh.com,chacha20-poly1305\@openssh.com\n";
close($fh);
}
}
@ -178,44 +182,47 @@ sub setup_ssh_keys {
my $import_ok;
if (! -f $ssh_cluster_authorized_keys) {
my $old;
if (-f $ssh_root_authorized_keys) {
$old = PVE::Tools::file_get_contents($ssh_root_authorized_keys);
}
if (my $fh = IO::File->new ($ssh_cluster_authorized_keys, O_CREAT|O_WRONLY|O_EXCL, 0400)) {
PVE::Tools::safe_print($ssh_cluster_authorized_keys, $fh, $old) if $old;
close($fh);
$import_ok = 1;
}
if (!-f $ssh_cluster_authorized_keys) {
my $old;
if (-f $ssh_root_authorized_keys) {
$old = PVE::Tools::file_get_contents($ssh_root_authorized_keys);
}
if (
my $fh = IO::File->new($ssh_cluster_authorized_keys, O_CREAT | O_WRONLY | O_EXCL, 0400)
) {
PVE::Tools::safe_print($ssh_cluster_authorized_keys, $fh, $old) if $old;
close($fh);
$import_ok = 1;
}
}
warn "can't create shared ssh key database '$ssh_cluster_authorized_keys'\n"
if ! -f $ssh_cluster_authorized_keys;
if !-f $ssh_cluster_authorized_keys;
if (-f $ssh_root_authorized_keys && ! -l $ssh_root_authorized_keys) {
if (!rename($ssh_root_authorized_keys , $ssh_root_authorized_keys_backup)) {
warn "rename $ssh_root_authorized_keys failed - $!\n";
}
if (-f $ssh_root_authorized_keys && !-l $ssh_root_authorized_keys) {
if (!rename($ssh_root_authorized_keys, $ssh_root_authorized_keys_backup)) {
warn "rename $ssh_root_authorized_keys failed - $!\n";
}
}
if (! -l $ssh_root_authorized_keys) {
symlink $ssh_cluster_authorized_keys, $ssh_root_authorized_keys;
if (!-l $ssh_root_authorized_keys) {
symlink $ssh_cluster_authorized_keys, $ssh_root_authorized_keys;
}
if (! -l $ssh_root_authorized_keys) {
warn "can't create symlink for ssh keys '$ssh_root_authorized_keys' -> '$ssh_cluster_authorized_keys'\n";
if (!-l $ssh_root_authorized_keys) {
warn
"can't create symlink for ssh keys '$ssh_root_authorized_keys' -> '$ssh_cluster_authorized_keys'\n";
} else {
unlink $ssh_root_authorized_keys_backup if $import_ok;
unlink $ssh_root_authorized_keys_backup if $import_ok;
}
}
sub ssh_unmerge_known_hosts {
return if ! -l $ssh_system_known_hosts;
return if !-l $ssh_system_known_hosts;
my $old = '';
$old = PVE::Tools::file_get_contents($ssh_cluster_known_hosts)
if -f $ssh_cluster_known_hosts;
if -f $ssh_cluster_known_hosts;
PVE::Tools::file_set_contents($ssh_system_known_hosts, $old);
}
@ -246,18 +253,18 @@ sub ssh_merge_known_hosts {
mkdir $pmxcfs_auth_dir;
if (! -f $ssh_cluster_known_hosts) {
if (my $fh = IO::File->new($ssh_cluster_known_hosts, O_CREAT|O_WRONLY|O_EXCL, 0600)) {
close($fh);
}
if (!-f $ssh_cluster_known_hosts) {
if (my $fh = IO::File->new($ssh_cluster_known_hosts, O_CREAT | O_WRONLY | O_EXCL, 0600)) {
close($fh);
}
}
my $old = PVE::Tools::file_get_contents($ssh_cluster_known_hosts);
my $new = '';
if ((! -l $ssh_system_known_hosts) && (-f $ssh_system_known_hosts)) {
$new = PVE::Tools::file_get_contents($ssh_system_known_hosts);
if ((!-l $ssh_system_known_hosts) && (-f $ssh_system_known_hosts)) {
$new = PVE::Tools::file_get_contents($ssh_system_known_hosts);
}
my $hostkey = PVE::Tools::file_get_contents($ssh_host_rsa_id);
@ -272,62 +279,62 @@ sub ssh_merge_known_hosts {
my $found_local_ip;
my $merge_line = sub {
my ($line, $all) = @_;
my ($line, $all) = @_;
return if $line =~ m/^\s*$/; # skip empty lines
return if $line =~ m/^#/; # skip comments
return if $line =~ m/^\s*$/; # skip empty lines
return if $line =~ m/^#/; # skip comments
if ($line =~ m/^(\S+)\s(ssh-rsa\s\S+)(\s.*)?$/) {
my $key = $1;
my $rsakey = $2;
if (!$vhash->{$key}) {
$vhash->{$key} = 1;
if ($key =~ m/\|1\|([^\|\s]+)\|([^\|\s]+)$/) {
my $salt = decode_base64($1);
my $digest = $2;
my $hmac = Digest::HMAC_SHA1->new($salt);
$hmac->add($nodename);
my $hd = $hmac->b64digest . '=';
if ($digest eq $hd) {
if ($rsakey eq $hostkey) {
$found_nodename = 1;
$data .= $line;
}
return;
}
$hmac = Digest::HMAC_SHA1->new($salt);
$hmac->add($ip_address);
$hd = $hmac->b64digest . '=';
if ($digest eq $hd) {
if ($rsakey eq $hostkey) {
$found_local_ip = 1;
$data .= $line;
}
return;
}
} else {
$key = lc($key); # avoid duplicate entries, ssh compares lowercased
if ($key eq $ip_address) {
$found_local_ip = 1 if $rsakey eq $hostkey;
} elsif ($key eq $nodename) {
$found_nodename = 1 if $rsakey eq $hostkey;
}
}
$data .= $line;
}
} elsif ($all) {
$data .= $line;
}
if ($line =~ m/^(\S+)\s(ssh-rsa\s\S+)(\s.*)?$/) {
my $key = $1;
my $rsakey = $2;
if (!$vhash->{$key}) {
$vhash->{$key} = 1;
if ($key =~ m/\|1\|([^\|\s]+)\|([^\|\s]+)$/) {
my $salt = decode_base64($1);
my $digest = $2;
my $hmac = Digest::HMAC_SHA1->new($salt);
$hmac->add($nodename);
my $hd = $hmac->b64digest . '=';
if ($digest eq $hd) {
if ($rsakey eq $hostkey) {
$found_nodename = 1;
$data .= $line;
}
return;
}
$hmac = Digest::HMAC_SHA1->new($salt);
$hmac->add($ip_address);
$hd = $hmac->b64digest . '=';
if ($digest eq $hd) {
if ($rsakey eq $hostkey) {
$found_local_ip = 1;
$data .= $line;
}
return;
}
} else {
$key = lc($key); # avoid duplicate entries, ssh compares lowercased
if ($key eq $ip_address) {
$found_local_ip = 1 if $rsakey eq $hostkey;
} elsif ($key eq $nodename) {
$found_nodename = 1 if $rsakey eq $hostkey;
}
}
$data .= $line;
}
} elsif ($all) {
$data .= $line;
}
};
while ($old && $old =~ s/^((.*?)(\n|$))//) {
my $line = "$2\n";
&$merge_line($line, 1);
my $line = "$2\n";
&$merge_line($line, 1);
}
while ($new && $new =~ s/^((.*?)(\n|$))//) {
my $line = "$2\n";
&$merge_line($line);
my $line = "$2\n";
&$merge_line($line);
}
# add our own key if not already there
@ -341,8 +348,9 @@ sub ssh_merge_known_hosts {
unlink $ssh_system_known_hosts;
symlink $ssh_cluster_known_hosts, $ssh_system_known_hosts;
warn "can't create symlink for ssh known hosts '$ssh_system_known_hosts' -> '$ssh_cluster_known_hosts'\n"
if ! -l $ssh_system_known_hosts;
warn
"can't create symlink for ssh known hosts '$ssh_system_known_hosts' -> '$ssh_cluster_known_hosts'\n"
if !-l $ssh_system_known_hosts;
}
@ -354,18 +362,19 @@ sub gen_local_dirs {
PVE::Cluster::check_cfs_is_mounted();
my @required_dirs = (
"$pmxcfs_base_dir/priv",
"$pmxcfs_base_dir/nodes",
"$pmxcfs_base_dir/nodes/$nodename",
"$pmxcfs_base_dir/nodes/$nodename/lxc",
"$pmxcfs_base_dir/nodes/$nodename/qemu-server",
"$pmxcfs_base_dir/nodes/$nodename/openvz",
"$pmxcfs_base_dir/nodes/$nodename/priv");
"$pmxcfs_base_dir/priv",
"$pmxcfs_base_dir/nodes",
"$pmxcfs_base_dir/nodes/$nodename",
"$pmxcfs_base_dir/nodes/$nodename/lxc",
"$pmxcfs_base_dir/nodes/$nodename/qemu-server",
"$pmxcfs_base_dir/nodes/$nodename/openvz",
"$pmxcfs_base_dir/nodes/$nodename/priv",
);
foreach my $dir (@required_dirs) {
if (! -d $dir) {
mkdir($dir) || $! == EEXIST || die "unable to create directory '$dir' - $!\n";
}
if (!-d $dir) {
mkdir($dir) || $! == EEXIST || die "unable to create directory '$dir' - $!\n";
}
}
}
@ -377,13 +386,19 @@ sub gen_auth_key {
PVE::Cluster::check_cfs_is_mounted();
PVE::Cluster::cfs_lock_authkey(undef, sub {
mkdir $pmxcfs_auth_dir || $! == EEXIST || die "unable to create dir '$pmxcfs_auth_dir' - $!\n";
PVE::Cluster::cfs_lock_authkey(
undef,
sub {
mkdir $pmxcfs_auth_dir
|| $! == EEXIST
|| die "unable to create dir '$pmxcfs_auth_dir' - $!\n";
run_silent_cmd(['openssl', 'genrsa', '-out', $authprivkeyfn, '2048']);
run_silent_cmd(['openssl', 'genrsa', '-out', $authprivkeyfn, '2048']);
run_silent_cmd(['openssl', 'rsa', '-in', $authprivkeyfn, '-pubout', '-out', $authpubkeyfn]);
});
run_silent_cmd(
['openssl', 'rsa', '-in', $authprivkeyfn, '-pubout', '-out', $authpubkeyfn]);
},
);
die "$@\n" if $@;
}
@ -392,9 +407,7 @@ sub gen_pveca_key {
return if -f $pveca_key_fn;
eval {
run_silent_cmd(['openssl', 'genrsa', '-out', $pveca_key_fn, '4096']);
};
eval { run_silent_cmd(['openssl', 'genrsa', '-out', $pveca_key_fn, '4096']); };
die "unable to generate pve ca key:\n$@" if $@;
}
@ -402,7 +415,7 @@ sub gen_pveca_key {
sub gen_pveca_cert {
if (-f $pveca_key_fn && -f $pveca_cert_fn) {
return 0;
return 0;
}
gen_pveca_key();
@ -414,19 +427,25 @@ sub gen_pveca_cert {
UUID::unparse($uuid, $uuid_str);
eval {
# wrap openssl with faketime to prevent bug #904
run_silent_cmd([
'faketime', 'yesterday',
'openssl', 'req',
'-batch',
'-days', '3650',
'-new',
'-x509',
'-nodes',
'-key', $pveca_key_fn,
'-out', $pveca_cert_fn,
'-subj', "/CN=Proxmox Virtual Environment/OU=$uuid_str/O=PVE Cluster Manager CA/",
]);
# wrap openssl with faketime to prevent bug #904
run_silent_cmd([
'faketime',
'yesterday',
'openssl',
'req',
'-batch',
'-days',
'3650',
'-new',
'-x509',
'-nodes',
'-key',
$pveca_key_fn,
'-out',
$pveca_cert_fn,
'-subj',
"/CN=Proxmox Virtual Environment/OU=$uuid_str/O=PVE Cluster Manager CA/",
]);
};
die "generating pve root certificate failed:\n$@" if $@;
@ -443,9 +462,7 @@ sub gen_pve_ssl_key {
return if -f $pvessl_key_fn;
eval {
run_silent_cmd(['openssl', 'genrsa', '-out', $pvessl_key_fn, '2048']);
};
eval { run_silent_cmd(['openssl', 'genrsa', '-out', $pvessl_key_fn, '2048']); };
die "unable to generate pve ssl key for node '$nodename':\n$@" if $@;
}
@ -454,9 +471,7 @@ sub gen_pve_www_key {
return if -f $pvewww_key_fn;
eval {
run_silent_cmd(['openssl', 'genrsa', '-out', $pvewww_key_fn, '2048']);
};
eval { run_silent_cmd(['openssl', 'genrsa', '-out', $pvewww_key_fn, '2048']); };
die "unable to generate pve www key:\n$@" if $@;
}
@ -487,8 +502,8 @@ sub gen_pve_ssl_cert {
my $fqdn = $nodename;
if ($rc && $rc->{search}) {
$fqdn .= ".$rc->{search}";
$names .= ",DNS:$fqdn";
$fqdn .= ".$rc->{search}";
$names .= ",DNS:$fqdn";
}
my $sslconf = <<__EOD;
@ -514,34 +529,43 @@ subjectAltName = $names
__EOD
my $cfgfn = "/tmp/pvesslconf-$$.tmp";
my $fh = IO::File->new ($cfgfn, "w");
my $fh = IO::File->new($cfgfn, "w");
print $fh $sslconf;
close ($fh);
close($fh);
my $reqfn = "/tmp/pvecertreq-$$.tmp";
unlink $reqfn;
my $pvessl_key_fn = "$pmxcfs_base_dir/nodes/$nodename/pve-ssl.key";
eval {
run_silent_cmd([
'openssl', 'req', '-batch', '-new', '-config', $cfgfn, '-key', $pvessl_key_fn, '-out', $reqfn
]);
run_silent_cmd([
'openssl',
'req',
'-batch',
'-new',
'-config',
$cfgfn,
'-key',
$pvessl_key_fn,
'-out',
$reqfn,
]);
};
if (my $err = $@) {
unlink $reqfn;
unlink $cfgfn;
die "unable to generate pve certificate request:\n$err";
unlink $reqfn;
unlink $cfgfn;
die "unable to generate pve certificate request:\n$err";
}
update_serial("0000000000000000") if ! -f $pveca_srl_fn;
update_serial("0000000000000000") if !-f $pveca_srl_fn;
# get ca expiry
my $cainfo = PVE::Certificate::get_certificate_info($pveca_cert_fn);
my $daysleft = int(($cainfo->{notafter} - time())/(24*60*60));
my $daysleft = int(($cainfo->{notafter} - time()) / (24 * 60 * 60));
if ($daysleft < 14) {
die "CA expires in less than 2 weeks, unable to generate certificate.\n";
die "CA expires in less than 2 weeks, unable to generate certificate.\n";
}
# let the certificate expire a little sooner that the ca, so subtract 2 days
@ -549,22 +573,24 @@ __EOD
# we want the certificates to only last 2 years, since some browsers
# do not accept certificates with very long expiry time
if ($daysleft >= 2*365) {
$daysleft = 2*365;
if ($daysleft >= 2 * 365) {
$daysleft = 2 * 365;
}
eval {
run_silent_cmd([
'faketime', 'yesterday', # NOTE: wrap openssl with faketime to prevent bug #904
'openssl', 'x509', '-req', '-in', $reqfn, '-days', $daysleft, '-out', $pvessl_cert_fn,
'-CAkey', $pveca_key_fn, '-CA', $pveca_cert_fn, '-CAserial', $pveca_srl_fn, '-extfile', $cfgfn
]);
run_silent_cmd([
'faketime', 'yesterday', # NOTE: wrap openssl with faketime to prevent bug #904
'openssl', 'x509', '-req', '-in', $reqfn, '-days', $daysleft, '-out',
$pvessl_cert_fn,
'-CAkey', $pveca_key_fn, '-CA', $pveca_cert_fn, '-CAserial', $pveca_srl_fn,
'-extfile', $cfgfn,
]);
};
if (my $err = $@) {
unlink $reqfn;
unlink $cfgfn;
die "unable to generate pve ssl certificate:\n$err";
unlink $reqfn;
unlink $cfgfn;
die "unable to generate pve ssl certificate:\n$err";
}
unlink $cfgfn;
@ -607,9 +633,9 @@ sub gen_pve_vzdump_symlink {
my $link_fn = "/etc/cron.d/vzdump";
if ((-f $filename) && (! -l $link_fn)) {
rename($link_fn, "/root/etc_cron_vzdump.org"); # make backup if file exists
symlink($filename, $link_fn);
if ((-f $filename) && (!-l $link_fn)) {
rename($link_fn, "/root/etc_cron_vzdump.org"); # make backup if file exists
symlink($filename, $link_fn);
}
}
@ -618,10 +644,10 @@ sub gen_pve_vzdump_files {
my $filename = "/etc/pve/vzdump.cron";
PVE::Tools::file_set_contents($filename, $vzdump_cron_dummy)
if ! -f $filename;
if !-f $filename;
gen_pve_vzdump_symlink();
};
}
# join helpers
@ -632,52 +658,52 @@ sub assert_joinable {
my $error = sub { $errors .= "* $_[0]\n"; };
if (-f $authfile) {
$error->("authentication key '$authfile' already exists");
$error->("authentication key '$authfile' already exists");
}
if (-f $clusterconf) {
$error->("cluster config '$clusterconf' already exists");
if (-f $clusterconf) {
$error->("cluster config '$clusterconf' already exists");
}
my $vmlist = PVE::Cluster::get_vmlist();
if ($vmlist && $vmlist->{ids} && scalar(keys %{$vmlist->{ids}})) {
$error->("this host already contains virtual guests");
if ($vmlist && $vmlist->{ids} && scalar(keys %{ $vmlist->{ids} })) {
$error->("this host already contains virtual guests");
}
if (PVE::Tools::run_command(['corosync-quorumtool', '-l'], noerr => 1, quiet => 1) == 0) {
$error->("corosync is already running, is this node already in a cluster?!");
$error->("corosync is already running, is this node already in a cluster?!");
}
# check if corosync ring IPs are configured on the current nodes interfaces
my $check_ip = sub {
my $ip = shift // return;
my $logid = shift;
if (!PVE::JSONSchema::pve_verify_ip($ip, 1)) {
my $host = $ip;
eval { $ip = PVE::Network::get_ip_from_hostname($host); };
if ($@) {
$error->("$logid: cannot use '$host': $@\n") ;
return;
}
}
my $ip = shift // return;
my $logid = shift;
if (!PVE::JSONSchema::pve_verify_ip($ip, 1)) {
my $host = $ip;
eval { $ip = PVE::Network::get_ip_from_hostname($host); };
if ($@) {
$error->("$logid: cannot use '$host': $@\n");
return;
}
}
my $cidr = (Net::IP::ip_is_ipv6($ip)) ? "$ip/128" : "$ip/32";
my $configured_ips = PVE::Network::get_local_ip_from_cidr($cidr);
my $cidr = (Net::IP::ip_is_ipv6($ip)) ? "$ip/128" : "$ip/32";
my $configured_ips = PVE::Network::get_local_ip_from_cidr($cidr);
$error->("$logid: cannot use IP '$ip', not found on local node!\n")
if scalar(@$configured_ips) < 1;
$error->("$logid: cannot use IP '$ip', not found on local node!\n")
if scalar(@$configured_ips) < 1;
};
$check_ip->($local_addr, 'local node address');
foreach my $link (keys %$links) {
$check_ip->($links->{$link}->{address}, "link$link");
$check_ip->($links->{$link}->{address}, "link$link");
}
if ($errors) {
warn "detected the following error(s):\n$errors";
die "Check if node may join a cluster failed!\n" if !$force;
warn "\nWARNING : detected error but forced to continue!\n\n";
warn "detected the following error(s):\n$errors";
die "Check if node may join a cluster failed!\n" if !$force;
warn "\nWARNING : detected error but forced to continue!\n\n";
}
}
@ -701,19 +727,19 @@ sub join {
my $host = $param->{hostname};
my $conn_args = {
username => 'root@pam',
password => $param->{password},
cookie_name => 'PVEAuthCookie',
protocol => 'https',
host => $host,
port => 8006,
username => 'root@pam',
password => $param->{password},
cookie_name => 'PVEAuthCookie',
protocol => 'https',
host => $host,
port => 8006,
};
if (my $fp = $param->{fingerprint}) {
$conn_args->{cached_fingerprints} = { uc($fp) => 1 };
$conn_args->{cached_fingerprints} = { uc($fp) => 1 };
} else {
# API schema ensures that we can only get here from CLI handler
$conn_args->{manual_verification} = 1;
# API schema ensures that we can only get here from CLI handler
$conn_args->{manual_verification} = 1;
}
print "Establishing API connection with host '$host'\n";
@ -733,44 +759,44 @@ sub join {
$args->{nodeid} = $param->{nodeid} if $param->{nodeid};
$args->{votes} = $param->{votes} if defined($param->{votes});
foreach my $link (keys %$links) {
$args->{"link$link"} = PVE::Corosync::print_corosync_link($links->{$link});
$args->{"link$link"} = PVE::Corosync::print_corosync_link($links->{$link});
}
# this will be used as fallback if no links are specified
if (!%$links) {
$args->{link0} = $local_ip_address if $apiver == 0;
$args->{new_node_ip} = $local_ip_address if $apiver >= 1;
$args->{link0} = $local_ip_address if $apiver == 0;
$args->{new_node_ip} = $local_ip_address if $apiver >= 1;
print "No cluster network links passed explicitly, fallback to local node"
. " IP '$local_ip_address'\n";
print "No cluster network links passed explicitly, fallback to local node"
. " IP '$local_ip_address'\n";
}
if ($apiver >= 1) {
$args->{apiversion} = JOIN_API_VERSION;
$args->{apiversion} = JOIN_API_VERSION;
}
print "Request addition of this node\n";
my $res = eval { $conn->post("/cluster/config/nodes/$nodename", $args); };
if (my $err = $@) {
if (ref($err) && $err->isa('PVE::APIClient::Exception')) {
# we received additional info about the error, show the user
chomp $err->{msg};
warn "An error occurred on the cluster node: $err->{msg}\n";
foreach my $key (sort keys %{$err->{errors}}) {
my $symbol = ($key =~ m/^warning/) ? '*' : '!';
warn "$symbol $err->{errors}->{$key}\n";
}
if (ref($err) && $err->isa('PVE::APIClient::Exception')) {
# we received additional info about the error, show the user
chomp $err->{msg};
warn "An error occurred on the cluster node: $err->{msg}\n";
foreach my $key (sort keys %{ $err->{errors} }) {
my $symbol = ($key =~ m/^warning/) ? '*' : '!';
warn "$symbol $err->{errors}->{$key}\n";
}
die "Cluster join aborted!\n";
}
die "Cluster join aborted!\n";
}
die $@;
die $@;
}
if (defined($res->{warnings})) {
foreach my $warn (@{$res->{warnings}}) {
warn "cluster: $warn\n";
}
foreach my $warn (@{ $res->{warnings} }) {
warn "cluster: $warn\n";
}
}
print "Join request OK, finishing setup locally\n";
@ -799,12 +825,12 @@ sub finish_join {
# wait for quorum
my $printqmsg = 1;
while (!PVE::Cluster::check_cfs_quorum(1)) {
if ($printqmsg) {
print "waiting for quorum...";
STDOUT->flush();
$printqmsg = 0;
}
sleep(1);
if ($printqmsg) {
print "waiting for quorum...";
STDOUT->flush();
$printqmsg = 0;
}
sleep(1);
}
print "OK\n" if !$printqmsg;
@ -828,8 +854,8 @@ sub updatecerts_and_ssh {
my $p = sub { print "$_[0]\n" if !$silent };
if (!PVE::Cluster::check_cfs_quorum(1)) {
return undef if $silent;
die "no quorum - unable to update files\n";
return undef if $silent;
die "no quorum - unable to update files\n";
}
setup_ssh_keys();
@ -844,8 +870,8 @@ sub updatecerts_and_ssh {
$p->("merge authorized SSH keys");
ssh_merge_keys();
if ($unmerge_ssh) {
$p->("unmerge SSH known hosts");
ssh_unmerge_known_hosts();
$p->("unmerge SSH known hosts");
ssh_unmerge_known_hosts();
}
ssh_create_node_known_hosts($nodename);
gen_pve_vzdump_files();


@ -24,26 +24,28 @@ my $conf_array_sections = {
my $corosync_link_format = {
address => {
default_key => 1,
type => 'string', format => 'address',
format_description => 'IP',
description => "Hostname (or IP) of this corosync link address.",
default_key => 1,
type => 'string',
format => 'address',
format_description => 'IP',
description => "Hostname (or IP) of this corosync link address.",
},
priority => {
optional => 1,
type => 'integer',
minimum => 0,
maximum => 255,
default => 0,
description => "The priority for the link when knet is used in 'passive'"
. " mode (default). Lower value means higher priority. Only"
. " valid for cluster create, ignored on node add.",
optional => 1,
type => 'integer',
minimum => 0,
maximum => 255,
default => 0,
description => "The priority for the link when knet is used in 'passive'"
. " mode (default). Lower value means higher priority. Only"
. " valid for cluster create, ignored on node add.",
},
};
my $corosync_link_desc = {
type => 'string', format => $corosync_link_format,
type => 'string',
format => $corosync_link_format,
description => "Address and priority information of a single corosync link."
. " (up to 8 links supported; link0..link7)",
. " (up to 8 links supported; link0..link7)",
optional => 1,
};
PVE::JSONSchema::register_standard_option("corosync-link", $corosync_link_desc);
@ -69,8 +71,8 @@ use constant MAX_LINK_INDEX => 7;
sub add_corosync_link_properties {
my ($prop) = @_;
for my $lnum (0..MAX_LINK_INDEX) {
$prop->{"link$lnum"} = PVE::JSONSchema::get_standard_option("corosync-link");
for my $lnum (0 .. MAX_LINK_INDEX) {
$prop->{"link$lnum"} = PVE::JSONSchema::get_standard_option("corosync-link");
}
return $prop;
@ -80,9 +82,9 @@ sub extract_corosync_link_args {
my ($args) = @_;
my $links = {};
for my $lnum (0..MAX_LINK_INDEX) {
$links->{$lnum} = parse_corosync_link($args->{"link$lnum"})
if $args->{"link$lnum"};
for my $lnum (0 .. MAX_LINK_INDEX) {
$links->{$lnum} = parse_corosync_link($args->{"link$lnum"})
if $args->{"link$lnum"};
}
return $links;
@ -110,51 +112,51 @@ sub parse_conf {
my $section = $conf->{main};
while (defined(my $token = shift @tokens)) {
my $nexttok = $tokens[0];
my $nexttok = $tokens[0];
if ($nexttok && ($nexttok eq '{')) {
shift @tokens; # skip '{'
my $new_section = {};
if ($conf_array_sections->{$token}) {
$section->{$token} = [] if !defined($section->{$token});
push @{$section->{$token}}, $new_section;
} elsif (!defined($section->{$token})) {
$section->{$token} = $new_section;
} else {
die "section '$token' already exists and not marked as array!\n";
}
push @$stack, $section;
$section = $new_section;
next;
}
if ($nexttok && ($nexttok eq '{')) {
shift @tokens; # skip '{'
my $new_section = {};
if ($conf_array_sections->{$token}) {
$section->{$token} = [] if !defined($section->{$token});
push @{ $section->{$token} }, $new_section;
} elsif (!defined($section->{$token})) {
$section->{$token} = $new_section;
} else {
die "section '$token' already exists and not marked as array!\n";
}
push @$stack, $section;
$section = $new_section;
next;
}
if ($token eq '}') {
$section = pop @$stack;
die "parse error - uncexpected '}'\n" if !$section;
next;
}
if ($token eq '}') {
$section = pop @$stack;
die "parse error - uncexpected '}'\n" if !$section;
next;
}
my $key = $token;
die "missing ':' after key '$key'\n" if ! ($key =~ s/:$//);
my $key = $token;
die "missing ':' after key '$key'\n" if !($key =~ s/:$//);
die "parse error - no value for '$key'\n" if !defined($nexttok);
my $value = shift @tokens;
die "parse error - no value for '$key'\n" if !defined($nexttok);
my $value = shift @tokens;
$section->{$key} = $value;
$section->{$key} = $value;
}
# make working with the config way easier
my ($totem, $nodelist) = $conf->{main}->@{"totem", "nodelist"};
my ($totem, $nodelist) = $conf->{main}->@{ "totem", "nodelist" };
$nodelist->{node} = {
map {
$_->{name} // $_->{ring0_addr} => $_
} @{$nodelist->{node}}
map {
$_->{name} // $_->{ring0_addr} => $_
} @{ $nodelist->{node} }
};
$totem->{interface} = {
map {
$_->{linknumber} // $_->{ringnumber} => $_
} @{$totem->{interface}}
map {
$_->{linknumber} // $_->{ringnumber} => $_
} @{ $totem->{interface} }
};
$conf->{digest} = $digest;
@ -169,8 +171,8 @@ sub write_conf {
# retransform back for easier dumping
my $hash_to_array = sub {
my ($hash) = @_;
return [ $hash->@{sort keys %$hash} ];
my ($hash) = @_;
return [$hash->@{ sort keys %$hash }];
};
$c->{nodelist}->{node} = &$hash_to_array($c->{nodelist}->{node});
@ -178,32 +180,32 @@ sub write_conf {
my $dump_section_weak;
$dump_section_weak = sub {
my ($section, $prefix) = @_;
my ($section, $prefix) = @_;
my $raw = '';
my $raw = '';
foreach my $k (sort keys %$section) {
my $v = $section->{$k};
if (ref($v) eq 'HASH') {
$raw .= $prefix . "$k {\n";
$raw .= $dump_section_weak->($v, "$prefix ");
$raw .= $prefix . "}\n";
$raw .= "\n" if !$prefix; # add extra newline at 1st level only
} elsif (ref($v) eq 'ARRAY') {
foreach my $child (@$v) {
$raw .= $prefix . "$k {\n";
$raw .= $dump_section_weak->($child, "$prefix ");
$raw .= $prefix . "}\n";
}
} elsif (!ref($v)) {
die "got undefined value for key '$k'!\n" if !defined($v);
$raw .= $prefix . "$k: $v\n";
} else {
die "unexpected reference in config hash: $k => ". ref($v) ."\n";
}
}
foreach my $k (sort keys %$section) {
my $v = $section->{$k};
if (ref($v) eq 'HASH') {
$raw .= $prefix . "$k {\n";
$raw .= $dump_section_weak->($v, "$prefix ");
$raw .= $prefix . "}\n";
$raw .= "\n" if !$prefix; # add extra newline at 1st level only
} elsif (ref($v) eq 'ARRAY') {
foreach my $child (@$v) {
$raw .= $prefix . "$k {\n";
$raw .= $dump_section_weak->($child, "$prefix ");
$raw .= $prefix . "}\n";
}
} elsif (!ref($v)) {
die "got undefined value for key '$k'!\n" if !defined($v);
$raw .= $prefix . "$k: $v\n";
} else {
die "unexpected reference in config hash: $k => " . ref($v) . "\n";
}
}
return $raw;
return $raw;
};
my $dump_section = $dump_section_weak;
weaken($dump_section_weak);
@ -216,16 +218,16 @@ sub write_conf {
# read only - use atomic_write_conf method to write
PVE::Cluster::cfs_register_file('corosync.conf', \&parse_conf);
# this is read/write
PVE::Cluster::cfs_register_file('corosync.conf.new', \&parse_conf,
\&write_conf);
PVE::Cluster::cfs_register_file('corosync.conf.new', \&parse_conf, \&write_conf);
sub check_conf_exists {
my ($noerr) = @_;
my $exists = -f "$basedir/corosync.conf";
die "Error: Corosync config '$basedir/corosync.conf' does not exist - is this node part of a cluster?\n"
if !$noerr && !$exists;
die
"Error: Corosync config '$basedir/corosync.conf' does not exist - is this node part of a cluster?\n"
if !$noerr && !$exists;
return $exists;
}
@ -253,15 +255,15 @@ sub atomic_write_conf {
my ($conf, $no_increase_version) = @_;
if (!$no_increase_version) {
die "invalid corosync config: unable to read config version\n"
if !defined($conf->{main}->{totem}->{config_version});
$conf->{main}->{totem}->{config_version}++;
die "invalid corosync config: unable to read config version\n"
if !defined($conf->{main}->{totem}->{config_version});
$conf->{main}->{totem}->{config_version}++;
}
PVE::Cluster::cfs_write_file("corosync.conf.new", $conf);
rename("/etc/pve/corosync.conf.new", "/etc/pve/corosync.conf")
|| die "activating corosync.conf.new failed - $!\n";
|| die "activating corosync.conf.new failed - $!\n";
}
# for creating a new cluster with the current node
@ -279,47 +281,47 @@ sub create_conf {
# if no links given, fall back to local IP as link0
$links->{0} = { address => $local_ip_address }
if !%$links;
if !%$links;
my $conf = {
totem => {
version => 2, # protocol version
secauth => 'on',
cluster_name => $clustername,
config_version => 0,
ip_version => 'ipv4-6',
link_mode => 'passive',
interface => {},
},
nodelist => {
node => {
$nodename => {
name => $nodename,
nodeid => $nodeid,
quorum_votes => $votes,
},
},
},
quorum => {
provider => 'corosync_votequorum',
},
logging => {
to_syslog => 'yes',
debug => 'off',
},
totem => {
version => 2, # protocol version
secauth => 'on',
cluster_name => $clustername,
config_version => 0,
ip_version => 'ipv4-6',
link_mode => 'passive',
interface => {},
},
nodelist => {
node => {
$nodename => {
name => $nodename,
nodeid => $nodeid,
quorum_votes => $votes,
},
},
},
quorum => {
provider => 'corosync_votequorum',
},
logging => {
to_syslog => 'yes',
debug => 'off',
},
};
my $totem = $conf->{totem};
my $node = $conf->{nodelist}->{node}->{$nodename};
foreach my $lnum (keys %$links) {
my $link = $links->{$lnum};
my $link = $links->{$lnum};
$totem->{interface}->{$lnum} = { linknumber => $lnum };
$totem->{interface}->{$lnum} = { linknumber => $lnum };
my $prio = $link->{priority};
$totem->{interface}->{$lnum}->{knet_link_priority} = $prio if $prio;
my $prio = $link->{priority};
$totem->{interface}->{$lnum}->{knet_link_priority} = $prio if $prio;
$node->{"ring${lnum}_addr"} = $link->{address};
$node->{"ring${lnum}_addr"} = $link->{address};
}
return { main => $conf };
@ -335,36 +337,40 @@ sub verify_conf {
my $nodelist = nodelist($conf);
if (!$nodelist) {
push @errors, "no nodes found";
return (\@errors, \@warnings);
push @errors, "no nodes found";
return (\@errors, \@warnings);
}
my $totem = $conf->{main}->{totem};
if (!$totem) {
push @errors, "no totem found";
return (\@errors, \@warnings);
push @errors, "no totem found";
return (\@errors, \@warnings);
}
if ((!defined($totem->{secauth}) || $totem->{secauth} ne 'on') &&
(!defined($totem->{crypto_cipher}) || $totem->{crypto_cipher} eq 'none')) {
push @warnings, "warning: authentication/encryption is not explicitly enabled"
. " (secauth / crypto_cipher / crypto_hash)";
if (
(!defined($totem->{secauth}) || $totem->{secauth} ne 'on')
&& (!defined($totem->{crypto_cipher}) || $totem->{crypto_cipher} eq 'none')
) {
push @warnings, "warning: authentication/encryption is not explicitly enabled"
. " (secauth / crypto_cipher / crypto_hash)";
}
my $interfaces = $totem->{interface};
my $verify_link_ip = sub {
my ($key, $link, $node) = @_;
my ($resolved_ip, undef) = resolve_hostname_like_corosync($link, $conf);
if (!defined($resolved_ip)) {
push @warnings, "warning: unable to resolve $key '$link' for node '$node'"
. " to an IP address according to Corosync's resolve strategy -"
. " cluster could fail on restart!";
} elsif ($resolved_ip ne $link) {
push @warnings, "warning: $key '$link' for node '$node' resolves to"
. " '$resolved_ip' - consider replacing it with the currently"
. " resolved IP address for stability";
}
my ($key, $link, $node) = @_;
my ($resolved_ip, undef) = resolve_hostname_like_corosync($link, $conf);
if (!defined($resolved_ip)) {
push @warnings,
"warning: unable to resolve $key '$link' for node '$node'"
. " to an IP address according to Corosync's resolve strategy -"
. " cluster could fail on restart!";
} elsif ($resolved_ip ne $link) {
push @warnings,
"warning: $key '$link' for node '$node' resolves to"
. " '$resolved_ip' - consider replacing it with the currently"
. " resolved IP address for stability";
}
};
# sort for output order stability
@ -372,59 +378,59 @@ sub verify_conf {
my $node_links = {};
foreach my $node (@node_names) {
my $options = $nodelist->{$node};
foreach my $opt (keys %$options) {
my ($linktype, $linkid) = parse_link_entry($opt);
next if !defined($linktype);
$node_links->{$node}->{$linkid} = {
name => "${linktype}${linkid}_addr",
addr => $options->{$opt},
};
}
my $options = $nodelist->{$node};
foreach my $opt (keys %$options) {
my ($linktype, $linkid) = parse_link_entry($opt);
next if !defined($linktype);
$node_links->{$node}->{$linkid} = {
name => "${linktype}${linkid}_addr",
addr => $options->{$opt},
};
}
}
if (%$interfaces) {
# if interfaces are defined, *all* links must have a matching interface
# definition, and vice versa
for my $link (0..MAX_LINK_INDEX) {
my $have_interface = defined($interfaces->{$link});
foreach my $node (@node_names) {
my $linkdef = $node_links->{$node}->{$link};
if (defined($linkdef)) {
$verify_link_ip->($linkdef->{name}, $linkdef->{addr}, $node);
if (!$have_interface) {
push @errors, "node '$node' has '$linkdef->{name}', but"
. " there is no interface number $link configured";
}
} else {
if ($have_interface) {
push @errors, "node '$node' is missing address for"
. "interface number $link";
}
}
}
}
# if interfaces are defined, *all* links must have a matching interface
# definition, and vice versa
for my $link (0 .. MAX_LINK_INDEX) {
my $have_interface = defined($interfaces->{$link});
foreach my $node (@node_names) {
my $linkdef = $node_links->{$node}->{$link};
if (defined($linkdef)) {
$verify_link_ip->($linkdef->{name}, $linkdef->{addr}, $node);
if (!$have_interface) {
push @errors, "node '$node' has '$linkdef->{name}', but"
. " there is no interface number $link configured";
}
} else {
if ($have_interface) {
push @errors,
"node '$node' is missing address for" . "interface number $link";
}
}
}
}
} else {
# without interfaces, only check that links are consistent among nodes
for my $link (0..MAX_LINK_INDEX) {
my $nodes_with_link = {};
foreach my $node (@node_names) {
my $linkdef = $node_links->{$node}->{$link};
if (defined($linkdef)) {
$verify_link_ip->($linkdef->{name}, $linkdef->{addr}, $node);
$nodes_with_link->{$node} = 1;
}
}
# without interfaces, only check that links are consistent among nodes
for my $link (0 .. MAX_LINK_INDEX) {
my $nodes_with_link = {};
foreach my $node (@node_names) {
my $linkdef = $node_links->{$node}->{$link};
if (defined($linkdef)) {
$verify_link_ip->($linkdef->{name}, $linkdef->{addr}, $node);
$nodes_with_link->{$node} = 1;
}
}
if (%$nodes_with_link) {
foreach my $node (@node_names) {
if (!defined($nodes_with_link->{$node})) {
push @errors, "node '$node' is missing link $link,"
. " which is configured on other nodes";
}
}
}
}
if (%$nodes_with_link) {
foreach my $node (@node_names) {
if (!defined($nodes_with_link->{$node})) {
push @errors, "node '$node' is missing link $link,"
. " which is configured on other nodes";
}
}
}
}
}
return (\@errors, \@warnings);
@ -446,18 +452,18 @@ sub for_all_corosync_addresses {
# iterate sorted to make rules deterministic (for change detection)
foreach my $node_name (sort keys %$nodelist) {
my $node_config = $nodelist->{$node_name};
foreach my $node_key (sort keys %$node_config) {
if ($node_key =~ $link_addr_re) {
my $node_address = $node_config->{$node_key};
my $node_config = $nodelist->{$node_name};
foreach my $node_key (sort keys %$node_config) {
if ($node_key =~ $link_addr_re) {
my $node_address = $node_config->{$node_key};
my($ip, $version) = resolve_hostname_like_corosync($node_address, $corosync_conf);
next if !defined($ip);
next if defined($version) && defined($ip_version) && $version != $ip_version;
my ($ip, $version) = resolve_hostname_like_corosync($node_address, $corosync_conf);
next if !defined($ip);
next if defined($version) && defined($ip_version) && $version != $ip_version;
$func->($node_name, $ip, $version, $node_key);
}
}
$func->($node_name, $ip, $version, $node_key);
}
}
}
}
@ -467,20 +473,20 @@ sub resolve_hostname_like_corosync {
my ($hostname, $corosync_conf) = @_;
my $corosync_strategy = $corosync_conf->{main}->{totem}->{ip_version};
$corosync_strategy = lc ($corosync_strategy // "ipv6-4");
$corosync_strategy = lc($corosync_strategy // "ipv6-4");
my $match_ip_and_version = sub {
my ($addr) = @_;
my ($addr) = @_;
return undef if !defined($addr);
return undef if !defined($addr);
if ($addr =~ m/^$IPV4RE$/) {
return ($addr, 4);
} elsif ($addr =~ m/^$IPV6RE$/) {
return ($addr, 6);
}
if ($addr =~ m/^$IPV4RE$/) {
return ($addr, 4);
} elsif ($addr =~ m/^$IPV6RE$/) {
return ($addr, 6);
}
return undef;
return undef;
};
my ($resolved_ip, $ip_version) = $match_ip_and_version->($hostname);
@ -496,30 +502,30 @@ sub resolve_hostname_like_corosync {
return undef if ($@ || !@resolved_raw);
foreach my $socket_info (@resolved_raw) {
next if !$socket_info->{addr};
next if !$socket_info->{addr};
my ($family, undef, $host) = PVE::Tools::unpack_sockaddr_in46($socket_info->{addr});
my ($family, undef, $host) = PVE::Tools::unpack_sockaddr_in46($socket_info->{addr});
if ($family == AF_INET && !defined($resolved_ip4)) {
$resolved_ip4 = inet_ntop(AF_INET, $host);
} elsif ($family == AF_INET6 && !defined($resolved_ip6)) {
$resolved_ip6 = inet_ntop(AF_INET6, $host);
}
if ($family == AF_INET && !defined($resolved_ip4)) {
$resolved_ip4 = inet_ntop(AF_INET, $host);
} elsif ($family == AF_INET6 && !defined($resolved_ip6)) {
$resolved_ip6 = inet_ntop(AF_INET6, $host);
}
last if defined($resolved_ip4) && defined($resolved_ip6);
last if defined($resolved_ip4) && defined($resolved_ip6);
}
# corosync_strategy specifies the order in which IP addresses are resolved
# by corosync. We need to match that order, to ensure we create firewall
# rules for the correct address family.
if ($corosync_strategy eq "ipv4") {
$resolved_ip = $resolved_ip4;
$resolved_ip = $resolved_ip4;
} elsif ($corosync_strategy eq "ipv6") {
$resolved_ip = $resolved_ip6;
$resolved_ip = $resolved_ip6;
} elsif ($corosync_strategy eq "ipv6-4") {
$resolved_ip = $resolved_ip6 // $resolved_ip4;
$resolved_ip = $resolved_ip6 // $resolved_ip4;
} elsif ($corosync_strategy eq "ipv4-6") {
$resolved_ip = $resolved_ip4 // $resolved_ip6;
$resolved_ip = $resolved_ip4 // $resolved_ip6;
}
return $match_ip_and_version->($resolved_ip);
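The strategy fallback above follows corosync's totem.ip_version setting; a short illustrative sketch (the example addresses are placeholders, not from the diff):

    # For a host that resolves to both 2001:db8::1 and 192.0.2.1:
    #   ip_version 'ipv6-4' (the fallback default seen above) -> ('2001:db8::1', 6)
    #   ip_version 'ipv4-6'                                   -> ('192.0.2.1', 4)
    #   ip_version 'ipv4' or 'ipv6'                           -> only that family, else undef
    my ($ip, $version) =
        PVE::Corosync::resolve_hostname_like_corosync('node1.example.com', $corosync_conf);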

File diff suppressed because it is too large Load diff

View file

@ -15,14 +15,16 @@ our @ISA = qw(Exporter);
# This allows declaration use PVE::IPCC ':all';
# If you do not need this, moving things directly into @EXPORT or @EXPORT_OK
# will save memory.
our %EXPORT_TAGS = ( 'all' => [ qw(
) ] );
our %EXPORT_TAGS = (
'all' => [qw(
our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
)],
);
our @EXPORT_OK = (@{ $EXPORT_TAGS{'all'} });
our @EXPORT = qw(
);
our $VERSION = '1.0';
@ -42,9 +44,9 @@ PVE::IPCC - Perl extension to access the PVE IPC Server
=head1 SYNOPSIS
use PVE::IPCC;
my $res = PVE::IPCC::ipcc_send_rec(1, "hello");
my $res = PVE::IPCC::sendfd($socketfd, $fd, $opt_data);
=head1 DESCRIPTION

View file

@ -10,9 +10,7 @@ use PVE::Tools;
use Proxmox::RS::Notify;
cfs_register_file(
'notifications.cfg',
\&parse_notification_config,
\&write_notification_config,
'notifications.cfg', \&parse_notification_config, \&write_notification_config,
);
cfs_register_file(
@ -36,10 +34,14 @@ sub write_notification_config {
sub lock_config {
my ($code, $timeout) = @_;
cfs_lock_file('notifications.cfg', $timeout, sub {
cfs_lock_file('priv/notifications.cfg', $timeout, $code);
die $@ if $@;
});
cfs_lock_file(
'notifications.cfg',
$timeout,
sub {
cfs_lock_file('priv/notifications.cfg', $timeout, $code);
die $@ if $@;
},
);
die $@ if $@;
}
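For reference, a hedged usage sketch of lock_config(); the read_config/write_config helper names are assumed from the surrounding module:

    use PVE::Notify;

    # Both notifications.cfg and priv/notifications.cfg are locked, as the
    # nested cfs_lock_file calls above show; the second argument is the
    # lock timeout in seconds.
    PVE::Notify::lock_config(sub {
        my $config = PVE::Notify::read_config();
        # ... modify $config here ...
        PVE::Notify::write_config($config);
    }, 10);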
@ -69,55 +71,35 @@ my $send_notification = sub {
sub notify {
my ($severity, $template_name, $template_data, $fields, $config) = @_;
$send_notification->(
$severity,
$template_name,
$template_data,
$fields,
$config
$severity, $template_name, $template_data, $fields, $config,
);
}
sub info {
my ($template_name, $template_data, $fields, $config) = @_;
$send_notification->(
'info',
$template_name,
$template_data,
$fields,
$config
'info', $template_name, $template_data, $fields, $config,
);
}
sub notice {
my ($template_name, $template_data, $fields, $config) = @_;
$send_notification->(
'notice',
$template_name,
$template_data,
$fields,
$config
'notice', $template_name, $template_data, $fields, $config,
);
}
sub warning {
my ($template_name, $template_data, $fields, $config) = @_;
$send_notification->(
'warning',
$template_name,
$template_data,
$fields,
$config
'warning', $template_name, $template_data, $fields, $config,
);
}
sub error {
my ($template_name, $template_data, $fields, $config) = @_;
$send_notification->(
'error',
$template_name,
$template_data,
$fields,
$config
'error', $template_name, $template_data, $fields, $config,
);
}
@ -129,11 +111,12 @@ sub check_may_use_target {
my $entities = $config->get_referenced_entities($target);
for my $entity (@$entities) {
$rpcenv->check($user, "/mapping/notification/$entity", [ 'Mapping.Use' ]);
$rpcenv->check($user, "/mapping/notification/$entity", ['Mapping.Use']);
}
}
my $cached_fqdn;
sub common_template_data {
# The hostname is already cached by PVE::INotify::nodename,
# no need to cache it here as well.

View file

@ -1,6 +1,7 @@
package PVE::RRD;
use strict; use warnings;
use strict;
use warnings;
use RRDs;
@ -14,24 +15,24 @@ sub create_rrd_data {
my $rrd = "$rrddir/$rrdname";
my $setup = {
hour => [ 60, 70 ],
day => [ 60*30, 70 ],
week => [ 60*180, 70 ],
month => [ 60*720, 70 ],
year => [ 60*10080, 70 ],
hour => [60, 70],
day => [60 * 30, 70],
week => [60 * 180, 70],
month => [60 * 720, 70],
year => [60 * 10080, 70],
};
my ($reso, $count) = @{$setup->{$timeframe}};
my $ctime = $reso*int(time()/$reso);
my $req_start = $ctime - $reso*$count;
my ($reso, $count) = @{ $setup->{$timeframe} };
my $ctime = $reso * int(time() / $reso);
my $req_start = $ctime - $reso * $count;
$cf = "AVERAGE" if !$cf;
my @args = (
"-s" => $req_start,
"-e" => $ctime - 1,
"-r" => $reso,
);
"-s" => $req_start,
"-e" => $ctime - 1,
"-r" => $reso,
);
my $socket = "/var/run/rrdcached.sock";
push @args, "--daemon" => "unix:$socket" if -S $socket;
@ -42,23 +43,23 @@ sub create_rrd_data {
die "RRD error: $err\n" if $err;
die "got wrong time resolution ($step != $reso)\n"
if $step != $reso;
if $step != $reso;
my $res = [];
my $fields = scalar(@$names);
for my $line (@$data) {
my $entry = { 'time' => $start };
$start += $step;
for (my $i = 0; $i < $fields; $i++) {
my $name = $names->[$i];
if (defined(my $val = $line->[$i])) {
$entry->{$name} = $val;
} else {
# leave empty fields undefined
# maybe make this configurable?
}
}
push @$res, $entry;
my $entry = { 'time' => $start };
$start += $step;
for (my $i = 0; $i < $fields; $i++) {
my $name = $names->[$i];
if (defined(my $val = $line->[$i])) {
$entry->{$name} = $val;
} else {
# leave empty fields undefined
# maybe make this configurable?
}
}
push @$res, $entry;
}
return $res;
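The timeframe table above pairs a step size with a point count; a small worked sketch of the window arithmetic (values are illustrative):

    # timeframe 'day': step $reso = 60 * 30 = 1800 s, $count = 70 points
    my ($reso, $count) = (1800, 70);

    # Align the window end to the step so repeated calls hit the same RRD
    # buckets, then go back $count steps (70 * 30 min = 35 h here).
    my $ctime     = $reso * int(time() / $reso);
    my $req_start = $ctime - $reso * $count;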
@ -82,24 +83,24 @@ sub create_rrd_graph {
my $filename = "${rrd}_${ds_txt}.png";
my $setup = {
hour => [ 60, 60 ],
day => [ 60*30, 70 ],
week => [ 60*180, 70 ],
month => [ 60*720, 70 ],
year => [ 60*10080, 70 ],
hour => [60, 60],
day => [60 * 30, 70],
week => [60 * 180, 70],
month => [60 * 720, 70],
year => [60 * 10080, 70],
};
my ($reso, $count) = @{$setup->{$timeframe}};
my ($reso, $count) = @{ $setup->{$timeframe} };
my @args = (
"--imgformat" => "PNG",
"--border" => 0,
"--height" => 200,
"--width" => 800,
"--start" => - $reso*$count,
"--end" => 'now' ,
"--lower-limit" => 0,
);
"--imgformat" => "PNG",
"--border" => 0,
"--height" => 200,
"--width" => 800,
"--start" => -$reso * $count,
"--end" => 'now',
"--lower-limit" => 0,
);
my $socket = "/var/run/rrdcached.sock";
push @args, "--daemon" => "unix:$socket" if -S $socket;
@ -110,14 +111,14 @@ sub create_rrd_graph {
my $i = 0;
foreach my $id (@ids) {
my $col = $coldef[$i++] || die "fixme: no color definition";
push @args, "DEF:${id}=$rrd:${id}:$cf";
my $dataid = $id;
if ($id eq 'cpu' || $id eq 'iowait') {
push @args, "CDEF:${id}_per=${id},100,*";
$dataid = "${id}_per";
}
push @args, "LINE2:${dataid}${col}:${id}";
my $col = $coldef[$i++] || die "fixme: no color definition";
push @args, "DEF:${id}=$rrd:${id}:$cf";
my $dataid = $id;
if ($id eq 'cpu' || $id eq 'iowait') {
push @args, "CDEF:${id}_per=${id},100,*";
$dataid = "${id}_per";
}
push @args, "LINE2:${dataid}${col}:${id}";
}
push @args, '--full-size-mode';

View file

@ -11,39 +11,46 @@ sub get_ssh_info {
my $ip;
if (defined($network_cidr)) {
    # Use mtunnel to get the remote node's ip inside $network_cidr.
    # This goes over the regular network (iow. uses get_ssh_info() with
    # $network_cidr undefined).
# FIXME: Use the REST API client for this after creating an API entry
# for get_migration_ip.
my $default_remote = get_ssh_info($node, undef);
my $default_ssh = ssh_info_to_command($default_remote);
my $cmd =[@$default_ssh, 'pvecm', 'mtunnel',
'-migration_network', $network_cidr,
'-get_migration_ip'
];
PVE::Tools::run_command($cmd, outfunc => sub {
my ($line) = @_;
chomp $line;
die "internal error: unexpected output from mtunnel\n"
if defined($ip);
if ($line =~ /^ip: '(.*)'$/) {
$ip = $1;
} else {
die "internal error: bad output from mtunnel\n"
if defined($ip);
}
});
die "failed to get ip for node '$node' in network '$network_cidr'\n"
if !defined($ip);
        # Use mtunnel to get the remote node's ip inside $network_cidr.
        # This goes over the regular network (iow. uses get_ssh_info() with
        # $network_cidr undefined).
# FIXME: Use the REST API client for this after creating an API entry
# for get_migration_ip.
my $default_remote = get_ssh_info($node, undef);
my $default_ssh = ssh_info_to_command($default_remote);
my $cmd = [
@$default_ssh,
'pvecm',
'mtunnel',
'-migration_network',
$network_cidr,
'-get_migration_ip',
];
PVE::Tools::run_command(
$cmd,
outfunc => sub {
my ($line) = @_;
chomp $line;
die "internal error: unexpected output from mtunnel\n"
if defined($ip);
if ($line =~ /^ip: '(.*)'$/) {
$ip = $1;
} else {
die "internal error: bad output from mtunnel\n"
if defined($ip);
}
},
);
die "failed to get ip for node '$node' in network '$network_cidr'\n"
if !defined($ip);
} else {
$ip = PVE::Cluster::remote_node_ip($node);
$ip = PVE::Cluster::remote_node_ip($node);
}
return {
ip => $ip,
name => $node,
network => $network_cidr,
ip => $ip,
name => $node,
network => $network_cidr,
};
}
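A hedged usage sketch for the helper above (the PVE::SSHInfo package name is assumed from where this code lives upstream; node name and CIDR are placeholders):

    use PVE::SSHInfo;

    # Without a CIDR the address comes from PVE::Cluster::remote_node_ip();
    # with one, the 'pvecm mtunnel ... -get_migration_ip' round trip shown
    # above is used to find the node's address inside that network.
    my $info = PVE::SSHInfo::get_ssh_info('node2');
    my $mig  = PVE::SSHInfo::get_ssh_info('node2', '10.10.10.0/24');

    # Both return a hash like { ip => '192.0.2.12', name => 'node2', network => ... }.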
@ -55,17 +62,18 @@ sub ssh_info_to_ssh_opts {
my $known_hosts_file = "/etc/pve/nodes/$nodename/ssh_known_hosts";
my $known_hosts_options = undef;
if (-f $known_hosts_file) {
$known_hosts_options = [
'-o', "UserKnownHostsFile=$known_hosts_file",
'-o', 'GlobalKnownHostsFile=none',
];
$known_hosts_options = [
'-o', "UserKnownHostsFile=$known_hosts_file", '-o', 'GlobalKnownHostsFile=none',
];
}
return [
'-o', 'BatchMode=yes',
'-o', 'HostKeyAlias='.$nodename,
defined($known_hosts_options) ? @$known_hosts_options : (),
@extra_options
'-o',
'BatchMode=yes',
'-o',
'HostKeyAlias=' . $nodename,
defined($known_hosts_options) ? @$known_hosts_options : (),
@extra_options,
];
}
@ -75,9 +83,9 @@ sub ssh_info_to_command_base {
my $opts = ssh_info_to_ssh_opts($info, @extra_options);
return [
'/usr/bin/ssh',
'-e', 'none', # only works for ssh, not scp!
$opts->@*,
'/usr/bin/ssh',
'-e', 'none', # only works for ssh, not scp!
$opts->@*,
];
}
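Roughly the command list the two helpers above assemble for a node named 'node2' with a per-node known-hosts file present (illustrative):

    my $cmd = [
        '/usr/bin/ssh',
        '-e', 'none',
        '-o', 'BatchMode=yes',
        '-o', 'HostKeyAlias=node2',
        '-o', 'UserKnownHostsFile=/etc/pve/nodes/node2/ssh_known_hosts',
        '-o', 'GlobalKnownHostsFile=none',
    ];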

View file

@ -12,9 +12,8 @@ if (defined(my $res = PVE::IPCC::ipcc_send_rec(2))) {
exit 0;
my $i = 0;
for($i = 0; $i < 10000; $i++) {
for ($i = 0; $i < 10000; $i++) {
print "t1\n";
print "c1: " . PVE::IPCC::ipcc_send_rec(1, "adas\0defg") . "\n";
print "t1\n";

View file

@ -7,7 +7,6 @@ use PVE::INotify;
use PVE::AccessControl;
use Data::Dumper;
my $nodename = PVE::INotify::nodename();
PVE::Cluster::log_msg(1, "ident2", "msg1 öäü");
PVE::Cluster::log_msg(1, "root\@pam", "msg1 öäü");
@ -25,7 +24,6 @@ exit 0;
while (1) {
print "update start\n";
PVE::Cluster::cfs_update();
print "update end\n";
@ -42,20 +40,19 @@ exit 0;
my $loopcount = 0;
while (1) {
PVE::Cluster::update();
PVE::Cluster::broadcast_vminfo({ count => $loopcount});
PVE::Cluster::broadcast_vminfo({ count => $loopcount });
my $res = PVE::Cluster::get_vminfo($nodename);
print "TEST1: " . Dumper($res);
if (defined($res = PVE::Cluster::get_config("cluster.conf"))) {
print "TEST2: " . Dumper($res);
if (defined($res = PVE::Cluster::get_config("cluster.conf"))) {
print "TEST2: " . Dumper($res);
} else {
warn "get_config failed: $!\n";
warn "get_config failed: $!\n";
}
$loopcount++;

View file

@ -25,7 +25,7 @@ sub mocked_resolve {
my ($hostname) = @_;
foreach my $host (keys %$known_hosts) {
return $known_hosts->{$host} if $hostname eq $host;
return $known_hosts->{$host} if $hostname eq $host;
}
die "got unknown hostname '$hostname' during mocked resolve_hostname_like_corosync";
@ -41,19 +41,20 @@ sub parser_self_check {
my ($config1, $config2, $raw1, $raw2);
eval {
# read first time
$raw1 = PVE::Tools::file_get_contents($cfg_fn);
$config1 = PVE::Corosync::parse_conf($cfg_fn, $raw1);
# read first time
$raw1 = PVE::Tools::file_get_contents($cfg_fn);
$config1 = PVE::Corosync::parse_conf($cfg_fn, $raw1);
# write config
$raw2 = PVE::Corosync::write_conf(undef, $config1);
    # do not actually write the cfg, but you can uncomment the line below to do so,
    # e.g. if you want to use diff for easy comparison
#PVE::Tools::file_set_contents($outfile, $raw2);
# write config
$raw2 = PVE::Corosync::write_conf(undef, $config1);
        # do not actually write the cfg, but you can uncomment the line below to do so,
        # e.g. if you want to use diff for easy comparison
#PVE::Tools::file_set_contents($outfile, $raw2);
# reparse written config (must be the same as config1)
$config2 = PVE::Corosync::parse_conf(undef, $raw2);
}; warn $@ if $@;
# reparse written config (must be the same as config1)
$config2 = PVE::Corosync::parse_conf(undef, $raw2);
};
warn $@ if $@;
# test verify_config
my ($err, $warn) = PVE::Corosync::verify_conf($config1);
@ -77,7 +78,7 @@ if (my $file = shift) {
parser_self_check($file);
} else {
foreach my $file (<corosync_configs/*.conf>) {
parser_self_check($file);
parser_self_check($file);
}
}

View file

@ -14,17 +14,17 @@ use PVE::QemuConfig;
use PVE::LXC::Config;
sub sec_to_unit {
my $sec = shift;
my $sec = shift;
my $unit_index = 0;
while ($sec < 1) {
$sec *= 1000;
$unit_index++;
}
my $unit_index = 0;
while ($sec < 1) {
$sec *= 1000;
$unit_index++;
}
my $unit = @{['s', 'ms', 'us', 'ns', 'ps']}[$unit_index];
my $unit = @{ ['s', 'ms', 'us', 'ns', 'ps'] }[$unit_index];
return wantarray ? ($sec, $unit) : "$sec $unit";
return wantarray ? ($sec, $unit) : "$sec $unit";
}
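sec_to_unit() above scales a sub-second value by 1000 until it reaches at least 1 and picks the matching unit; a quick example:

    my ($val, $unit) = sec_to_unit(0.0042);   # list context: (4.2, 'ms')
    my $pretty       = sec_to_unit(0.0042);   # scalar context: "4.2 ms"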
@ -37,22 +37,23 @@ sub perf {
my $loop = 0;
eval {
my $t0 = [gettimeofday];
my $t0 = [gettimeofday];
for (my $i = 0; $i<$loops; $i++) {
$code->();
}
for (my $i = 0; $i < $loops; $i++) {
$code->();
}
my $elapsed = tv_interval ($t0, [gettimeofday]);
my $elapsed = tv_interval($t0, [gettimeofday]);
my $total = sec_to_unit($elapsed);
my $per_loop = $elapsed/$loops;
$loop = sec_to_unit($per_loop);
my $total = sec_to_unit($elapsed);
my $per_loop = $elapsed / $loops;
$loop = sec_to_unit($per_loop);
$results->{$name} = [ $elapsed * 1000, $per_loop * 1000 ];
$results->{$name} = [$elapsed * 1000, $per_loop * 1000];
print STDERR "elapsed['$name' x $loops]: $total => $loop/loop\n";
}; warn $@ if $@;
print STDERR "elapsed['$name' x $loops]: $total => $loop/loop\n";
};
warn $@ if $@;
return $loop;
}
@ -61,28 +62,36 @@ my $loops = shift // 3;
my $vmid = shift // 0;
my $prop = shift // 'lock';
perf('cfg-get-prop', $loops, sub {
my $res = PVE::Cluster::get_guest_config_property($prop, $vmid);
});
perf(
'cfg-get-prop',
$loops,
sub {
my $res = PVE::Cluster::get_guest_config_property($prop, $vmid);
},
);
PVE::Cluster::cfs_update();
perf('perl-manual', $loops, sub {
my $res = {};
perf(
'perl-manual',
$loops,
sub {
my $res = {};
# modeled after the manager API cluster/resource call
my $vmlist = PVE::Cluster::get_vmlist() || {};
my $idlist = $vmlist->{ids} || {};
foreach my $vmid (keys %$idlist) {
# modeled after the manager API cluster/resource call
my $vmlist = PVE::Cluster::get_vmlist() || {};
my $idlist = $vmlist->{ids} || {};
foreach my $vmid (keys %$idlist) {
my $data = $idlist->{$vmid};
my $typedir = $data->{type} eq 'qemu' ? 'qemu-server' : 'lxc';
my $data = $idlist->{$vmid};
my $typedir = $data->{type} eq 'qemu' ? 'qemu-server' : 'lxc';
my $conf = PVE::Cluster::cfs_read_file("nodes/$data->{node}/$typedir/$vmid.conf");
my $conf = PVE::Cluster::cfs_read_file("nodes/$data->{node}/$typedir/$vmid.conf");
my $v = $conf->{$prop};
$res->{$vmid} = { $prop => $v } if defined($v);
}
});
my $v = $conf->{$prop};
$res->{$vmid} = { $prop => $v } if defined($v);
}
},
);
#PVE::Cluster::get_tasklist('dev5');
my $a = $results->{'cfg-get-prop'};

View file

@ -12,18 +12,18 @@ sub safe_mkdir {
(mkdir $dir) || die "safe_mkdir $dir failed - $!\n";
}
my $data = "0" x (1024*100);
my $data = "0" x (1024 * 100);
sub create_file {
my ($filename) = shift;
my $fh = new IO::File $filename, O_RDWR|O_CREAT|O_EXCL;
my $fh = new IO::File $filename, O_RDWR | O_CREAT | O_EXCL;
die "cant create file $filename - $!" if !defined $fh;
#my $data = "$filename\n" x 30;
(print $fh $data) || die "write $filename failed\n";
close ($fh);
close($fh);
#system("cat $filename");
#system("df -h /etc/pve");

View file

@ -15,14 +15,14 @@ sub safe_mkdir {
sub create_file {
my ($filename) = shift;
my $fh = new IO::File $filename, O_RDWR|O_CREAT|O_EXCL;
my $fh = new IO::File $filename, O_RDWR | O_CREAT | O_EXCL;
die "cant create file $filename - $!" if !defined $fh;
#my $data = "$filename\n" x 30;
my $data = "0" x 2048;
(print $fh $data) || die "write $filename failed\n";
close ($fh);
close($fh);
#system("cat $filename");
#system("df -h /etc/pve");
@ -39,6 +39,6 @@ for (my $i = 0; $i < 100; $i++) {
safe_mkdir "$testdir/$i";
for (my $j = 0; $j < 90; $j++) {
create_file("$testdir/$i/test$j.dat");
create_file("$testdir/$i/test$j.dat");
}
}

View file

@ -25,14 +25,14 @@ sub safe_unlink {
sub create_vmfile {
my ($filename) = shift;
my $fh = new IO::File $filename, O_RDWR|O_CREAT|O_EXCL;
my $fh = new IO::File $filename, O_RDWR | O_CREAT | O_EXCL;
die "cant create file $filename - $!" if !defined $fh;
#my $data = "$filename\n" x 30;
my $data = "0" x 1024;
(print $fh $data) || die "write $filename failed\n";
close ($fh);
close($fh);
#system("cat $filename");
#system("df -h /etc/pve");
@ -42,13 +42,13 @@ sub start_vmtest {
my ($subdir) = @_;
for (my $i = 1000; $i < 1100; $i++) {
my $filename = "$subdir/${i}.conf";
create_vmfile($filename);
my $filename = "$subdir/${i}.conf";
create_vmfile($filename);
}
for (my $i = 1000; $i < 1100; $i++) {
my $filename = "$subdir/${i}.conf";
safe_unlink($filename);
my $filename = "$subdir/${i}.conf";
safe_unlink($filename);
}
}
@ -72,21 +72,20 @@ sub start_test {
safe_rmdir $subdir;
}
my $basedir = "/etc/pve/nodes/";
my $testdir = "$basedir/${nodename}-test1";
remove_tree($testdir);
while(1) {
while (1) {
eval {
local $SIG{INT} = sub { die "interrupted" };
start_test("$testdir");
local $SIG{INT} = sub { die "interrupted" };
start_test("$testdir");
};
my $err = $@;
system("date; df -h /etc/pve");
die $err if $err;
}

View file

@ -25,4 +25,4 @@ if (defined $v) {
print "querying value for key: $k\n";
my $res = PVE::Cluster::get_node_kv($k);
print "res: " . to_json($res, {utf8 => 1, pretty => 1}) ."\n";
print "res: " . to_json($res, { utf8 => 1, pretty => 1 }) . "\n";

View file

@ -18,4 +18,4 @@ my $prop = shift // 'lock';
my $res = PVE::Cluster::get_guest_config_property($prop, $vmid);
print "res: " . to_json($res, {utf8 => 1, pretty => 1}) ."\n";
print "res: " . to_json($res, { utf8 => 1, pretty => 1 }) . "\n";

View file

@ -18,4 +18,4 @@ my $userid = shift // 'root@pam';
my $res = PVE::Cluster::verify_token($userid, $token);
print "token '$userid $token' ". ($res ? '' : "not " ) ."found\n";
print "token '$userid $token' " . ($res ? '' : "not ") . "found\n";