$out
".&html_escape($lerr)."
" ]); } if (!$noupload) { # Show mode to upload to server push(@opts, [ 5, $text{'backup_mode5'}, &ui_upload($name."_upload", 40) ]); } return &ui_radio_selector(\@opts, $name."_mode", $mode, 1); } # parse_backup_destination(name, &in, no-local, [&domain], format) # Returns a backup destination string, or calls error sub parse_backup_destination { local ($name, $in, $nolocal, $d, $fmt) = @_; local %in = %$in; local $mode = $in{$name."_mode"}; if ($mode == -1) { # Removing this one return undef; } if ($mode == 0 && defined($fmt) && $fmt == 0) { # For a single-file backup, make sure the filename makes sense $in{$name."_file"} =~ /\.(gz|zip|tar|bz2|Z|tgz|tbz2)$/i || &error($text{'backup_edestext'}); } if ($mode == 0 && $d) { # Local file under virtualmin-backup directory $in{$name."_file"} =~ /^\S+$/ || &error($text{'backup_edest2'}); $in{$name."_file"} =~ /\.\./ && &error($text{'backup_edest3'}); $in{$name."_file"} =~ s/\/+$//; $in{$name."_file"} =~ s/^\/+//; my $user_backup_dir = $home_virtualmin_backup; if ($in{'plugged'} && &plugin_defined($in{'plugged'}, 'feature_backup_dir')) { my $user_backup_plugin_dir = &plugin_call( $in{'plugged'}, 'feature_backup_dir'); $user_backup_dir = $user_backup_plugin_dir if ($user_backup_plugin_dir =~ /^\S+$/ && $user_backup_plugin_dir !~ /\//); } return "$d->{'home'}/$user_backup_dir/".$in{$name."_file"}; } elsif ($mode == 0 && !$nolocal) { # Any local file $in{$name."_file"} =~ /^\/\S/ || &error($text{'backup_edest'}); $in{$name."_file"} =~ s/\/+$//; # No need for trailing / return $in{$name."_file"}; } elsif ($mode == 1) { # FTP server local ($server, $port); if ($in{$name."_server"} =~ /^\[([^\]]+)\](:(\d+))?$/) { ($server, $port) = ($1, $3); } elsif ($in{$name."_server"} =~ /^([A-Za-z0-9\.\-\_]+)(:(\d+))?$/) { ($server, $port) = ($1, $3); } else { &error($text{'backup_eserver1'}); } &to_ipaddress($server) || defined(&to_ip6address) && &to_ip6address($server) || &error($text{'backup_eserver1a'}); $port =~ /^\d*$/ || &error($text{'backup_eport'}); $in{$name."_path"} =~ /\S/ || &error($text{'backup_epath'}); $in{$name."_user"} =~ /^[^:\/ ]*$/ || &error($text{'backup_euser'}); if ($in{$name."_path"} ne "/") { # Strip trailing / $in{$name."_path"} =~ s/\/+$//; } local $sep = $in{$name."_path"} =~ /^\// ? "" : ":"; return "ftp://".$in{$name."_user"}.":".$in{$name."_pass"}."\@". $in{$name."_server"}.$sep.$in{$name."_path"}; } elsif ($mode == 2) { # SSH server local ($server, $port); if ($in{$name."_sserver"} =~ /^\[([^\]]+)\](:(\d+))?$/) { ($server, $port) = ($1, $3); } elsif ($in{$name."_sserver"} =~ /^([A-Za-z0-9\.\-\_]+)(:(\d+))?$/) { ($server, $port) = ($1, $3); } else { &error($text{'backup_eserver2'}); } &to_ipaddress($server) || defined(&to_ip6address) && &to_ip6address($server) || &error($text{'backup_eserver2a'}); $port =~ /^\d*$/ || &error($text{'backup_eport'}); $in{$name."_spath"} =~ /\S/ || &error($text{'backup_epath'}); $in{$name."_suser"} =~ /^[^:\/ ]*$/ || &error($text{'backup_euser2'}); if ($in{$name."_spath"} ne "/") { # Strip trailing / $in{$name."_spath"} =~ s/\/+$//; } my $pass = $in{$name."_spass"}; if ($pass eq "") { $pass = $in{$name."_sshkey"}; $pass =~ s/\//\|/g; } return "ssh://".$in{$name."_suser"}.":".$pass."\@". 
$in{$name."_sserver"}.":".$in{$name."_spath"}; } elsif ($mode == 3) { # Amazon S3 service $in{$name.'_s3path'} =~ /^\S+$/ || &error($text{'backup_es3path'}); $in{$name.'_s3path'} =~ /\\/ && &error($text{'backup_es3pathslash'}); ($in{$name.'_s3path'} =~ /^\// || $in{$name.'_s3path'} =~ /\/$/) && &error($text{'backup_es3path2'}); local $proto = $in{$name.'_rrs'} ? 's3rrs' : 's3'; my @s3s = &list_s3_accounts(); my ($s3) = grep { $_->{'id'} eq $in{$name."_as3"} } @s3s; $s3 || &error($text{'backup_eas3'}); return $proto."://".$s3->{'id'}."\@".$in{$name.'_s3path'}; } elsif ($mode == 4) { # Just download return "download:"; } elsif ($mode == 44) { # Generate download link return "downloadlink:"; } elsif ($mode == 5) { # Uploaded file $in{$name."_upload"} || &error($text{'backup_eupload'}); return "upload:"; } elsif ($mode == 6) { # Rackspace cloud files $in{$name.'_rsuser'} =~ /^\S+$/i || &error($text{'backup_ersuser'}); $in{$name.'_rskey'} =~ /^\S+$/i || &error($text{'backup_erskey'}); $in{$name.'_rspath'} =~ /^\S+$/i || &error($text{'backup_erspath'}); ($in{$name.'_rspath'} =~ /^\// || $in{$name.'_rspath'} =~ /\/$/) && &error($text{'backup_erspath2'}); return "rs://".$in{$name.'_rsuser'}.":".$in{$name.'_rskey'}."\@". $in{$name.'_rspath'}; } elsif ($mode == 7 && &can_use_cloud("google")) { # Google cloud storage $in{$name.'_gcpath'} =~ /^\S+$/i || &error($text{'backup_egcpath'}); ($in{$name.'_gcpath'} =~ /^\// || $in{$name.'_gcpath'} =~ /\/$/) && &error($text{'backup_egcpath2'}); return "gcs://".$in{$name.'_gcpath'}; } elsif ($mode == 8 && &can_use_cloud("dropbox")) { # Dropbox $in{$name.'_dbpath'} =~ /^\S+$/i || &error($text{'backup_edbpath'}); ($in{$name.'_dbpath'} =~ /^\// || $in{$name.'_dbpath'} =~ /\/$/) && &error($text{'backup_edbpath2'}); return "dropbox://".$in{$name.'_dbpath'}; } elsif ($mode == 10 && &can_use_cloud("bb")) { # Backblaze $in{$name.'_bbpath'} =~ /^\S+$/i || &error($text{'backup_ebbpath'}); ($in{$name.'_bbpath'} =~ /^\// || $in{$name.'_bbpath'} =~ /\/$/) && &error($text{'backup_ebbpath2'}); return "bb://".$in{$name.'_bbpath'}; } elsif ($mode == 11 && &can_use_cloud("azure")) { # Azure blob storage $in{$name.'_azpath'} =~ /^\S+$/i || &error($text{'backup_eazpath'}); ($in{$name.'_azpath'} =~ /^\// || $in{$name.'_azpath'} =~ /\/$/) && &error($text{'backup_eazpath2'}); return "azure://".$in{$name.'_azpath'}; } elsif ($mode == 12 && &can_use_cloud("drive")) { # Google Drive $in{$name.'_drpath'} =~ /^\S+$/i || &error($text{'backup_edrpath'}); ($in{$name.'_drpath'} =~ /^\// || $in{$name.'_drpath'} =~ /\/$/) && &error($text{'backup_edrpath2'}); return "drive://".$in{$name.'_drpath'}; } elsif ($mode == 9) { # Webmin server local ($server, $port); if ($in{$name."_wserver"} =~ /^\[([^\]]+)\](:(\d+))?$/) { ($server, $port) = ($1, $3); } elsif ($in{$name."_wserver"} =~ /^([A-Za-z0-9\.\-\_]+)(:(\d+))?$/) { ($server, $port) = ($1, $3); } else { &error($text{'backup_eserver9'}); } &to_ipaddress($server) || defined(&to_ip6address) && &to_ip6address($server) || &error($text{'backup_eserver9a'}); $port =~ /^\d*$/ || &error($text{'backup_eport'}); $in{$name."_wpath"} =~ /\S/ || &error($text{'backup_epath'}); $in{$name."_wuser"} =~ /^[^:\/ ]*$/ || &error($text{'backup_euser2'}); if ($in{$name."_wpath"} ne "/") { # Strip trailing / $in{$name."_spath"} =~ s/\/+$//; } return "webmin://".$in{$name."_wuser"}.":".$in{$name."_wpass"}."\@". 
$in{$name."_wserver"}.":".$in{$name."_wpath"}; } else { &error($text{'backup_emode'}); } } # can_backup_sched([&sched]) # Returns 1 if the current user can create scheduled backups, or edit some # existing schedule. If sched is set, checks if the user is allowed to create # schedules at all. sub can_backup_sched { local ($sched) = @_; if (&master_admin()) { # Master admin can do anything return 1; } elsif (&reseller_admin()) { # Resellers can edit schedules for their domains' users return 0 if ($access{'backups'} != 2); if ($sched) { return 0 if (!$sched->{'owner'}); # Master admin's backup return 1 if ($sched->{'owner'} eq $base_remote_user); foreach my $d (&get_reseller_domains($base_remote_user)) { return 1 if ($d->{'id'} eq $sched->{'owner'}); } return 0; } return 1; } else { # Regular users can only edit their own schedules return 0 if (!$access{'edit_sched'}); if ($sched) { return 0 if (!$sched->{'owner'}); # Master admin's backup local $myd = &get_domain_by_user($base_remote_user); return 0 if (!$myd || $myd->{'id'} ne $sched->{'owner'}); } return 1; } } # Returns 1 if the current user can define pre and post-backup commands sub can_backup_commands { return &master_admin(); } # Returns 1 if the current user can configure Amazon S3 buckets sub can_backup_buckets { return &master_admin(); } # Returns 1 if the current user can configure Cloud storage providers sub can_cloud_providers { return &master_admin(); } # can_use_cloud(name) # Returns 1 if the current user has permission to use the default login of # some cloud provider sub can_use_cloud { my ($name) = @_; if (&master_admin()) { return 1; } elsif (&reseller_admin()) { return $config{'cloud_'.$name.'_reseller'}; } else { return $config{'cloud_'.$name.'_owner'}; } } # has_incremental_format([compression]) # Returns 1 if the configured backup format supports differential backups sub has_incremental_format { my ($compression) = @_; $compression = $config{'compression'} if (!defined($compression) || $compression eq ''); return $compression != 3; } # Returns 1 if tar supports differential backups sub has_incremental_tar { return 0 if ($config{'tar_args'} =~ /--acls/); my $tar = &get_tar_command(); my $out = &backquote_command("$tar --help 2>&1 &1 &1 = 1.23; } # get_tar_command() # Returns the full path to the tar command, which may be 'gtar' on BSD sub get_tar_command { my @cmds; if ($config{'tar_cmd'}) { @cmds = ( $config{'tar_cmd'} ); } else { @cmds = ( "tar" ); if ($gconfig{'os_type'} eq 'freebsd' || $gconfig{'os_type'} eq 'netbsd' || $gconfig{'os_type'} eq 'openbsd' || $gconfig{'os_type'} eq 'solaris') { unshift(@cmds, "gtar"); } else { push(@cmds, "gtar"); } } foreach my $c (@cmds) { my ($bin, @args) = split(/\s+/, $c); my $p = &has_command($bin); return join(" ", $p, @args) if ($p); } return undef; } # make_tar_command(flags, output, file, ...) # Returns a tar command using the given flags writing to the given output sub make_tar_command { my ($flags, $output, @files) = @_; my $cmd = &get_tar_command(); if ($config{'tar_args'}) { $cmd .= " ".$config{'tar_args'}; $flags = "-".$flags; if ($flags =~ s/X//) { # In -flag mode, need to move -X after the output name and # before the exclude filename. unshift(@files, "-X"); } } $cmd .= " ".$flags; $cmd .= " ".quotemeta($output); $cmd .= " ".join(" ", map { quotemeta($_) } @files) if (@files); if (&has_no_file_changed()) { # Don't fail if a file was changed while read $cmd .= " --warning=no-file-changed"; } return $cmd; } # make_zip_command(flags, output, file, ...) 
# Returns a ZIP command using the given flags writing to the given output sub make_zip_command { my ($flags, $output, @files) = @_; my $zip = &has_command("zip") || "zip"; my $cmd = $zip; if ($flags) { $cmd .= " ".$flags; } $cmd .= " -r ".quotemeta($output)." ".join(" ", map { quotemeta($_) } @files); return $cmd; } # make_archive_command(compression, directory, output, file, ...) # Returns a command to create an archive of the given files sub make_archive_command { my ($compression, $dir, $out, @files) = @_; if ($compression == 3) { return "cd ".quotemeta($dir)." && ". &make_zip_command("-0", $out, @files); } else { return "cd ".quotemeta($dir)." && ". &make_tar_command("cf", $out, @files); } } # make_unarchive_command(directory, input, [@files]) # Returns a command to extract an archive, possibly for just some files sub make_unarchive_command { my ($dir, $out, @files) = @_; my $cf = &compression_format($out); if ($cf == 4) { my $qfiles = join(" ", map { quotemeta($_) } @files); return "cd ".quotemeta($dir)." && ". "unzip -o ".quotemeta($out)." ".$qfiles; } else { return "cd ".quotemeta($dir)." && ". &make_tar_command("xf", $out, @files); } } # get_bzip2_command() # Returns the full path to the bzip2-compatible command sub get_bzip2_command { local $cmd = $config{'pbzip2'} ? 'pbzip2' : 'bzip2'; local $fullcmd = &has_command($cmd) || $cmd; $fullcmd .= " -c $config{'zip_args'}"; return $fullcmd; } # get_bunzip2_command() # Returns the full path to the bunzip2-compatible command sub get_bunzip2_command { if (!$config{'pbzip2'}) { return &has_command('bunzip2') || 'bunzip2'; } elsif (&has_command('pbunzip2')) { return &has_command('pbunzip2'); } else { # Fall back to using -d option return (&has_command('pbzip2') || 'pbzip2').' -d'; } } # get_gzip_command() # Returns the full path to the gzip-compatible command sub get_gzip_command { local $cmd = $config{'pigz'} ? 'pigz' : 'gzip'; local $fullcmd = &has_command($cmd) || $cmd; $fullcmd .= " -c $config{'zip_args'}"; return $fullcmd; } # get_gunzip_command() # Returns the full path to the gunzip-compatible command sub get_gunzip_command { if (!$config{'pigz'}) { return (&has_command('gunzip') || 'gunzip').' -f'; } elsif (&has_command('unpigz')) { return &has_command('unpigz').' -f'; } else { # Fall back to using -d option return (&has_command('pigz') || 'pigz').' -d'; } } # get_zstd_command() # Returns the full path to the zstd command in compression mode sub get_zstd_command { return &has_command("zstd")." -c"; } # get_unzstd_command() # Returns the full path to the zstd command in de-compression mode sub get_unzstd_command { return &has_command("zstd")." -c -d"; } # get_backup_actions() # Returns a list of arrays for backup / restore actions that the current # user is allowed to do. The first is links, the second titles, the third # long descriptions, the fourth is codes. 
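# Example usage (illustrative sketch only, not taken from this module's
# callers):
#   my ($links, $titles, $descs, $codes) = &get_backup_actions();
#   for(my $i=0; $i<@$links; $i++) {
#           print &ui_link($links->[$i], $titles->[$i]),"<br>\n";
#           }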
sub get_backup_actions { local (@links, @titles, @descs, @codes); if (&can_backup_domain()) { if (&can_backup_sched()) { # Can do scheduled backups, so show list push(@links, "list_sched.cgi"); push(@titles, $text{'index_scheds'}); push(@descs, $text{'index_schedsdesc'}); push(@codes, 'sched'); # Also show any running backups push(@links, "list_running.cgi"); push(@titles, $text{'index_running'}); push(@descs, $text{'index_runningdesc'}); push(@codes, 'running'); } # Can do immediate push(@links, "backup_form.cgi"); push(@titles, $text{'index_backup'}); push(@descs, $text{'index_backupdesc'}); push(@codes, 'backup'); } if (&can_backup_log()) { # Show logged backups push(@links, "backuplog.cgi"); push(@titles, $text{'index_backuplog'}); push(@descs, $text{'index_backuplogdesc'}); push(@codes, 'backuplog'); } if (&can_restore_domain()) { # Show restore form push(@links, "restore_form.cgi"); push(@titles, $text{'index_restore'}); push(@descs, $text{'index_restoredesc'}); push(@codes, 'restore'); } if (&can_backup_keys()) { # Show list of backup keys push(@links, "pro/list_bkeys.cgi"); push(@titles, $text{'index_bkeys'}); push(@descs, $text{'index_bkeysdesc'}); push(@codes, 'bkeys'); } if (&can_cloud_providers()) { # Show a list of Cloud file provider settings pages push(@links, "list_clouds.cgi"); push(@titles, $text{'index_clouds'}); push(@descs, $text{'index_cloudsdesc'}); push(@codes, 'clouds'); # Also Amazon S3 accounts push(@links, "list_s3s.cgi"); push(@titles, $text{'index_s3s'}); push(@descs, $text{'index_s3sdesc'}); push(@codes, 's3s'); } if (&can_backup_buckets()) { # Show list of S3 buckets push(@links, "list_buckets.cgi"); push(@titles, $text{'index_buckets'}); push(@descs, $text{'index_bucketsdesc'}); push(@codes, 'buckets'); } return (\@links, \@titles, \@descs, \@codes); } # Returns 1 if the user can backup and restore all domains # Deprecated, but kept for old theme users sub can_backup_domains { return &master_admin(); } # Returns 1 if the user can backup and restore core Virtualmin settings, like # the config, resellers and so on sub can_backup_virtualmin { return &master_admin(); } # can_backup_domain([&domain], [user]) # Returns 0 if no backups are allowed, 1 if they are, 2 if only backups to # remote or a file under the domain are allowed, 3 if only remote is allowed. # If a domain is given, checks if backups of that domain are allowed. sub can_backup_domain { local ($d, $acluser) = @_; $acluser ||= $base_remote_user; local $base_remote_user = $acluser; local %access = &get_module_acl($acluser); # Use local for scoping if (&master_admin()) { # Master admin can do anything return 1; } elsif (&reseller_admin()) { # Resellers can only backup their domains, to remote return 0 if (!$access{'backups'}); if ($d) { return 0 if (!&can_edit_domain($d)); } return 3; } else { # Domain owners can only backup to their dir, or remote return 0 if (!$access{'edit_backup'}); if ($d) { return 0 if (!&can_edit_domain($d)); } return 2; } } # can_restore_domain([&domain]) # Returns 1 if the user is allowed to perform full restores, 2 if only # dir/mysql restores are allowed, 0 if nothing sub can_restore_domain { local ($d) = @_; if (&master_admin()) { # Master admin always can return 1; } else { if (&reseller_admin()) { # Resellers can do limited restores return 2; } else { # Domain owners can only restore if allowed return 0 if (!$access{'edit_restore'}); } if ($d) { return &can_edit_domain($d) ? 
2 : 0; } return 2; } } # can_backup_log([&log]) # Returns 1 if the current user can view backup logs, and if given a specific # log entry returns 1 if the user can view that log (or 2 if they can but it # was created by root) sub can_backup_log { local ($log) = @_; return 1 if (&master_admin()); if ($log) { # Only allow non-admins to view their own logs local @dnames = &backup_log_own_domains($log); if (!@dnames) { # None of this user's domains are in the backup return 0; } elsif (&master_admin() || $log->{'user'} eq $base_remote_user) { # Backup was created by this user, or user is root return 1; } elsif ($log->{'ownrestore'}) { # Backup was created by root, but includes this user's domains return 2; } return 0; } else { # Do any schedules that allow restore by the domain owner exist? foreach my $s (&list_scheduled_backups()) { return 1 if ($s->{'ownrestore'}); } } return &can_backup_domain() ? 1 : 0; } # can_backup_keys() # Returns 1 if the current user can access all backup keys, 2 if only his own, # 0 if neither sub can_backup_keys { return 0 if (!$virtualmin_pro); # Pro only feature return 0 if ($access{'admin'}); # Not for extra admins return 0 if (!&can_backup_domain()); # Can't do backups, so can't manage keys return 1 if (&master_admin()); # Master admin can access all keys return 2; # Domain owner / reseller can access own } # backup_log_own_domains(&log, [error-domains-only]) # Given a backup log object, return the domain names that the current user # can restore sub backup_log_own_domains { local ($log, $errormode) = @_; local @dnames = split(/\s+/, $errormode ? $log->{'errdoms'} : $log->{'doms'}); return @dnames if (&master_admin() || $log->{'user'} eq $remote_user); if ($log->{'ownrestore'}) { local @rv; foreach my $d (&get_domains_by_names(@dnames)) { push(@rv, $d->{'dom'}) if (&can_edit_domain($d)); } return @rv; } return ( ); } # extract_purge_path(dest) # Given a backup URL with a path like /backup/%d-%m-%Y, return the base # directory (like /backup) and the regexp matching the date-based filename # (like .*-.*-.*) sub extract_purge_path { local ($dest) = @_; local ($mode, undef, undef, $host, $path) = &parse_backup_url($dest); if (($mode == 0 || $mode == 1 || $mode == 2 || $mode == 9) && $path =~ /^(\S+)\/([^%]*%.*)$/) { # Local, FTP, SSH or Webmin file like /backup/%d-%m-%Y local ($base, $date) = ($1, $2); $date =~ s/%[_\-0\^\#]*\d*[A-Za-z]/\.\*/g; return ($base, $date); } elsif (($mode == 1 || $mode == 2 || $mode == 9) && $path =~ /^([^%\/]+%.*)$/) { # FTP, SSH or Webmin file like backup-%d-%m-%Y local ($base, $date) = ("", $1); $date =~ s/%[_\-0\^\#]*\d*[A-Za-z]/\.\*/g; return ($base, $date); } elsif (($mode == 3 || $mode == 6 || $mode == 7 || $mode == 10 || $mode == 12) && $host =~ /%/) { # S3 / Rackspace / GCS / Drive bucket which is date-based $host =~ s/%[_\-0\^\#]*\d*[A-Za-z]/\.\*/g; return (undef, $host); } elsif (($mode == 3 || $mode == 6 || $mode == 7 || $mode == 10 || $mode == 11 || $mode == 12) && $path =~ /%/) { # S3 / Rackspace / GCS / Azure / Drive filename which is date-based $path =~ s/%[_\-0\^\#]*\d*[A-Za-z]/\.\*/g; return ($host, $path); } elsif ($mode == 8) { my $fullpath = $host.($host ? 
"/" : "").$path; if ($fullpath =~ /^\/?(\S+)\/([^%]*%.*)$/) { # Dropbox path - has to be handled differently to S3 and GCS, # as it really does support sub-directories local ($base, $date) = ($1, $2); $base = "/".$base if ($base !~ /^\//); $date =~ s/%[_\-0\^\#]*\d*[A-Za-z]/\.\*/g; return ($base, $date); } } return ( ); } # purge_domain_backups(dest, days, [time-now], [&as-domain], [detailed-output]) # Searches a backup destination for backup files or directories older than # same number of days, and deletes them. May print stuff using first_print. sub purge_domain_backups { local ($dest, $days, $start, $asd, $detail) = @_; local $asuser = $asd ? $asd->{'user'} : undef; local ($mode, $user, $pass, $host, $path, $port) = &parse_backup_url($dest); local ($base, $re) = &extract_purge_path($dest); local $nicebase = $base; if ($dest =~ /^(([a-z0-9]+):\/\/[^\/]*\@[^\/]*)/) { # Add protocol prefix back, if formatted like ftp://user:pass@host/dir $nicebase = $1.$nicebase; } elsif ($dest =~ /^(([a-z0-9]+):\/\/)/) { # Add protocol prefix back, if formatted like bb://bucket/dir $nicebase = $1.$nicebase; } &$first_print(&text('backup_purging3', $days, &nice_backup_url($nicebase), "".&html_escape($re)."")); if (!$base && !$re) { &$second_print($text{'backup_purgenobase'}); return 0; } &$indent_print(); $start ||= time(); local $cutoff = $start - $days*24*60*60; local $pcount = 0; local $mcount = 0; local $ok = 1; if ($mode == 0) { # Just search a local directory for matching files, and remove them opendir(PURGEDIR, $base); foreach my $f (readdir(PURGEDIR)) { next if ($f eq "." || $f eq ".."); local $path = "$base/$f"; local @st = stat($path); if ($detail) { &$first_print(&text('backup_purgeposs', $path, &make_date($st[9]))); } if ($f =~ /^$re$/ && $f !~ /\.(dom|info)$/) { # Found one to delete $mcount++; if (!$st[9] || $st[9] >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $st[9]) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text(-d $path ? 'backup_deletingdir' : 'backup_deletingfile', "$path", $old)); local $sz = &nice_size(&disk_usage_kb($path)*1024); &unlink_file($path.".info") if (!-d $path); &unlink_file($path.".dom") if (!-d $path); &unlink_file($path); &$second_print(&text('backup_deleted', $sz)); $pcount++; } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } closedir(PURGEDIR); } elsif ($mode == 1) { # List parent directory via FTP local $err; local $dir = &ftp_listdir($host, $base, \$err, $user, $pass, $port, 1); if ($err) { &$second_print(&text('backup_purgeelistdir', $err)); return 0; } $dir = [ grep { $_->[13] ne "." && $_->[13] ne ".." 
} @$dir ]; if (@$dir && !$dir->[0]->[9]) { # No times in output &$second_print(&text('backup_purgeelisttimes', $base)); return 0; } foreach my $f (@$dir) { if ($detail) { &$first_print(&text('backup_purgeposs', $f->[13], &make_date($f->[9]))); } if ($f->[13] =~ /^$re$/ && $f->[13] !~ /\.(dom|info)$/) { $mcount++; if (!$f->[9] || $f->[9] >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $f->[9]) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingftp', "$base/$f->[13]", $old)); local $err; local $sz = $f->[7]; $sz += &ftp_deletefile($host, "$base/$f->[13]", \$err, $user, $pass, $port); local $infoerr; &ftp_deletefile($host, "$base/$f->[13].info", \$infoerr, $user, $pass, $port); local $domerr; &ftp_deletefile($host, "$base/$f->[13].dom", \$domerr, $user, $pass, $port); if ($err) { &$second_print(&text('backup_edelftp', $err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($sz))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 2) { # Use ls -l via SSH to list the directory local $sshcmd = "ssh".($port ? " -p $port" : "")." ". $config{'ssh_args'}." ". ($user ? $user."\@" : "").$host; local $err; local $lscmd = $sshcmd." LANG=C ls -l ".quotemeta($base); local $lsout = &run_ssh_command($lscmd, $pass, \$err, $asuser); if ($err) { # Try again without LANG=C , in case shell isn't bash/sh $err = undef; $lscmd = $sshcmd." ls -l ".quotemeta($base); $lsout = &run_ssh_command($lscmd, $pass, \$err, $asuser); } if ($err) { &$second_print(&text('backup_purgeesshls', $err)); return 0; } foreach my $l (split(/\r?\n/, $lsout)) { local @st = &parse_lsl_line($l); next if (!scalar(@st)); next if ($st[13] eq "." || $st[13] eq ".."); if ($detail) { &$first_print(&text('backup_purgeposs', $st[13], &make_date($st[9]))); } if ($st[13] =~ /^$re$/ && $st[13] !~ /\.(dom|info)$/) { $mcount++; if (!$st[9] || $st[9] >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $st[9]) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingssh', "$base/$st[13]", $old)); local $rmcmd = $sshcmd." rm -rf". " ".quotemeta("$base/$st[13]"). " ".quotemeta("$base/$st[13].info"). " ".quotemeta("$base/$st[13].dom"); local $rmerr; &run_ssh_command($rmcmd, $pass, \$rmerr, $asuser); if ($rmerr) { &$second_print(&text('backup_edelssh', $rmerr)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($st[7]))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 9) { # Use stat via Webmin RPC to list directory local $err; local $w = &dest_to_webmin($dest); local $files; eval { local $main::error_must_die = 1; &remote_foreign_require($w, "webmin"); $files = &remote_eval($w, "webmin", '$base = "'.quotemeta($base).'"; '. 'opendir(DIR, $base); '. '@f = readdir(DIR); '. 'closedir(DIR); '. '[ map { [ $_, stat("$base/$_") ] } @f ]'); }; my $err = $@; if ($err) { $err =~ s/\s+at\s+\S+\s+line\s+\d+.*//g; &$second_print(&text('backup_purgeewebminls', $err)); return 0; } foreach my $f (@$files) { my ($fn, @st) = @$f; next if ($fn eq "." 
|| $fn eq ".."); if ($detail) { &$first_print(&text('backup_purgeposs', $fn, &make_date($st[9]))); } if ($fn =~ /^$re$/ && $fn !~ /\.(dom|info)$/) { $mcount++; if (!$st[9] || $st[9] >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $st[9]) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingwebmin', "$base/$fn", $old)); eval { local $main::error_must_die = 1; &remote_foreign_call($w, "webmin", "unlink_file", "$base/$fn"); &remote_foreign_call($w, "webmin", "unlink_file", "$base/$fn.info"); &remote_foreign_call($w, "webmin", "unlink_file", "$base/$fn.dom"); }; my $err = $@; if ($err) { $err =~ s/\s+at\s+\S+\s+line\s+\d+.*//g; &$second_print(&text('backup_edelwebmin',$err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($st[7]))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 3 && $host =~ /\%/) { # Search S3 for S3 buckets matching the regexp local $buckets = &s3_list_buckets($user, $pass); if (!ref($buckets)) { &$second_print(&text('backup_purgeebuckets', $buckets)); return 0; } foreach my $b (@$buckets) { if ($detail) { &$first_print(&text('backup_purgeposs2', $b->{'Name'}, $b->{'CreationDate'})); } if ($b->{'Name'} =~ /^$re$/) { # Found one to delete local $ctime = &s3_parse_date($b->{'CreationDate'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingbucket', "$b->{'Name'}", $old)); # Sum up size of files local $files = &s3_list_files($user, $pass, $b->{'Name'}); local $sz = 0; if (ref($files)) { foreach my $f (@$files) { $sz += $f->{'Size'}; } } local $err = &s3_delete_bucket($user, $pass, $b->{'Name'}); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($sz))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 3 && $path =~ /\%/) { # Search for S3 files under the bucket local $files = &s3_list_files($user, $pass, $host); if (!ref($files)) { &$second_print(&text('backup_purgeefiles', $files)); return 0; } foreach my $f (@$files) { if ($detail) { &$first_print(&text('backup_purgeposs', $f->{'Key'}, $f->{'LastModified'})); } if (($f->{'Key'} =~ /^$re$/ || $f->{'Key'} =~ /^$re\/.*\.(tar\.gz|tar\.bz2|zip|tar)$/) && $f->{'Key'} !~ /\.(dom|info)$/) { # Found one to delete local $ctime = &s3_parse_date($f->{'LastModified'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingfile', "$f->{'Key'}", $old)); local $err = &s3_delete_file($user, $pass, $host, $f->{'Key'}); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &s3_delete_file($user, $pass, $host, $f->{'Key'}.".info"); &s3_delete_file($user, $pass, $host, $f->{'Key'}.".dom"); &$second_print(&text('backup_deleted', &nice_size($f->{'Size'}))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 6 && $host =~ /\%/) { # Search Rackspace for 
containers matching the regexp local $rsh = &rs_connect($config{'rs_endpoint'}, $user, $pass); if (!ref($rsh)) { return &text('backup_purgeersh', $rsh); } local $containers = &rs_list_containers($rsh); if (!ref($containers)) { &$second_print(&text('backup_purgeecontainers', $containers)); return 0; } foreach my $c (@$containers) { local $st = &rs_stat_container($rsh, $c); next if (!ref($st)); if ($detail) { &$first_print(&text('backup_purgeposs3', $c, $st->{'X-Timestamp'})); } if ($c =~ /^$re$/) { # Found one to delete local $ctime = int($st->{'X-Timestamp'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingcontainer', "$c", $old)); local $err = &rs_delete_container($rsh, $c, 1); if ($err) { &$second_print( &text('backup_edelcontainer',$err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($st->{'X-Container-Bytes-Used'}))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 6 && $path =~ /\%/) { # Search for Rackspace files under the container local $rsh = &rs_connect($config{'rs_endpoint'}, $user, $pass); if (!ref($rsh)) { return &text('backup_purgeersh', $rsh); } local $files = &rs_list_objects($rsh, $host); if (!ref($files)) { &$second_print(&text('backup_purgeefiles2', $files)); return 0; } foreach my $f (@$files) { local $st = &rs_stat_object($rsh, $host, $f); next if (!ref($st)); if ($detail) { &$first_print(&text('backup_purgeposs', $f, $st->{'X-Timestamp'})); } if ($f =~ /^$re($|\/)/ && $f !~ /\.(dom|info)$/ && $f !~ /\.\d+$/) { # Found one to delete local $ctime = int($st->{'X-Timestamp'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingfile', "$f", $old)); local $err = &rs_delete_object($rsh, $host, $f); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &rs_delete_object($rsh, $host, $f.".dom"); &rs_delete_object($rsh, $host, $f.".info"); &$second_print(&text('backup_deleted', &nice_size($st->{'Content-Length'}))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 7 && $host =~ /\%/) { # Search Google for buckets matching the regexp local $buckets = &list_gcs_buckets(); if (!ref($buckets)) { &$second_print(&text('backup_purgeegcbuckets', $buckets)); return 0; } foreach my $st (@$buckets) { my $c = $st->{'name'}; if ($detail) { &$first_print(&text('backup_purgeposs2', $c, $st->{'timeCreated'})); } if ($c =~ /^$re$/) { # Found one with a name to delete local $ctime = &google_timestamp($st->{'timeCreated'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingbucket', "$c", $old)); local $st2 = &stat_gcs_bucket($c, 1); local $err = &delete_gcs_bucket($c, 1); if ($err) { &$second_print( &text('backup_edelbucket', $err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($st2->{'size'}))); $pcount++; } } 
elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 7 && $path =~ /\%/) { # Search for Google files under the bucket local $files = &list_gcs_files($host); if (!ref($files)) { &$second_print(&text('backup_purgeefiles3', $files)); return 0; } foreach my $st (@$files) { my $f = $st->{'name'}; if ($detail) { &$first_print(&text('backup_purgeposs', $f, $st->{'updated'})); } if ($f =~ /^$re($|\/)/ && $f !~ /\.(dom|info)$/ && $f !~ /\.\d+$/) { # Found one to delete local $ctime = &google_timestamp($st->{'updated'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingfile', "$f", $old)); local $err = &delete_gcs_file($host, $f); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &delete_gcs_file($host, $f.".dom"); &delete_gcs_file($host, $f.".info"); &$second_print(&text('backup_deleted', &nice_size($st->{'size'}))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 8) { # Search for Dropbox files matching the date pattern local $files = &list_dropbox_files($base); if (!ref($files)) { &$second_print(&text('backup_purgeefiles4', $files)); return 0; } foreach my $st (@$files) { my $f = $st->{'path_display'}; $f =~ s/^\/?\Q$base\E\/?// || next; local $ctime; if ($st->{'.tag'} eq 'folder') { # Age is age of the oldest file $ctime = time(); my $subfiles = &list_dropbox_files( $st->{'path_display'}); if (ref($subfiles)) { foreach my $sf (@$subfiles) { my $subctime = &dropbox_timestamp( $sf->{'client_modified'}); $ctime = $subctime if ($subctime && $subctime < $ctime); } } } else { $ctime = &dropbox_timestamp($st->{'client_modified'}); } if ($detail) { &$first_print(&text('backup_purgeposs', $f, &make_date($ctime))); } if ($f =~ /^$re($|\/)/ && $f !~ /\.(dom|info)$/) { # Found one to delete $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingfile', "$f", $old)); my $p = $st->{'path'}; $p =~ s/^\///; my $size = $st->{'.tag'} eq 'folder' ? &size_dropbox_directory($p) : $st->{'size'}; local $dropbase = $base; $dropbase =~ s/^\///; local $err = &delete_dropbox_path($dropbase, $f); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &delete_dropbox_path($dropbase, $f.".dom"); &delete_dropbox_path($dropbase, $f.".info"); &$second_print(&text('backup_deleted', &nice_size($size))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 10 && $host =~ /\%/) { # Search for Backblaze for buckets matching the date pattern my $buckets = &list_bb_buckets(); if (!ref($buckets)) { &$second_print(&text('backup_purgeebbbuckets', $buckets)); return 0; } foreach my $st (@$buckets) { my $f = $st->{'name'}; my $info = &get_bb_bucket($f); if ($detail) { &$first_print(&text('backup_purgeposs2a', $f)); } if ($f =~ /^$re$/) { # Found one with a name to delete .. 
check the age of # the newest file my $ctime = 0; my $files = &list_bb_files($f); next if (!ref($files)); my $totalsize = 0; foreach my $bf (@$files) { $ctime = $bf->{'time'} if ($bf->{'time'} > $ctime); $totalsize += $bf->{'size'}; } $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } my $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingbucket', "$f", $old)); # Delete all the files in the bucket, then itself my $err; foreach my $bf (@$files) { $err = &delete_bb_file($f, $bf->{'name'}); next if ($err); } $err = &delete_bb_bucket($f) if (!$err); if ($err) { &$second_print( &text('backup_edelbucket', $err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($totalsize))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 10 && $path =~ /\%/) { # Search for Backblaze for files matching the date pattern my $dir; if ($re =~ /^(.*)\//) { $dir = $1; } local $files = &list_bb_files($base, $dir); if (!ref($files)) { &$second_print(&text('backup_purgeefiles5', $files)); return 0; } foreach my $st (@$files) { my $f = $st->{'name'}; my $ctime; if ($st->{'folder'}) { # Age is age of the oldest file $ctime = time(); my $subfiles = &list_bb_files($base, $f); if (ref($subfiles)) { foreach my $sf (@$subfiles) { $ctime = $sf->{'time'} if ($sf->{'time'} && $sf->{'time'} < $ctime); } } } else { $ctime = $st->{'time'}; } if ($detail) { &$first_print(&text('backup_purgeposs', $f, &make_date($ctime))); } if ($f =~ /^$re($|\/)/ && $f !~ /\.(dom|info)$/) { # Found one to delete $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } my ($size, $err); if ($st->{'folder'}) { &$first_print(&text('backup_deletingdir', "$f", $old)); $size = &size_bb_directory($base, $f); $err = &delete_bb_directory($base, $f); } else { &$first_print(&text('backup_deletingfile', "$f", $old)); $size = $st->{'size'}; $err = &delete_bb_file($base, $f); } if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &delete_bb_file($base, $f.".dom"); &delete_bb_file($base, $f.".info"); &$second_print(&text('backup_deleted', &nice_size($size))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 11 && $path =~ /\%/) { # Search for Azure files under the container local $files = &list_azure_files($host); if (!ref($files)) { &$second_print(&text('backup_purgeefiles3', $files)); return 0; } foreach my $st (@$files) { my $f = $st->{'name'}; if ($detail) { &$first_print(&text('backup_purgeposs', $f, $st->{'properties'}->{'lastModified'})); } if ($f =~ /^$re($|\/)/ && $f !~ /\.(dom|info)$/ && $f !~ /\.\d+$/) { # Found one to delete local $ctime = &google_timestamp( $st->{'properties'}->{'lastModified'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingfile', "$f", $old)); local $err = &delete_azure_file($host, $f); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { 
&delete_azure_file($host, $f.".dom"); &delete_azure_file($host, $f.".info"); &$second_print(&text('backup_deleted', &nice_size($st->{'properties'}->{'contentLength'}))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 12 && $path =~ /\%/) { # Search for Google drive files under the folder local $files = &list_drive_files($host, 1); if (!ref($files)) { &$second_print(&text('backup_purgeefiles6', $files)); return 0; } foreach my $st (@$files) { my $f = $st->{'name'}; my $info; if ($detail) { &$first_print(&text('backup_purgeposs', $f, $st->{'modifiedTime'})); } if ($f =~ /^$re($|\/)/ && $f !~ /\.(dom|info)$/ && $f !~ /\.\d+$/) { # Found one to delete local $ctime = &google_timestamp( $st->{'modifiedTime'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingfile', "$f", $old)); local $err = &delete_drive_file($host, $f); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &delete_drive_file($host, $f.".dom"); &delete_drive_file($host, $f.".info"); &$second_print(&text('backup_deleted', &nice_size($st->{'size'}))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } elsif ($mode == 12 && $host =~ /\%/) { # Search for Google drive folders my $parent; my $pfx = ""; if ($re =~ /^(.*)\/([^\/]+)$/) { my $pname = $1; $re = $2; $parent = &get_drive_folder($pname, 0); return $parent if (!ref($parent)); $pfx = $pname."/"; } local $folders = &list_drive_folders(1, $parent); if (!ref($folders)) { &$second_print(&text('backup_purgeefiles6', $folders)); return 0; } foreach my $st (@$folders) { my $f = $st->{'name'}; my $info; if ($detail) { &$first_print(&text('backup_purgeposs4', $f, $st->{'modifiedTime'})); } if ($f =~ /^$re$/) { # Found one to delete local $ctime = &google_timestamp( $st->{'modifiedTime'}); $mcount++; if (!$ctime || $ctime >= $cutoff) { if ($detail) { &$second_print(&text('backup_purgenew', &make_date($cutoff))); } next; } local $old = int((time() - $ctime) / (24*60*60)); if ($detail) { &$second_print(&text('backup_purgecan', $re, $old)); } &$first_print(&text('backup_deletingdir', "$f", $old)); my $sz = &size_drive_folder($pfx.$f); my $err = &delete_drive_folder($pfx.$f); if ($err) { &$second_print(&text('backup_edelbucket',$err)); $ok = 0; } else { &$second_print(&text('backup_deleted', &nice_size($sz))); $pcount++; } } elsif ($detail) { &$second_print(&text('backup_purgepat', $re)); } } } &$outdent_print(); &$second_print($pcount ? &text('backup_purged', $pcount, $mcount - $pcount) : $mcount ? 
&text('backup_purgedtime', $mcount) : $text{'backup_purgednone'}); return $ok; } # write_backup_log(&domains, dest, differential?, start, size, ok?, # "cgi"|"sched"|"api", output, &errordoms, [user], [&key], # [schedule-id], [separate-format], [allow-owner-restore], # [compression], [description], [&sched]) # Record that some backup was made and succeeded or failed sub write_backup_log { local ($doms, $dest, $increment, $start, $size, $ok, $mode, $output, $errdoms, $user, $key, $schedid, $separate, $ownrestore, $compression, $desc, $sched) = @_; $compression = $config{'compression'} if (!defined($compression) || $compression eq ''); if (!-d $backups_log_dir) { &make_dir($backups_log_dir, 0700); } my @plugged; if ($sched && $sched->{plugged}) { my %plugged = map { $_ => $sched->{$_} } grep { /^plugged/ } keys %$sched; $plugged{plugged_opts} = $sched->{"backup_opts_$sched->{plugged}"}; @plugged = %plugged; } local %log = ( 'doms' => join(' ', map { $_->{'dom'} } @$doms), 'errdoms' => join(' ', map { $_->{'dom'} } @$errdoms), 'dest' => $dest, 'increment' => $increment, 'start' => $start, 'end' => time(), 'size' => $size, 'ok' => $ok, 'user' => $user || $remote_user, 'mode' => $mode, 'key' => $key->{'id'}, 'sched' => $schedid, 'compression' => $compression, 'separate' => $separate, 'ownrestore' => $ownrestore, 'desc' => $desc, @plugged, ); $main::backup_log_id_count++; $log{'id'} = $log{'end'}."-".$$."-".$main::backup_log_id_count; &write_file("$backups_log_dir/$log{'id'}", \%log); if ($output) { &open_tempfile(OUTPUT, ">$backups_log_dir/$log{'id'}.out"); &print_tempfile(OUTPUT, $output); &close_tempfile(OUTPUT); } if ($config{'backuplog_age'}) { # Delete logs older than this number of days my @del; my $cutoff = time() - $config{'backuplog_age'}*86400; opendir(LOGS, $backups_log_dir); while(my $id = readdir(LOGS)) { next if ($id eq "." || $id eq ".."); next if ($id =~ /\.out$/); my ($time, $pid, $count) = split(/\-/, $id); if ($time < $cutoff) { push(@del, $backups_log_dir."/".$id); push(@del, $backups_log_dir."/".$id.".out"); } } closedir(LOGS); if (@del) { &unlink_file(@del); } } } # list_backup_logs([start-time]) # Returns a list of all backup logs, optionally limited to after some time sub list_backup_logs { local ($start) = @_; local @rv; opendir(LOGS, $backups_log_dir); while(my $id = readdir(LOGS)) { next if ($id eq "." 
|| $id eq ".."); next if ($id =~ /\.out$/); my ($time, $pid, $count) = split(/\-/, $id); next if (!$time || !$pid); next if ($start && $time < $start); local %log; &read_file("$backups_log_dir/$id", \%log) || next; $log{'output'} = &read_file_contents("$backups_log_dir/$id.out"); $log{'id'} = $id; push(@rv, \%log); } close(LOGS); return @rv; } # get_backup_log(id) # Read and return a single logged backup sub get_backup_log { local ($id) = @_; local %log; &read_file("$backups_log_dir/$id", \%log) || return undef; $log{'output'} = &read_file_contents("$backups_log_dir/$id.out"); return \%log; } # delete_backup_log(&log) # Deletes the log entry for a backup sub delete_backup_log { my ($log) = @_; $log->{'id'} || return "Backup log to delete has no ID!"; &unlink_logged("$backups_log_dir/$log->{'id'}"); return undef; } # record_backup_bandwidth(&domain, bytes-in, bytes-out, start, end) # Add to the bandwidth files for some domain data transfer used by a backup sub record_backup_bandwidth { local ($d, $inb, $outb, $start, $end) = @_; if ($config{'bw_backup'}) { local $bwinfo = &get_bandwidth($d); local $startday = int($start / (24*60*60)); local $endday = int($end / (24*60*60)); for(my $day=$startday; $day<=$endday; $day++) { $bwinfo->{"backup_".$day} += $outb / ($endday - $startday + 1); $bwinfo->{"restore_".$day} += $inb / ($endday - $startday + 1); } &save_bandwidth($d, $bwinfo); } } # check_backup_limits(as-owner, on-schedule, dest) # Check if the limit on the number of running backups has been exceeded, and # if so either waits or returns an error. Returns undef if OK to proceed. May # print a message if waiting. sub check_backup_limits { local ($asowner, $sched, $dest) = @_; local %maxes; local $start = time(); local $printed; while(1) { # Lock the file listing current backups, clean it up and read it &lock_file($backup_maxes_file); &cleanup_backup_limits(1); %maxes = ( ); &read_file($backup_maxes_file, \%maxes); # Check if we are under the limit, or it doesn't apply local @pids = keys %maxes; local $waiting = time() - $start; if (!$config{'max_backups'} || @pids < $config{'max_backups'} || !$asowner && $config{'max_all'} == 0 || !$sched && $config{'max_manual'} == 0) { # Under the limit, or no limit applies in this case if ($printed) { &$second_print($text{'backup_waited'}); } last; } elsif (!$config{'max_timeout'}) { # Too many, and no timeout is set .. give up now &unlock_file($backup_maxes_file); return &text('backup_maxhit', scalar(@pids), $config{'max_backups'}); } elsif ($waiting < $config{'max_timeout'}) { # Too many, but still under timeout .. wait for a while &unlock_file($backup_maxes_file); if (!$printed) { &$first_print(&text('backup_waiting', $config{'max_backups'})); $printed++; } sleep(10); } else { # Over the timeout .. 
give up &unlock_file($backup_maxes_file); return &text('backup_waitfailed', $config{'max_timeout'}); } } # Add this job to the file $maxes{$$} = $dest; &write_file($backup_maxes_file, \%maxes); &unlock_file($backup_maxes_file); return undef; } # cleanup_backup_limits([no-lock], [include-this]) # Delete from the backup limits file any entries for PIDs that are not running sub cleanup_backup_limits { local ($nolock, $includethis) = @_; local (%maxes, $changed); &lock_file($backup_maxes_file) if (!$nolock); &read_file($backup_maxes_file, \%maxes); foreach my $pid (keys %maxes) { if (!kill(0, $pid) || ($includethis && $pid == $$)) { delete($maxes{$pid}); $changed++; } } if ($changed) { &write_file($backup_maxes_file, \%maxes); } &unlock_file($backup_maxes_file) if (!$nolock); } # get_scheduled_backup_dests(&sched) # Returns a list of destinations for some scheduled backup sub get_scheduled_backup_dests { local ($sched) = @_; local @dests = ( $sched->{'dest0'} || $sched->{'dest'} ); for(my $i=1; $sched->{'dest'.$i}; $i++) { push(@dests, $sched->{'dest'.$i}); } return @dests; } # get_scheduled_backup_purges(&sched) # Returns a list of purge times for some scheduled backup sub get_scheduled_backup_purges { local ($sched) = @_; local @purges = ( $sched->{'purge0'} || $sched->{'purge'} ); for(my $i=1; exists($sched->{'purge'.$i}); $i++) { push(@purges, $sched->{'purge'.$i}); } return @purges; } # get_scheduled_backup_keys(&sched) # Returns a list of encryption key IDs for some scheduled backup sub get_scheduled_backup_keys { local ($sched) = @_; local @keys = ( $sched->{'key0'} || $sched->{'key'} ); for(my $i=1; exists($sched->{'key'.$i}); $i++) { push(@keys, $sched->{'key'.$i}); } return @keys; } # clean_domain_passwords(&domain) # Removes any passwords or other secure information from a domain hash sub clean_domain_passwords { local ($d) = @_; local $rv = { %$d }; foreach my $f ("pass", "enc_pass", "mysql_pass", "postgres_pass") { delete($rv->{$f}); } return $rv; } # rename_backup_owner(&domain, &old-domain) # Updates all scheduled backups and backup keys to reflect a username change sub rename_backup_owner { local ($d, $oldd) = @_; local $owner = $d->{'parent'} ? &get_domain($d->{'parent'})->{'user'} : $d->{'user'}; local $oldowner = $oldd->{'parent'} ? 
&get_domain($oldd->{'parent'})->{'user'} : $oldd->{'user'}; if ($owner ne $oldowner) { if (defined(&list_backup_keys)) { foreach my $key (&list_backup_keys()) { if ($key->{'owner'} eq $oldowner) { $key->{'owner'} = $owner; &save_backup_key($key); } } } } } # merge_ipinfo_domain(&domain, &ipinfo) # Update the IP in a domain based on an ipinfo hash sub merge_ipinfo_domain { local ($d, $ipinfo) = @_; $d->{'virt'} = $ipinfo->{'virt'}; $d->{'ip'} = $ipinfo->{'ip'}; $d->{'virtalready'} = $ipinfo->{'virtalready'}; $d->{'netmask'} = $ipinfo->{'netmask'}; $d->{'name'} = !$ipinfo->{'virt'}; if ($ipinfo->{'ip6'}) { $d->{'virt6'} = $ipinfo->{'virt6'}; $d->{'ip6'} = $ipinfo->{'ip6'}; $d->{'virt6already'} = $ipinfo->{'virt6already'}; $d->{'netmask6'} = $ipinfo->{'netmask6'}; $d->{'name6'} = !$ipinfo->{'virt6'}; } } # start_running_backup(&backup) # Write out a status file indicating that some backup is running sub start_running_backup { my ($sched) = @_; if (!-d $backups_running_dir) { &make_dir($backups_running_dir, 0700); } my $file = $backups_running_dir."/".$sched->{'id'}."-".$$; my %hash = %$sched; $hash{'pid'} = $$; $hash{'scripttype'} = $main::webmin_script_type; $hash{'started'} = time(); if ($main::webmin_script_type eq 'web') { $hash{'webminuser'} = $remote_user; } &write_file($file, \%hash); } # stop_running_backup(&backup) # Clear the status file indicating that some backup is running sub stop_running_backup { my ($sched) = @_; my $file = $backups_running_dir."/".$sched->{'id'}."-".$$; unlink($file); } # list_running_backups() # Returns a list of the hash refs for currently running backups sub list_running_backups { my @rv; opendir(RUNNING, $backups_running_dir); my @files = readdir(RUNNING); closedir(RUNNING); foreach my $f (@files) { next if ($f eq "." || $f eq ".."); next if ($f !~ /^(\S+)\-(\d+)$/); my %sched; &read_file("$backups_running_dir/$f", \%sched) || next; if ($sched{'pid'} && kill(0, $sched{'pid'})) { push(@rv, \%sched); } else { unlink("$backups_running_dir/$f"); } } return @rv; } # kill_running_backup(&sched) # Kills one scheduled running backup sub kill_running_backup { my ($sched) = @_; $sched->{'pid'} || &error("Backup has no PID!"); foreach my $pid (&find_backup_subprocesses($sched->{'pid'})) { &kill_logged(9, $pid); } my $file = $backups_running_dir."/".$sched->{'id'}."-".$sched->{'pid'}; unlink($file); } # find_backup_subprocesses(pid, [&procs]) # Returns a list of all subprocesses of the given PID sub find_backup_subprocesses { my ($pid, $procs) = @_; &foreign_require("proc"); $procs ||= [ &proc::list_processes() ]; my @rv = ( $pid ); foreach my $sp (map { $_->{'pid'} } grep { $_->{'ppid'} == $pid } @$procs) { push(@rv, &find_backup_subprocesses($sp, $procs)); } return @rv; } # delete_backup_from_log(&log) # If a backup log used a separate file for each domain, delete them all sub delete_backup_from_log { my ($log) = @_; my $dest = $log->{'dest'}; my $c = defined($log->{'compression'}) ? 
$log->{'compression'} : $config{'compression'}; my $sfx = &compression_to_suffix($c); if ($log->{'separate'}) { my $err; foreach my $dname (split(/\s+/, $log->{'doms'})) { my $ddest = $dest."/".$dname.".".$sfx; $err ||= &delete_backup($ddest); } return $err; } else { return &delete_backup($dest); } } # delete_backup(dest) # Delete the backup from some destination path, like /backup/foo.com.tar.gz sub delete_backup { my ($dest) = @_; my ($proto, $user, $pass, $host, $path, $port) = &parse_backup_url($dest); my $rsh; foreach my $sfx ("", ".info", ".dom") { my $spath = $path.$sfx; my $err; if ($proto == 0) { # File on this system (but skip if missing) if (-e $spath) { $err = &unlink_logged($spath) ? undef : $!; } } elsif ($proto == 1) { # FTP server &ftp_deletefile($host, $spath, \$err, $user, $pass, $port); } elsif ($proto == 2) { # SSH server my $sshcmd = "ssh".($port ? " -p $port" : "")." ". $config{'ssh_args'}." ". ($user ? $user."\@" : "").$host; my $rmcmd = $sshcmd." rm -rf ".quotemeta($spath); &run_ssh_command($rmcmd, $pass, \$err); } elsif ($proto == 3) { # S3 bucket file $err = &s3_delete_file($user, $pass, $host, $spath); } elsif ($proto == 6) { # Rackspace container file $rsh ||= &rs_connect($config{'rs_endpoint'}, $user, $pass); $err = &rs_delete_object($rsh, $host, $spath); } elsif ($proto == 7) { # GCS bucket file $err = &delete_gcs_file($host, $spath); } elsif ($proto == 8) { # Dropbox file if ($spath =~ /^(.*)\/([^\/]+)$/) { my ($dir, $f) = ($1, $2); $err = &delete_dropbox_path($dir, $f); } else { $err = &delete_dropbox_path($spath); } } elsif ($proto == 10) { # Backblaze bucket file $err = &delete_bb_file($host, $spath); } else { return "Deletion of remote backups is not supported yet"; } if ($err && !$sfx) { return $err; } } return undef; } # compression_to_suffix(format) # Converts a compression format integer to a filename suffix. Supported formats # are 0 (gzipped tar), 1 (bzipped tar), 2 (plain tar), 3 (ZIP), 4 (zstd tar) sub compression_to_suffix { my ($c) = @_; return $c == 0 ? "tar.gz" : $c == 1 ? "tar.bz2" : $c == 2 ? "tar" : $c == 3 ? "zip" : $c == 4 ? "tar.zst" : "unknown"; } # compression_to_suffix_inner(format) # Converts a compression format integer to a filename suffix for internal # archive files. sub compression_to_suffix_inner { my ($c) = @_; return $c == 3 ? "zip" : 'tar'; } # suffix_to_compression(filename) # Use the suffix of a filename to determine the compression format number. # Supported formats are 0 (gzipped tar), 1 (bzipped tar), 2 (plain tar), # 3 (ZIP), 4 (zstd tar) sub suffix_to_compression { my ($file) = @_; return $file =~ /\.zip$/i ? 3 : $file =~ /\.tar\.zst$/i ? 4 : $file =~ /\.tar\.gz$/i ? 0 : $file =~ /\.tar\.bz2$/i ? 1 : $file =~ /\.tar$/i ? 
2 : -1; } # set_backup_envs(&backup, &doms, ok-flag, &error-doms, [force-timestamp]) # Set environment variables from a backup object sub set_backup_envs { my ($sched, $doms, $status, $errdoms, $timestamp) = @_; foreach my $k (keys %$sched) { $ENV{'BACKUP_'.uc($k)} = $sched->{$k}; } if ($sched->{'strftime'}) { # Expand out date-based paths foreach my $k (keys %$sched) { if ($k eq 'dest' || $k =~ /^dest\d+$/) { $ENV{'BACKUP_'.uc($k)} = &backup_strftime($sched->{$k}, $timestamp); } } } $ENV{'BACKUP_DOMAIN_NAMES'} = join(" ", map { $_->{'dom'} } @$doms); $ENV{'BACKUP_ERROR_NAMES'} = join(" ", map { $_->{'dom'} } @$errdoms) if ($errdoms); $ENV{'BACKUP_STATUS'} = $status if (defined($status)); } # reset_backup_envs() # Clear variables set by set_backup_envs sub reset_backup_envs { foreach my $e (keys %ENV) { delete($ENV{$e}) if ($e =~ /^(BACKUP_)/); } } # dest_to_webmin(&dest-string) # Converts a backup destination string into a Webmin server object sub dest_to_webmin { my ($dest) = @_; my ($mode, $user, $pass, $server, $path, $port) = &parse_backup_url($dest); # Clear any previous handler that would prefer error from calling die &remote_error_setup(undef); # Find existing registered server, if any &foreign_require("servers"); my @servers = &servers::list_servers(); my ($already) = grep { $_->{'host'} eq $server && $_->{'port'} == $port } @servers; if (!$already) { ($already) = grep { $_->{'host'} eq $server } @servers; } # Construct a server object using provided and stored info $user ||= $already->{'user'} if ($already); $pass ||= $already->{'pass'} if ($already); $port ||= $already->{'port'} if ($already); return { 'host' => $server, 'ip' => $already ? $already->{'ip'} : undef, 'ip6_force' => $already ? $already->{'ip6_force'} : undef, 'port' => $port || 10000, 'ssl' => $already ? $already->{'ssl'} : 1, 'fast' => $already ? $already->{'fast'} : 1, 'user' => $user, 'pass' => $pass }; } # expand_glob_to_files(directory, glob, ...) # Given a list of globs relative to some directory, return the actual files # also relative to that directory sub expand_glob_to_files { my ($dir, @globs) = @_; my @files; foreach my $g (@globs) { push(@files, glob("$dir/$g")); } foreach my $f (@files) { $f =~ s/^\Q$dir\E\///; } return @files; } # sync_directories(source, target, [&rsync_flags], [$d]) # Runs an rsync command to sync source to target with optional extra parameters. # If $d is provided, the command is run as the domain owner. The function # returns the output of the rsync command, and in list context, it also returns # the exit status and the command run. sub sync_directories { my ($source, $target, $rsync_flags, $d) = @_; my $rsync = &has_command("rsync"); my $emsg = &text('restore_ersync', 'rsync'); return (wantarray ? ($emsg, 1) : 1) if (!$rsync); my $cmd = "$rsync -av "; $cmd .= join(' ', @$rsync_flags) . " " if (ref($rsync_flags) && @$rsync_flags); $cmd .= quotemeta("$source/") . ' ' . quotemeta("$target/"); my $out; if ($d) { $out = &run_as_domain_user($d, "$cmd 2>&1"); } else { $out = &backquote_command("$cmd 2>&1"); } return wantarray ? ($out, $?, $cmd) : $?; } # has_feature_link(&hash, [key], [absolute]) # Returns feature link based on the given object sub has_feature_link { my ($sched, $key, $abs) = @_; $key //= 'plugged'; return '' unless ($sched && $sched->{$key}); return ($abs ? 
&get_webprefix() : '..')."/$sched->{$key}/"; } # check_backup_pluging(\%sched, [key-name]) # Build a normalized plugin hash sub check_backup_pluging { my ($sched, $name_key) = @_; $name_key //= 'plugged'; return () unless ($sched && ref $sched && exists $sched->{$name_key}); my %sched = map { $_ => $sched->{$_} } grep { /^$name_key/i } keys %$sched; my %plugged; # Plugin name if (my $name = delete $sched{plugged}) { $plugged{name} = $name; } # Plugin opts my $opts_src; $opts_src = $sched->{"backup_opts_$plugged{$name_key}"} if (defined $plugged{$name_key}); $opts_src = delete $sched{plugged_opts} if (!defined $opts_src); if (defined $opts_src) { if (ref $opts_src) { $plugged{opts} = $opts_src; } else { my %opts = map { my ($k, $v) = split /=/, $_, 2; $k => (defined $v ? $v : '') } grep { length } split /,/, $opts_src; $plugged{opts} = \%opts; } } # Plugin flags foreach my $k (keys %sched) { next unless $k =~ /^plugged_(.+)/i; $plugged{ lc $1 } = $sched{$k}; } return %plugged; } # backup_fmt_javascript() # Returns JS for use inside backup_form.cgi to update the recommended path sub backup_fmt_javascript { return < setTimeout(function() { let extension; const modes = document.querySelectorAll('[name="fmt"]'), nameFields = document.querySelectorAll('[data-backup-path]'), compressions = document.querySelectorAll('[name="compression"]'), inputFields = document.querySelectorAll('[name\$="path"], [name\$="file"]'), strfTime = document.querySelectorAll('[name="strftime"]'), strfTimeFn = function() { return strfTime && strfTime[0] && strfTime[0].checked }, modes_check = function() { Array.from(modes).find(radio => radio.checked).dispatchEvent(new Event('input')); }, compressions_check = function() { Array.from(compressions).find(radio => radio.checked).dispatchEvent(new Event('input')); }, updateNameFields = function(directory) { if (nameFields && nameFields.length) { nameFields.forEach(function(td) { const filetext = td.dataset.backupPathFile, dirtext = td.dataset.backupPathDir; if (directory) { td.innerHTML = dirtext; } else { td.innerHTML = filetext; } }); } }; modes.forEach(function(radio) { radio.addEventListener('input', function() { const placeholdertype = this.value, filename = strfTimeFn() ? 'domains-%Y-%m-%d-%H-%M' : 'domains'; inputFields.forEach(function(input) { if (placeholdertype == '0') { input.placeholder = \`e.g. /backups/\$\{filename\}.\$\{extension\}\`; updateNameFields(); } else { const backupPlaceHolder = strfTimeFn() ? 'backups/backup-%Y-%m-%d' : 'backups'; input.placeholder = \`e.g. /\$\{backupPlaceHolder\}/\`; updateNameFields('dir'); } }); }); }); (strfTime && strfTime[0]) && strfTime[0].addEventListener('input', modes_check); compressions.forEach(function(compression) { compression.addEventListener('input', function() { const v = parseInt(this.value || $config{'compression'}), a = {3: 'zip', 2: 'tar', 1: 'tar.bz2', 0: 'tar.gz'}; extension = a[v]; modes_check(); }); }); compressions_check(); modes_check(); }); EOF } # can_trust_backup(&log) # Returns 1 if a backup can be safely restored by the current user. This is true # if the backup was created by root, or by the usewr currently logged in. sub can_trust_backup { my ($log) = @_; return $log->{'user'} eq $remote_user || $log->{'user'} eq 'root' || !$log->{'user'} || $log->{'ownrestore'}; } # prune_all_features_for_backup(&features) # Remove features whose plugin does not support backup for all features. 
sub prune_all_features_for_backup { my @features = @_; my %rm = map { $_, 1 } grep { &plugin_defined($_, 'feature_backup_no_all_features') && &plugin_call($_, 'feature_backup_no_all_features') } &list_backup_plugins(); return grep { !$rm{$_} } @features; } 1;