Obtain job details from Arvados, run tasks on compute nodes (typically
invoked by scheduler on controller):
- crunch-job --job x-y-z
+ crunch-job --job x-y-z --git-dir /path/to/repo/.git
Obtain job details from command line, run tasks on local machine
(typically invoked by application or developer on VM):
- crunch-job --job '{"script_version":"/path/to/tree","script":"scriptname",...}'
+ crunch-job --job '{"script_version":"/path/to/working/tree","script":"scriptname",...}'
+
+ crunch-job --job '{"repository":"https://github.com/curoverse/arvados.git","script_version":"master","script":"scriptname",...}'
=head1 OPTIONS
=item --git-dir
-Path to .git directory where the specified commit is found.
+Path to a .git directory (or a git URL) where the commit given in the
+job's C<script_version> attribute is to be found. If this is I<not>
+given, the job's C<repository> attribute will be used.
=item --job-api-token
setup. This can speed up development and debugging when running jobs
locally.
+=item --job
+
+UUID of the job to run, or a JSON-encoded job resource without a
+UUID. If the latter is given, a new job object will be created.
+
=back
=head1 RUNNING JOBS LOCALLY
use strict;
use POSIX ':sys_wait_h';
+use POSIX qw(strftime);
use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use Arvados;
+use Data::Dumper;
use Digest::MD5 qw(md5_hex);
use Getopt::Long;
use IPC::Open2;
use IO::Select;
use File::Temp;
use Fcntl ':flock';
+use File::Path qw( make_path remove_tree );
+
+use constant EX_TEMPFAIL => 75;
$ENV{"TMPDIR"} ||= "/tmp";
unless (defined $ENV{"CRUNCH_TMP"}) {
$ENV{"CRUNCH_TMP"} .= "-$<";
}
}
+
+# Create the tmp directory if it does not exist
+if ( ! -d $ENV{"CRUNCH_TMP"} ) {
+ make_path $ENV{"CRUNCH_TMP"} or die "Failed to create temporary working directory: " . $ENV{"CRUNCH_TMP"};
+}
+
$ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});
-my $arv_cli;
-
-if (defined $ENV{"ARV_CLI"}) {
- $arv_cli = $ENV{"ARV_CLI"};
-}
-else {
- $arv_cli = 'arv';
-}
-
my $force_unlock;
my $git_dir;
my $jobspec;
}
my $have_slurm = exists $ENV{SLURM_JOBID} && exists $ENV{SLURM_NODELIST};
-my $job_has_uuid = $jobspec =~ /^[-a-z\d]+$/;
-my $local_job = !$job_has_uuid;
+my $local_job = 0;
$SIG{'USR1'} = sub
my $arv = Arvados->new('apiVersion' => 'v1');
-my $local_logfile;
-
-my $User = $arv->{'users'}->{'current'}->execute;
-my $Job = {};
+my $Job;
my $job_id;
my $dbh;
my $sth;
-if ($job_has_uuid)
+my @jobstep;
+
+my $User = api_call("users/current");
+
+if ($jobspec =~ /^[-a-z\d]+$/)
{
- $Job = $arv->{'jobs'}->{'get'}->execute('uuid' => $jobspec);
+ # $jobspec is an Arvados UUID, not a JSON job specification
+ $Job = api_call("jobs/get", uuid => $jobspec);
if (!$force_unlock) {
- if ($Job->{'is_locked_by_uuid'}) {
- croak("Job is locked: " . $Job->{'is_locked_by_uuid'});
- }
- if ($Job->{'success'} ne undef) {
- croak("Job 'success' flag (" . $Job->{'success'} . ") is not null");
- }
- if ($Job->{'running'}) {
- croak("Job 'running' flag is already set");
- }
- if ($Job->{'started_at'}) {
- croak("Job 'started_at' time is already set (" . $Job->{'started_at'} . ")");
- }
+ # Claim this job, and make sure nobody else does
+ eval { api_call("jobs/lock", uuid => $Job->{uuid}); };
+ if ($@) {
+ Log(undef, "Error while locking job, exiting ".EX_TEMPFAIL);
+ exit EX_TEMPFAIL;
+ };
}
}
else
$Job->{'is_locked_by_uuid'} = $User->{'uuid'};
$Job->{'started_at'} = gmtime;
+ $Job->{'state'} = 'Running';
- $Job = $arv->{'jobs'}->{'create'}->execute('job' => $Job);
-
- $job_has_uuid = 1;
+ $Job = api_call("jobs/create", job => $Job);
}
$job_id = $Job->{'uuid'};
my $keep_logfile = $job_id . '.log.txt';
-$local_logfile = File::Temp->new();
+log_writer_start($keep_logfile);
$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
@slot = sort { $a->{cpu} <=> $b->{cpu} } @slot;
-
-my $jobmanager_id;
-if ($job_has_uuid)
-{
- # Claim this job, and make sure nobody else does
- unless ($Job->update_attributes('is_locked_by_uuid' => $User->{'uuid'}) &&
- $Job->{'is_locked_by_uuid'} == $User->{'uuid'}) {
- croak("Error while updating / locking job");
- }
- $Job->update_attributes('started_at' => scalar gmtime,
- 'running' => 1,
- 'success' => undef,
- 'tasks_summary' => { 'failed' => 0,
- 'todo' => 1,
- 'running' => 0,
- 'done' => 0 });
-}
-
+$Job->update_attributes(
+ 'tasks_summary' => { 'failed' => 0,
+ 'todo' => 1,
+ 'running' => 0,
+ 'done' => 0 });
Log (undef, "start");
$SIG{'INT'} = sub { $main::please_freeze = 1; };
$ENV{"JOB_UUID"} = $job_id;
-my @jobstep;
my @jobstep_todo = ();
my @jobstep_done = ();
my @jobstep_tomerge = ();
}
else
{
- my $first_task = $arv->{'job_tasks'}->{'create'}->execute('job_task' => {
+ my $first_task = api_call("job_tasks/create", job_task => {
'job_uuid' => $Job->{'uuid'},
'sequence' => 0,
'qsequence' => 0,
'parameters' => {},
- });
+ });
push @jobstep, { 'level' => 0,
'failures' => 0,
'arvados_task' => $first_task,
my $build_script;
+do {
+ local $/ = undef;
+ $build_script = <DATA>;
+};
+my $nodelist = join(",", @node);
+if (!defined $no_clear_tmp) {
+  # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/task, crunch_tmp/src*
+ Log (undef, "Clean work dirs");
-$ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};
-
-my $skip_install = ($local_job && $Job->{script_version} =~ m{^/});
-if ($skip_install)
-{
- if (!defined $no_clear_tmp) {
- my $clear_tmp_cmd = 'rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*';
- system($clear_tmp_cmd) == 0
- or croak ("`$clear_tmp_cmd` failed: ".($?>>8));
- }
- $ENV{"CRUNCH_SRC"} = $Job->{script_version};
- for my $src_path ("$ENV{CRUNCH_SRC}/arvados/sdk/python") {
- if (-d $src_path) {
- system("virtualenv", "$ENV{CRUNCH_TMP}/opt") == 0
- or croak ("virtualenv $ENV{CRUNCH_TMP}/opt failed: exit ".($?>>8));
- system ("cd $src_path && ./build.sh && \$CRUNCH_TMP/opt/bin/python setup.py install")
- == 0
- or croak ("setup.py in $src_path failed: exit ".($?>>8));
- }
+ my $cleanpid = fork();
+ if ($cleanpid == 0)
+ {
+ srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
+ ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then for i in $JOB_WORK/*keep $CRUNCH_TMP/task/*.keep; do /bin/fusermount -z -u $i; done; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src*']);
+ exit (1);
+ }
+ while (1)
+ {
+ last if $cleanpid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($cleanpid);
+ select (undef, undef, undef, 0.1);
}
+ Log (undef, "Cleanup command exited ".exit_status_s($?));
}
-else
-{
- do {
- local $/ = undef;
- $build_script = <DATA>;
- };
- Log (undef, "Install revision ".$Job->{script_version});
- my $nodelist = join(",", @node);
- if (!defined $no_clear_tmp) {
- # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
-
- my $cleanpid = fork();
- if ($cleanpid == 0)
- {
- srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
- ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
- exit (1);
- }
- while (1)
- {
- last if $cleanpid == waitpid (-1, WNOHANG);
- freeze_if_want_freeze ($cleanpid);
- select (undef, undef, undef, 0.1);
- }
- Log (undef, "Clean-work-dir exited $?");
- }
- # Install requested code version
+my $git_archive;
+if (!defined $git_dir && $Job->{'script_version'} =~ m{^/}) {
+ # If script_version looks like an absolute path, *and* the --git-dir
+ # argument was not given -- which implies we were not invoked by
+ # crunch-dispatch -- we will use the given path as a working
+ # directory instead of resolving script_version to a git commit (or
+ # doing anything else with git).
+ $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{'script_version'};
+ $ENV{"CRUNCH_SRC"} = $Job->{'script_version'};
+}
+else {
+ # Resolve the given script_version to a git commit sha1. Also, if
+ # the repository is remote, clone it into our local filesystem: this
+ # ensures "git archive" will work, and is necessary to reliably
+ # resolve a symbolic script_version like "master^".
+ $ENV{"CRUNCH_SRC"} = "$ENV{CRUNCH_TMP}/src";
- my @execargs;
- my @srunargs = ("srun",
- "--nodelist=$nodelist",
- "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
+ Log (undef, "Looking for version ".$Job->{script_version}." from repository ".$Job->{repository});
$ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};
- $ENV{"CRUNCH_SRC"} = "$ENV{CRUNCH_TMP}/src";
-
- my $commit;
- my $git_archive;
- my $treeish = $Job->{'script_version'};
- # If we're running under crunch-dispatch, it will have pulled the
- # appropriate source tree into its own repository, and given us that
- # repo's path as $git_dir. If we're running a "local" job, and a
- # script_version was specified, it's up to the user to provide the
- # full path to a local repository in Job->{repository}.
+ # If we're running under crunch-dispatch, it will have already
+ # pulled the appropriate source tree into its own repository, and
+ # given us that repo's path as $git_dir.
#
- # TODO: Accept URLs too, not just local paths. Use git-ls-remote and
- # git-archive --remote where appropriate.
+ # If we're running a "local" job, we might have to fetch content
+ # from a remote repository.
#
- # TODO: Accept a locally-hosted Arvados repository by name or
- # UUID. Use arvados.v1.repositories.list or .get to figure out the
- # appropriate fetch-url.
- my $repo = $git_dir || $ENV{'CRUNCH_DEFAULT_GIT_DIR'} || $Job->{'repository'};
-
+ # (Currently crunch-dispatch gives a local path with --git-dir, but
+ # we might as well accept URLs there too in case it changes its
+ # mind.)
+ my $repo = $git_dir || $Job->{'repository'};
+
+ # Repository can be remote or local. If remote, we'll need to fetch it
+ # to a local dir before doing `git log` et al.
+ my $repo_location;
+
+ if ($repo =~ m{://|^[^/]*:}) {
+ # $repo is a git url we can clone, like git:// or https:// or
+ # file:/// or [user@]host:repo.git. Note "user/name@host:foo" is
+ # not recognized here because distinguishing that from a local
+ # path is too fragile. If you really need something strange here,
+ # use the ssh:// form.
+ $repo_location = 'remote';
+ } elsif ($repo =~ m{^\.*/}) {
+ # $repo is a local path to a git index. We'll also resolve ../foo
+ # to ../foo/.git if the latter is a directory. To help
+ # disambiguate local paths from named hosted repositories, this
+ # form must be given as ./ or ../ if it's a relative path.
+ if (-d "$repo/.git") {
+ $repo = "$repo/.git";
+ }
+ $repo_location = 'local';
+ } else {
+ # $repo is none of the above. It must be the name of a hosted
+ # repository.
+ my $arv_repo_list = api_call("repositories/list",
+ 'filters' => [['name','=',$repo]]);
+ my @repos_found = @{$arv_repo_list->{'items'}};
+ my $n_found = $arv_repo_list->{'serverResponse'}->{'items_available'};
+ if ($n_found > 0) {
+ Log(undef, "Repository '$repo' -> "
+ . join(", ", map { $_->{'uuid'} } @repos_found));
+ }
+ if ($n_found != 1) {
+ croak("Error: Found $n_found repositories with name '$repo'.");
+ }
+ $repo = $repos_found[0]->{'fetch_url'};
+ $repo_location = 'remote';
+ }
+ Log(undef, "Using $repo_location repository '$repo'");
$ENV{"CRUNCH_SRC_URL"} = $repo;
- if (-d "$repo/.git") {
- # We were given a working directory, but we are only interested in
- # the index.
- $repo = "$repo/.git";
- }
+ # Resolve given script_version (we'll call that $treeish here) to a
+ # commit sha1 ($commit).
+ my $treeish = $Job->{'script_version'};
+ my $commit;
+ if ($repo_location eq 'remote') {
+ # We minimize excess object-fetching by re-using the same bare
+ # repository in CRUNCH_TMP/.git for multiple crunch-jobs -- we
+ # just keep adding remotes to it as needed.
+ my $local_repo = $ENV{'CRUNCH_TMP'}."/.git";
+ my $gitcmd = "git --git-dir=\Q$local_repo\E";
+
+ # Set up our local repo for caching remote objects, making
+ # archives, etc.
+ if (!-d $local_repo) {
+ make_path($local_repo) or croak("Error: could not create $local_repo");
+ }
+ # This works (exits 0 and doesn't delete fetched objects) even
+ # if $local_repo is already initialized:
+ `$gitcmd init --bare`;
+ if ($?) {
+ croak("Error: $gitcmd init --bare exited ".exit_status_s($?));
+ }
+
+ # If $treeish looks like a hash (or abbrev hash) we look it up in
+ # our local cache first, since that's cheaper. (We don't want to
+ # do that with tags/branches though -- those change over time, so
+ # they should always be resolved by the remote repo.)
+ if ($treeish =~ /^[0-9a-f]{7,40}$/s) {
+ # Hide stderr because it's normal for this to fail:
+ my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E 2>/dev/null`;
+ if ($? == 0 &&
+ # Careful not to resolve a branch named abcdeff to commit 1234567:
+ $sha1 =~ /^$treeish/ &&
+ $sha1 =~ /^([0-9a-f]{40})$/s) {
+ $commit = $1;
+ Log(undef, "Commit $commit already present in $local_repo");
+ }
+ }
+
+ if (!defined $commit) {
+ # If $treeish isn't just a hash or abbrev hash, or isn't here
+ # yet, we need to fetch the remote to resolve it correctly.
- # If this looks like a subversion r#, look for it in git-svn commit messages
+ # First, remove all local heads. This prevents a name that does
+ # not exist on the remote from resolving to (or colliding with)
+ # a previously fetched branch or tag (possibly from a different
+ # remote).
+ remove_tree("$local_repo/refs/heads", {keep_root => 1});
- if ($treeish =~ m{^\d{1,4}$}) {
- my $gitlog = `git --git-dir=\Q$repo\E log --pretty="format:%H" --grep="git-svn-id:.*\@"\Q$treeish\E" " master`;
- chomp $gitlog;
- if ($gitlog =~ /^[a-f0-9]{40}$/) {
- $commit = $gitlog;
- Log (undef, "Using commit $commit for script_version $treeish");
+ Log(undef, "Fetching objects from $repo to $local_repo");
+ `$gitcmd fetch --no-progress --tags ''\Q$repo\E \Q+refs/heads/*:refs/heads/*\E`;
+ if ($?) {
+ croak("Error: `$gitcmd fetch` exited ".exit_status_s($?));
+ }
}
+
+ # Now that the data is all here, we will use our local repo for
+ # the rest of our git activities.
+ $repo = $local_repo;
}
- # If that didn't work, try asking git to look it up as a tree-ish.
-
- if (!defined $commit) {
- my $found = `git --git-dir=\Q$repo\E rev-list -1 ''\Q$treeish\E`;
- chomp $found;
- if ($found =~ /^[0-9a-f]{40}$/s) {
- $commit = $found;
- if ($commit ne $treeish) {
- # Make sure we record the real commit id in the database,
- # frozentokey, logs, etc. -- instead of an abbreviation or a
- # branch name which can become ambiguous or point to a
- # different commit in the future.
- $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
- Log (undef, "Using commit $commit for tree-ish $treeish");
- if ($commit ne $treeish) {
- $Job->{'script_version'} = $commit;
- !$job_has_uuid or
- $Job->update_attributes('script_version' => $commit) or
- croak("Error while updating job");
- }
- }
+ my $gitcmd = "git --git-dir=\Q$repo\E";
+ my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E`;
+ unless ($? == 0 && $sha1 =~ /^([0-9a-f]{40})$/) {
+ croak("`$gitcmd rev-list` exited "
+ .exit_status_s($?)
+ .", '$treeish' not found. Giving up.");
+ }
+ $commit = $1;
+ Log(undef, "Version $treeish is commit $commit");
+
+ if ($commit ne $Job->{'script_version'}) {
+ # Record the real commit id in the database, frozentokey, logs,
+ # etc. -- instead of an abbreviation or a branch name which can
+ # become ambiguous or point to a different commit in the future.
+ if (!$Job->update_attributes('script_version' => $commit)) {
+ croak("Error: failed to update job's script_version attribute");
}
}
- if (defined $commit) {
- $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
- @execargs = ("sh", "-c",
- "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
- $git_archive = `git --git-dir=\Q$repo\E archive ''\Q$commit\E`;
+ $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
+ $git_archive = `$gitcmd archive ''\Q$commit\E`;
+ if ($?) {
+ croak("Error: $gitcmd archive exited ".exit_status_s($?));
}
- else {
- croak ("could not figure out commit id for $treeish");
+}
+
+if (!defined $git_archive) {
+ Log(undef, "Skip install phase (no git archive)");
+ if ($have_slurm) {
+ Log(undef, "Warning: This probably means workers have no source tree!");
}
+}
+else {
+ Log(undef, "Run install script on all workers");
+
+ my @srunargs = ("srun",
+ "--nodelist=$nodelist",
+ "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
+ my @execargs = ("sh", "-c",
+ "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
my $installpid = fork();
if ($installpid == 0)
freeze_if_want_freeze ($installpid);
select (undef, undef, undef, 0.1);
}
- Log (undef, "Install exited $?");
+ Log (undef, "Install script exited ".exit_status_s($?));
}
if (!$have_slurm)
# If this job requires a Docker image, install that.
my $docker_bin = "/usr/bin/docker.io";
-my ($docker_locator, $docker_hash);
+my ($docker_locator, $docker_stream, $docker_hash);
if ($docker_locator = $Job->{docker_image_locator}) {
- $docker_hash = find_docker_hash($docker_locator);
+ ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
if (!$docker_hash)
{
croak("No Docker image hash found from locator $docker_locator");
}
+ $docker_stream =~ s/^\.//;
my $docker_install_script = qq{
if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
- arv-get \Q$docker_locator/$docker_hash.tar\E | $docker_bin load
+ arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
fi
};
my $docker_pid = fork();
}
if ($? != 0)
{
- croak("Installing Docker image from $docker_locator returned exit code $?");
+ croak("Installing Docker image from $docker_locator exited "
+ .exit_status_s($?));
}
}
}
$ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
$ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
- $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/$id.$$";
+ $ENV{"TASK_WORK"} = $ENV{"CRUNCH_TMP"}."/task/$childslotname";
+ $ENV{"HOME"} = $ENV{"TASK_WORK"};
$ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
$ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
$ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
- ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
- ."&& chmod og+wrx $ENV{TASK_WORK}"
+ ."mkdir -p $ENV{CRUNCH_TMP} $ENV{JOB_WORK} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
."&& cd $ENV{CRUNCH_TMP} ";
if ($build_script)
{
$command .=
"&& perl -";
}
- $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+ $command .= "&& exec arv-mount --by-id --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
if ($docker_hash)
{
- $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
- $command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
+ my $cidfile = "$ENV{CRUNCH_TMP}/$ENV{TASK_UUID}.cid";
+ $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
+ $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --attach=stdin -i --user=crunch --cidfile=$cidfile --sig-proxy ";
+
# Dynamically configure the container to use the host system as its
# DNS server. Get the host's global addresses from the ip command,
# and turn them into docker --dns options using gawk.
$command .=
q{$(ip -o address show scope global |
gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
- $command .= "-v \Q$ENV{TASK_WORK}:/tmp/crunch-job:rw\E ";
- $command .= "-v \Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
- $command .= "-v \Q$ENV{TASK_KEEPMOUNT}:/mnt:ro\E ";
+
+ # The source tree and $destdir directory (which we have
+ # installed on the worker host) are available in the container,
+ # under the same path.
+ $command .= "--volume=\Q$ENV{CRUNCH_SRC}:$ENV{CRUNCH_SRC}:ro\E ";
+ $command .= "--volume=\Q$ENV{CRUNCH_INSTALL}:$ENV{CRUNCH_INSTALL}:ro\E ";
+
+ # Currently, we make arv-mount's mount point appear at /keep
+ # inside the container (instead of using the same path as the
+ # host like we do with CRUNCH_SRC and CRUNCH_INSTALL). However,
+ # crunch scripts and utilities must not rely on this. They must
+ # use $TASK_KEEPMOUNT.
+ $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+ $ENV{TASK_KEEPMOUNT} = "/keep";
+
+ # TASK_WORK is a plain docker data volume: it starts out empty,
+ # is writable, and persists until no containers use it any
+ # more. We don't use --volumes-from to share it with other
+ # containers: it is only accessible to this task, and it goes
+ # away when this task stops.
+ $command .= "--volume=\Q$ENV{TASK_WORK}\E ";
+
+ # JOB_WORK is also a plain docker data volume for now. TODO:
+ # Share a single JOB_WORK volume across all task containers on a
+ # given worker node, and delete it when the job ends (and, in
+ # case that doesn't work, when the next job starts).
+ $command .= "--volume=\Q$ENV{JOB_WORK}\E ";
+
while (my ($env_key, $env_val) = each %ENV)
{
- if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
- if ($env_key eq "TASK_WORK") {
- $command .= "-e \QTASK_WORK=/tmp/crunch-job\E ";
- }
- elsif ($env_key eq "TASK_KEEPMOUNT") {
- $command .= "-e \QTASK_KEEPMOUNT=/mnt\E ";
- }
- elsif ($env_key eq "CRUNCH_SRC") {
- $command .= "-e \QCRUNCH_SRC=/tmp/crunch-src\E ";
- }
- else {
- $command .= "-e \Q$env_key=$env_val\E ";
- }
+ if ($env_key =~ /^(ARVADOS|CRUNCH|JOB|TASK)_/) {
+ $command .= "--env=\Q$env_key=$env_val\E ";
}
}
+ $command .= "--env=\QHOME=$ENV{HOME}\E ";
$command .= "\Q$docker_hash\E ";
- $command .= "stdbuf -o0 -e0 ";
- $command .= "/tmp/crunch-src/crunch_scripts/" . $Job->{"script"};
+ $command .= "stdbuf --output=0 --error=0 ";
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
} else {
# Non-docker run
$command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
- $command .= "stdbuf -o0 -e0 ";
+ $command .= "stdbuf --output=0 --error=0 ";
$command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
}
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
- exit (111);
+ # exec() failed, we assume nothing happened.
+ die "srun() failed on build script\n";
}
close("writer");
if (!defined $childpid)
delete $Jobstep->{stderr};
delete $Jobstep->{finishtime};
+ $Jobstep->{'arvados_task'}->{started_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{starttime});
+ $Jobstep->{'arvados_task'}->save;
+
splice @jobstep_todo, $todo_ptr, 1;
--$todo_ptr;
freeze();
my $collated_output = &collate_output();
-if ($job_has_uuid) {
- $Job->update_attributes('running' => 0,
- 'success' => $collated_output && $main::success,
- 'finished_at' => scalar gmtime)
+if (!$collated_output) {
+ Log(undef, "output undef");
}
-
-if ($collated_output)
-{
+else {
eval {
- open(my $orig_manifest, '-|', 'arv', 'keep', 'get', $collated_output)
+ open(my $orig_manifest, '-|', 'arv-get', $collated_output)
or die "failed to get collated manifest: $!";
- # Read the original manifest, and strip permission hints from it,
- # so we can put the result in a Collection.
- my @stripped_manifest_lines = ();
my $orig_manifest_text = '';
while (my $manifest_line = <$orig_manifest>) {
$orig_manifest_text .= $manifest_line;
- my @words = split(/ /, $manifest_line, -1);
- foreach my $ii (0..$#words) {
- if ($words[$ii] =~ /^[0-9a-f]{32}\+/) {
- $words[$ii] =~ s/\+A[0-9a-f]{40}@[0-9a-f]{8}\b//;
- }
- }
- push(@stripped_manifest_lines, join(" ", @words));
- }
- my $stripped_manifest_text = join("", @stripped_manifest_lines);
- my $output = $arv->{'collections'}->{'create'}->execute('collection' => {
- 'uuid' => md5_hex($stripped_manifest_text),
- 'manifest_text' => $orig_manifest_text,
- });
- $Job->update_attributes('output' => $output->{uuid});
- if ($Job->{'output_is_persistent'}) {
- $arv->{'links'}->{'create'}->execute('link' => {
- 'tail_kind' => 'arvados#user',
- 'tail_uuid' => $User->{'uuid'},
- 'head_kind' => 'arvados#collection',
- 'head_uuid' => $Job->{'output'},
- 'link_class' => 'resources',
- 'name' => 'wants',
- });
}
+ my $output = api_call("collections/create", collection => {
+ 'manifest_text' => $orig_manifest_text});
+ Log(undef, "output uuid " . $output->{uuid});
+ Log(undef, "output hash " . $output->{portable_data_hash});
+ $Job->update_attributes('output' => $output->{portable_data_hash});
};
if ($@) {
Log (undef, "Failed to register output manifest: $@");
Log (undef, "finish");
save_meta();
-exit 0;
+
+my $final_state;
+if ($collated_output && $main::success) {
+ $final_state = 'Complete';
+} else {
+ $final_state = 'Failed';
+}
+$Job->update_attributes('state' => $final_state);
+
+exit (($final_state eq 'Complete') ? 0 : 1);
$Job->{'tasks_summary'}->{'todo'} = $todo;
$Job->{'tasks_summary'}->{'done'} = $done;
$Job->{'tasks_summary'}->{'running'} = $running;
- if ($job_has_uuid) {
- $Job->update_attributes('tasks_summary' => $Job->{'tasks_summary'});
- }
+ $Job->update_attributes('tasks_summary' => $Job->{'tasks_summary'});
Log (undef, "status: $done done, $running running, $todo todo");
$progress_is_dirty = 0;
}
my $childstatus = $?;
my $exitvalue = $childstatus >> 8;
- my $exitinfo = sprintf("exit %d signal %d%s",
- $exitvalue,
- $childstatus & 127,
- ($childstatus & 128 ? ' core dump' : ''));
+ my $exitinfo = "exit ".exit_status_s($childstatus);
$Jobstep->{'arvados_task'}->reload;
my $task_success = $Jobstep->{'arvados_task'}->{success};
$main::success = 0;
$main::please_freeze = 1;
}
- else {
- # Put this task back on the todo queue
- push @jobstep_todo, $jobstepid;
- }
+ # Put this task back on the todo queue
+ push @jobstep_todo, $jobstepid;
$Job->{'tasks_summary'}->{'failed'}++;
}
else
}
$Jobstep->{exitcode} = $childstatus;
$Jobstep->{finishtime} = time;
+ $Jobstep->{'arvados_task'}->{finished_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{finishtime});
+ $Jobstep->{'arvados_task'}->save;
process_stderr ($jobstepid, $task_success);
Log ($jobstepid, "output " . $Jobstep->{'arvados_task'}->{output});
push @freeslot, $proc{$pid}->{slot};
delete $proc{$pid};
- # Load new tasks
- my $newtask_list = [];
- my $newtask_results;
- do {
- $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
- 'where' => {
- 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
- },
- 'order' => 'qsequence',
- 'offset' => scalar(@$newtask_list),
- );
- push(@$newtask_list, @{$newtask_results->{items}});
- } while (@{$newtask_results->{items}});
- foreach my $arvados_task (@$newtask_list) {
- my $jobstep = {
- 'level' => $arvados_task->{'sequence'},
- 'failures' => 0,
- 'arvados_task' => $arvados_task
- };
- push @jobstep, $jobstep;
- push @jobstep_todo, $#jobstep;
+ if ($task_success) {
+ # Load new tasks
+ my $newtask_list = [];
+ my $newtask_results;
+ do {
+ $newtask_results = api_call(
+ "job_tasks/list",
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence',
+ 'offset' => scalar(@$newtask_list),
+ );
+ push(@$newtask_list, @{$newtask_results->{items}});
+ } while (@{$newtask_results->{items}});
+ foreach my $arvados_task (@$newtask_list) {
+ my $jobstep = {
+ 'level' => $arvados_task->{'sequence'},
+ 'failures' => 0,
+ 'arvados_task' => $arvados_task
+ };
+ push @jobstep, $jobstep;
+ push @jobstep_todo, $#jobstep;
+ }
}
$progress_is_dirty = 1;
my @stat = stat $ENV{"CRUNCH_REFRESH_TRIGGER"};
if (@stat && $stat[9] > $latest_refresh) {
$latest_refresh = scalar time;
- if ($job_has_uuid) {
- my $Job2 = $arv->{'jobs'}->{'get'}->execute('uuid' => $jobspec);
- for my $attr ('cancelled_at',
- 'cancelled_by_user_uuid',
- 'cancelled_by_client_uuid') {
- $Job->{$attr} = $Job2->{$attr};
- }
- if ($Job->{'cancelled_at'}) {
- Log (undef, "Job cancelled at " . $Job->{cancelled_at} .
- " by user " . $Job->{cancelled_by_user_uuid});
- $main::success = 0;
- $main::please_freeze = 1;
+ my $Job2 = api_call("jobs/get", uuid => $jobspec);
+ for my $attr ('cancelled_at',
+ 'cancelled_by_user_uuid',
+ 'cancelled_by_client_uuid',
+ 'state') {
+ $Job->{$attr} = $Job2->{$attr};
+ }
+ if ($Job->{'state'} ne "Running") {
+ if ($Job->{'state'} eq "Cancelled") {
+ Log (undef, "Job cancelled at " . $Job->{'cancelled_at'} . " by user " . $Job->{'cancelled_by_user_uuid'});
+ } else {
+ Log (undef, "Job state unexpectedly changed to " . $Job->{'state'});
}
+ $main::success = 0;
+ $main::please_freeze = 1;
}
}
}
my $hash = shift;
my ($keep, $child_out, $output_block);
- my $cmd = "$arv_cli keep get \Q$hash\E";
+ my $cmd = "arv-get \Q$hash\E";
open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
- sysread($keep, $output_block, 64 * 1024 * 1024);
+ $output_block = '';
+ while (1) {
+ my $buf;
+ my $bytes = sysread($keep, $buf, 1024 * 1024);
+ if (!defined $bytes) {
+ die "reading from arv-get: $!";
+ } elsif ($bytes == 0) {
+ # sysread returns 0 at the end of the pipe.
+ last;
+ } else {
+ # some bytes were read into buf.
+ $output_block .= $buf;
+ }
+ }
close $keep;
return $output_block;
}
Log (undef, "collate");
my ($child_out, $child_in);
- my $pid = open2($child_out, $child_in, $arv_cli, 'keep', 'put', '--raw');
+ my $pid = open2($child_out, $child_in, 'arv-put', '--raw',
+ '--retries', retry_count());
my $joboutput;
for (@jobstep)
{
- next if (!exists $_->{'arvados_task'}->{output} ||
- !$_->{'arvados_task'}->{'success'} ||
- $_->{'exitcode'} != 0);
+ next if (!exists $_->{'arvados_task'}->{'output'} ||
+ !$_->{'arvados_task'}->{'success'});
my $output = $_->{'arvados_task'}->{output};
if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
{
if ($s->can_read(120)) {
sysread($child_out, $joboutput, 64 * 1024 * 1024);
chomp($joboutput);
+ # TODO: Ensure exit status == 0.
} else {
- Log (undef, "timed out reading from 'arv keep put'");
+ Log (undef, "timed out reading from 'arv-put'");
}
}
+ # TODO: kill $pid instead of waiting, now that we've decided to
+ # ignore further output.
waitpid($pid, 0);
- if ($joboutput)
- {
- Log (undef, "output $joboutput");
- $Job->update_attributes('output' => $joboutput) if $job_has_uuid;
- }
- else
- {
- Log (undef, "output undef");
- }
return $joboutput;
}
}
+# Send log output to Keep via arv-put.
+#
+# $log_pipe_in (the write side) and $log_pipe_out (the read side) are the filehandles connected to the arv-put pipe.
+# $log_pipe_pid is the pid of the arv-put subprocess.
+#
+# The only functions that should access these variables directly are:
+#
+# log_writer_start($logfilename)
+# Starts an arv-put pipe, reading data on stdin and writing it to
+# a $logfilename file in an output collection.
+#
+# log_writer_send($txt)
+# Writes $txt to the output log collection.
+#
+# log_writer_finish()
+# Closes the arv-put pipe and returns the output that it produces.
+#
+# log_writer_is_active()
+# Returns a true value if there is currently a live arv-put
+# process, false otherwise.
+#
+my ($log_pipe_in, $log_pipe_out, $log_pipe_pid);
+
+sub log_writer_start($)
+{
+ my $logfilename = shift;
+ $log_pipe_pid = open2($log_pipe_out, $log_pipe_in,
+ 'arv-put', '--portable-data-hash',
+ '--retries', '3',
+ '--filename', $logfilename,
+ '-');
+}
+
+sub log_writer_send($)
+{
+ my $txt = shift;
+ print $log_pipe_in $txt;
+}
+
+sub log_writer_finish()
+{
+ return unless $log_pipe_pid;
+
+ close($log_pipe_in);
+ my $arv_put_output;
+
+ my $s = IO::Select->new($log_pipe_out);
+ if ($s->can_read(120)) {
+ sysread($log_pipe_out, $arv_put_output, 1024);
+ chomp($arv_put_output);
+ } else {
+ Log (undef, "timed out reading from 'arv-put'");
+ }
+
+ waitpid($log_pipe_pid, 0);
+ $log_pipe_pid = $log_pipe_in = $log_pipe_out = undef;
+ if ($?) {
+    Log(undef, "log_writer_finish: arv-put exited ".exit_status_s($?));
+ }
+
+ return $arv_put_output;
+}
+
+sub log_writer_is_active() {
+ return $log_pipe_pid;
+}
+
sub Log # ($jobstep_id, $logmessage)
{
if ($_[1] =~ /\n/) {
$message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
$message .= "\n";
my $datetime;
- if ($local_logfile || -t STDERR) {
+ if (log_writer_is_active() || -t STDERR) {
my @gmtime = gmtime;
$datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
$gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
}
print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
- if ($local_logfile) {
- print $local_logfile $datetime . " " . $message;
+ if (log_writer_is_active()) {
+ log_writer_send($datetime . " " . $message);
}
}
freeze() if @jobstep_todo;
collate_output() if @jobstep_todo;
cleanup();
- save_meta() if $local_logfile;
+ save_meta();
die;
}
sub cleanup
{
- return if !$job_has_uuid;
- $Job->update_attributes('running' => 0,
- 'success' => 0,
- 'finished_at' => scalar gmtime);
+ return unless $Job;
+ if ($Job->{'state'} eq 'Cancelled') {
+ $Job->update_attributes('finished_at' => scalar gmtime);
+ } else {
+ $Job->update_attributes('state' => 'Failed');
+ }
}
{
my $justcheckpoint = shift; # false if this will be the last meta saved
return if $justcheckpoint; # checkpointing is not relevant post-Warehouse.pm
+ return unless log_writer_is_active();
- $local_logfile->flush;
- my $cmd = "$arv_cli keep put --filename ''\Q$keep_logfile\E "
- . quotemeta($local_logfile->filename);
- my $loglocator = `$cmd`;
- die "system $cmd failed: $?" if $?;
- chomp($loglocator);
-
- $local_logfile = undef; # the temp file is automatically deleted
+ my $loglocator = log_writer_finish();
Log (undef, "log manifest is $loglocator");
$Job->{'log'} = $loglocator;
- $Job->update_attributes('log', $loglocator) if $job_has_uuid;
+ $Job->update_attributes('log', $loglocator);
}
collate_output();
cleanup();
save_meta();
- exit 0;
+ exit 1;
}
}
my $opts = shift || {};
my $stdin = shift;
my $args = $have_slurm ? [@$srunargs, @$execargs] : $execargs;
- print STDERR (join (" ",
- map { / / ? "'$_'" : $_ }
- (@$args)),
- "\n")
- if $ENV{CRUNCH_DEBUG};
+
+ $Data::Dumper::Terse = 1;
+ $Data::Dumper::Indent = 0;
+ my $show_cmd = Dumper($args);
+ $show_cmd =~ s/(TOKEN\\*=)[^\s\']+/${1}[...]/g;
+ $show_cmd =~ s/\n/ /g;
+ warn "starting: $show_cmd\n";
if (defined $stdin) {
my $child = open STDIN, "-|";
}
}
-sub find_docker_hash {
- # Given a Keep locator, search for a matching link to find the Docker hash
- # of the stored image.
+sub find_docker_image {
+ # Given a Keep locator, check to see if it contains a Docker image.
+ # If so, return its stream name and Docker hash.
+ # If not, return undef for both values.
my $locator = shift;
- my $links_result = $arv->{links}->{list}->execute(
- filters => [["head_uuid", "=", $locator],
- ["link_class", "=", "docker_image_hash"]],
- limit => 1);
- my $docker_hash;
- foreach my $link (@{$links_result->{items}}) {
- $docker_hash = lc($link->{name});
+ my ($streamname, $filename);
+ my $image = api_call("collections/get", uuid => $locator);
+ if ($image) {
+ foreach my $line (split(/\n/, $image->{manifest_text})) {
+ my @tokens = split(/\s+/, $line);
+ next if (!@tokens);
+ $streamname = shift(@tokens);
+ foreach my $filedata (grep(/^\d+:\d+:/, @tokens)) {
+ if (defined($filename)) {
+ return (undef, undef); # More than one file in the Collection.
+ } else {
+ $filename = (split(/:/, $filedata, 3))[2];
+ }
+ }
+ }
+ }
+ if (defined($filename) and ($filename =~ /^([0-9A-Fa-f]{64})\.tar$/)) {
+ return ($streamname, $1);
+ } else {
+ return (undef, undef);
+ }
+}
+
+sub retry_count {
+ # Calculate the number of times an operation should be retried,
+ # assuming exponential backoff, and that we're willing to retry as
+ # long as tasks have been running. Enforce a minimum of 3 retries.
+ my ($starttime, $endtime, $timediff, $retries);
+ if (@jobstep) {
+ $starttime = $jobstep[0]->{starttime};
+ $endtime = $jobstep[-1]->{finishtime};
+ }
+ if (!defined($starttime)) {
+ $timediff = 0;
+ } elsif (!defined($endtime)) {
+ $timediff = time - $starttime;
+ } else {
+ $timediff = ($endtime - $starttime) - (time - $endtime);
+ }
+ if ($timediff > 0) {
+ $retries = int(log($timediff) / log(2));
+ } else {
+ $retries = 1; # Use the minimum.
+ }
+ return ($retries > 3) ? $retries : 3;
+}
+
+sub retry_op {
+ # Pass in two function references.
+ # This method will be called with the remaining arguments.
+ # If it dies, retry it with exponential backoff until it succeeds,
+ # or until the current retry_count is exhausted. After each failure
+ # that can be retried, the second function will be called with
+ # the current try count (0-based), next try time, and error message.
+ my $operation = shift;
+ my $retry_callback = shift;
+ my $retries = retry_count();
+ foreach my $try_count (0..$retries) {
+ my $next_try = time + (2 ** $try_count);
+ my $result = eval { $operation->(@_); };
+ if (!$@) {
+ return $result;
+ } elsif ($try_count < $retries) {
+ $retry_callback->($try_count, $next_try, $@);
+ my $sleep_time = $next_try - time;
+ sleep($sleep_time) if ($sleep_time > 0);
+ }
+ }
+ # Ensure the error message ends in a newline, so Perl doesn't add
+ # retry_op's line number to it.
+ chomp($@);
+ die($@ . "\n");
+}
+
+sub api_call {
+ # Pass in a /-separated API method name, and arguments for it.
+ # This function will call that method, retrying as needed until
+ # the current retry_count is exhausted, with a log on the first failure.
+ my $method_name = shift;
+ my $log_api_retry = sub {
+ my ($try_count, $next_try_at, $errmsg) = @_;
+ $errmsg =~ s/\s*\bat \Q$0\E line \d+\.?\s*//;
+ $errmsg =~ s/\s/ /g;
+ $errmsg =~ s/\s+$//;
+ my $retry_msg;
+ if ($next_try_at < time) {
+ $retry_msg = "Retrying.";
+ } else {
+      my $next_try_fmt = strftime("%Y-%m-%d %H:%M:%S", localtime($next_try_at));
+ $retry_msg = "Retrying at $next_try_fmt.";
+ }
+ Log(undef, "API method $method_name failed: $errmsg. $retry_msg");
+ };
+ my $method = $arv;
+ foreach my $key (split(/\//, $method_name)) {
+ $method = $method->{$key};
}
- return $docker_hash;
+ return retry_op(sub { $method->execute(@_); }, $log_api_retry, @_);
+}
+
+sub exit_status_s {
+ # Given a $?, return a human-readable exit code string like "0" or
+ # "1" or "0 with signal 1" or "1 with signal 11".
+ my $exitcode = shift;
+ my $s = $exitcode >> 8;
+ if ($exitcode & 0x7f) {
+ $s .= " with signal " . ($exitcode & 0x7f);
+ }
+ if ($exitcode & 0x80) {
+ $s .= " with core dump";
+ }
+ return $s;
}
__DATA__
# checkout-and-build
use Fcntl ':flock';
+use File::Path qw( make_path remove_tree );
my $destdir = $ENV{"CRUNCH_SRC"};
my $commit = $ENV{"CRUNCH_SRC_COMMIT"};
my $repo = $ENV{"CRUNCH_SRC_URL"};
+my $task_work = $ENV{"TASK_WORK"};
+
+for my $dir ($destdir, $task_work) {
+ if ($dir) {
+ make_path $dir;
+ -e $dir or die "Failed to create temporary directory ($dir): $!";
+ }
+}
+
+if ($task_work) {
+ remove_tree($task_work, {keep_root => 1});
+}
+
open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
flock L, LOCK_EX;
if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
- exit 0;
+ if (@ARGV) {
+ exec(@ARGV);
+ die "Cannot exec `@ARGV`: $!";
+ } else {
+ exit 0;
+ }
}
unlink "$destdir.commit";
+open STDERR_ORIG, ">&STDERR";
open STDOUT, ">", "$destdir.log";
open STDERR, ">&STDOUT";
close L;
-exit 0;
+if (@ARGV) {
+ exec(@ARGV);
+ die "Cannot exec `@ARGV`: $!";
+} else {
+ exit 0;
+}
sub shell_or_die
{
if ($ENV{"DEBUG"}) {
print STDERR "@_\n";
}
- system (@_) == 0
- or die "@_ failed: $! exit 0x".sprintf("%x",$?);
+ if (system (@_) != 0) {
+ my $err = $!;
+ my $exitstatus = sprintf("exit %d signal %d", $? >> 8, $? & 0x7f);
+ open STDERR, ">&STDERR_ORIG";
+ system ("cat $destdir.log >&2");
+ die "@_ failed ($err): $exitstatus";
+ }
}
__DATA__