use IO::Select;
use File::Temp;
use Fcntl ':flock';
+use File::Path qw( make_path );
+
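+# 75 is EX_TEMPFAIL in sysexits.h: a temporary failure that the caller may
+# retry later.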
+use constant EX_TEMPFAIL => 75;
$ENV{"TMPDIR"} ||= "/tmp";
unless (defined $ENV{"CRUNCH_TMP"}) {
$ENV{"CRUNCH_TMP"} .= "-$<";
}
}
+
+# Create the tmp directory if it does not exist
+if ( ! -d $ENV{"CRUNCH_TMP"} ) {
+ make_path $ENV{"CRUNCH_TMP"} or die "Failed to create temporary working directory: " . $ENV{"CRUNCH_TMP"};
+}
+
$ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});
-my $arv_cli;
-
-if (defined $ENV{"ARV_CLI"}) {
- $arv_cli = $ENV{"ARV_CLI"};
-}
-else {
- $arv_cli = 'arv';
-}
-
my $force_unlock;
my $git_dir;
my $jobspec;
{
$Job = $arv->{'jobs'}->{'get'}->execute('uuid' => $jobspec);
if (!$force_unlock) {
+    # If some other crunch-job process has grabbed this job (or we see
+    # other evidence that the job is already underway), we exit
+    # EX_TEMPFAIL so crunch-dispatch (our parent process) doesn't
+    # mark the job as failed.
if ($Job->{'is_locked_by_uuid'}) {
- croak("Job is locked: " . $Job->{'is_locked_by_uuid'});
+ Log(undef, "Job is locked by " . $Job->{'is_locked_by_uuid'});
+ exit EX_TEMPFAIL;
}
if ($Job->{'success'} ne undef) {
- croak("Job 'success' flag (" . $Job->{'success'} . ") is not null");
+ Log(undef, "Job 'success' flag (" . $Job->{'success'} . ") is not null");
+ exit EX_TEMPFAIL;
}
if ($Job->{'running'}) {
- croak("Job 'running' flag is already set");
+ Log(undef, "Job 'running' flag is already set");
+ exit EX_TEMPFAIL;
}
if ($Job->{'started_at'}) {
- croak("Job 'started_at' time is already set (" . $Job->{'started_at'} . ")");
+ Log(undef, "Job 'started_at' time is already set (" . $Job->{'started_at'} . ")");
+ exit EX_TEMPFAIL;
}
}
}
# Claim this job, and make sure nobody else does
unless ($Job->update_attributes('is_locked_by_uuid' => $User->{'uuid'}) &&
$Job->{'is_locked_by_uuid'} == $User->{'uuid'}) {
- croak("Error while updating / locking job");
+ Log(undef, "Error while updating / locking job, exiting ".EX_TEMPFAIL);
+ exit EX_TEMPFAIL;
}
$Job->update_attributes('started_at' => scalar gmtime,
'running' => 1,
if ($cleanpid == 0)
{
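+  # Lazily unmount each task's FUSE Keep mount before removing the work
+  # directories; fusermount -z detaches the mount even if it is still busy.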
srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
- ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
+ ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then for i in $JOB_WORK/*keep; do /bin/fusermount -z -u $i; done; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
exit (1);
}
while (1)
# If this job requires a Docker image, install that.
my $docker_bin = "/usr/bin/docker.io";
-my ($docker_locator, $docker_hash);
+my ($docker_locator, $docker_stream, $docker_hash);
if ($docker_locator = $Job->{docker_image_locator}) {
- $docker_hash = find_docker_hash($docker_locator);
+ ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
if (!$docker_hash)
{
croak("No Docker image hash found from locator $docker_locator");
}
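+  # Manifest stream names start with "."; strip it so the arv-get path below
+  # is <locator>/<hash>.tar for the root stream, or
+  # <locator>/<subdir>/<hash>.tar for a named stream.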
+ $docker_stream =~ s/^\.//;
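+  # Fetch the image tarball from Keep and load it into Docker, unless the
+  # image has already been loaded.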
my $docker_install_script = qq{
if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
- arv-get \Q$docker_locator/$docker_hash.tar\E | $docker_bin load
+ arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
fi
};
my $docker_pid = fork();
$ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
$ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
$ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/$id.$$";
+ $ENV{"HOME"} = $ENV{"TASK_WORK"};
$ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
$ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
$ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
- ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
+ ."mkdir -p $ENV{CRUNCH_TMP} $ENV{JOB_WORK} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
."&& cd $ENV{CRUNCH_TMP} ";
if ($build_script)
{
if ($docker_hash)
{
$command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
- $command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
+ $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --user=crunch --cidfile=$ENV{TASK_WORK}/docker.cid ";
# Dynamically configure the container to use the host system as its
# DNS server. Get the host's global addresses from the ip command,
# and turn them into docker --dns options using gawk.
$command .=
q{$(ip -o address show scope global |
gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
- foreach my $env_key (qw(CRUNCH_SRC CRUNCH_TMP TASK_KEEPMOUNT))
- {
- $command .= "-v \Q$ENV{$env_key}:$ENV{$env_key}:rw\E ";
- }
+ $command .= "--volume=\Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
+ $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+ $command .= "--env=\QHOME=/home/crunch\E ";
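+      # Pass the task's ARVADOS_*/JOB_*/TASK_* environment into the container,
+      # rewriting TASK_KEEPMOUNT to /keep, where the Keep mount is bound above.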
while (my ($env_key, $env_val) = each %ENV)
{
if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
- $command .= "-e \Q$env_key=$env_val\E ";
+ if ($env_key eq "TASK_KEEPMOUNT") {
+ $command .= "--env=\QTASK_KEEPMOUNT=/keep\E ";
+ }
+ else {
+ $command .= "--env=\Q$env_key=$env_val\E ";
+ }
}
}
+ $command .= "--env=\QCRUNCH_NODE_SLOTS=$ENV{CRUNCH_NODE_SLOTS}\E ";
+ $command .= "--env=\QCRUNCH_SRC=/tmp/crunch-src\E ";
$command .= "\Q$docker_hash\E ";
+ $command .= "stdbuf --output=0 --error=0 ";
+ $command .= "/tmp/crunch-src/crunch_scripts/" . $Job->{"script"};
} else {
- $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 "
+ # Non-docker run
+ $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
+ $command .= "stdbuf --output=0 --error=0 ";
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
}
- $command .= "stdbuf -o0 -e0 ";
- $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
- exit (111);
+    # exec() failed; we assume nothing happened.
+ Log(undef, "srun() failed on build script");
+ die;
}
close("writer");
if (!defined $childpid)
'finished_at' => scalar gmtime)
}
-if ($collated_output)
-{
+if (!$collated_output) {
+ Log(undef, "output undef");
+}
+else {
eval {
- open(my $orig_manifest, '-|', 'arv', 'keep', 'get', $collated_output)
+ open(my $orig_manifest, '-|', 'arv-get', $collated_output)
or die "failed to get collated manifest: $!";
# Read the original manifest, and strip permission hints from it,
# so we can put the result in a Collection.
'uuid' => md5_hex($stripped_manifest_text),
'manifest_text' => $orig_manifest_text,
});
- $Job->update_attributes('output' => $output->{uuid});
+ Log(undef, "output " . $output->{uuid});
+ $Job->update_attributes('output' => $output->{uuid}) if $job_has_uuid;
if ($Job->{'output_is_persistent'}) {
$arv->{'links'}->{'create'}->execute('link' => {
'tail_kind' => 'arvados#user',
Log (undef, "finish");
save_meta();
-exit 0;
+exit ($Job->{'success'} ? 0 : 1);
push @freeslot, $proc{$pid}->{slot};
delete $proc{$pid};
- # Load new tasks
- my $newtask_list = [];
- my $newtask_results;
- do {
- $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
- 'where' => {
- 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
- },
- 'order' => 'qsequence',
- 'offset' => scalar(@$newtask_list),
- );
- push(@$newtask_list, @{$newtask_results->{items}});
- } while (@{$newtask_results->{items}});
- foreach my $arvados_task (@$newtask_list) {
- my $jobstep = {
- 'level' => $arvados_task->{'sequence'},
- 'failures' => 0,
- 'arvados_task' => $arvados_task
- };
- push @jobstep, $jobstep;
- push @jobstep_todo, $#jobstep;
+ if ($task_success) {
+ # Load new tasks
+ my $newtask_list = [];
+ my $newtask_results;
+ do {
+ $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence',
+ 'offset' => scalar(@$newtask_list),
+ );
+ push(@$newtask_list, @{$newtask_results->{items}});
+ } while (@{$newtask_results->{items}});
+ foreach my $arvados_task (@$newtask_list) {
+ my $jobstep = {
+ 'level' => $arvados_task->{'sequence'},
+ 'failures' => 0,
+ 'arvados_task' => $arvados_task
+ };
+ push @jobstep, $jobstep;
+ push @jobstep_todo, $#jobstep;
+ }
}
$progress_is_dirty = 1;
my $hash = shift;
my ($keep, $child_out, $output_block);
- my $cmd = "$arv_cli keep get \Q$hash\E";
+ my $cmd = "arv-get \Q$hash\E";
open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
- sysread($keep, $output_block, 64 * 1024 * 1024);
+ $output_block = '';
+ while (1) {
+ my $buf;
+ my $bytes = sysread($keep, $buf, 1024 * 1024);
+ if (!defined $bytes) {
+ die "reading from arv-get: $!";
+ } elsif ($bytes == 0) {
+ # sysread returns 0 at the end of the pipe.
+ last;
+ } else {
+ # some bytes were read into buf.
+ $output_block .= $buf;
+ }
+ }
close $keep;
return $output_block;
}
Log (undef, "collate");
my ($child_out, $child_in);
- my $pid = open2($child_out, $child_in, $arv_cli, 'keep', 'put', '--raw');
+ my $pid = open2($child_out, $child_in, 'arv-put', '--raw');
my $joboutput;
for (@jobstep)
{
- next if (!exists $_->{'arvados_task'}->{output} ||
- !$_->{'arvados_task'}->{'success'} ||
- $_->{'exitcode'} != 0);
+ next if (!exists $_->{'arvados_task'}->{'output'} ||
+ !$_->{'arvados_task'}->{'success'});
my $output = $_->{'arvados_task'}->{output};
if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
{
sysread($child_out, $joboutput, 64 * 1024 * 1024);
chomp($joboutput);
} else {
- Log (undef, "timed out reading from 'arv keep put'");
+ Log (undef, "timed out reading from 'arv-put'");
}
}
waitpid($pid, 0);
- if ($joboutput)
- {
- Log (undef, "output $joboutput");
- $Job->update_attributes('output' => $joboutput) if $job_has_uuid;
- }
- else
- {
- Log (undef, "output undef");
- }
return $joboutput;
}
return if $justcheckpoint; # checkpointing is not relevant post-Warehouse.pm
$local_logfile->flush;
- my $cmd = "$arv_cli keep put --filename ''\Q$keep_logfile\E "
+ my $cmd = "arv-put --filename ''\Q$keep_logfile\E "
. quotemeta($local_logfile->filename);
my $loglocator = `$cmd`;
die "system $cmd failed: $?" if $?;
}
}
-sub find_docker_hash {
- # Given a Keep locator, search for a matching link to find the Docker hash
- # of the stored image.
+sub find_docker_image {
+ # Given a Keep locator, check to see if it contains a Docker image.
+ # If so, return its stream name and Docker hash.
+ # If not, return undef for both values.
my $locator = shift;
- my $links_result = $arv->{links}->{list}->execute(
- filters => [["head_uuid", "=", $locator],
- ["link_class", "=", "docker_image_hash"]],
- limit => 1);
- my $docker_hash;
- foreach my $link (@{$links_result->{items}}) {
- $docker_hash = lc($link->{name});
+ if (my $image = $arv->{collections}->{get}->execute(uuid => $locator)) {
+ my @file_list = @{$image->{files}};
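+    # Each files entry is a [stream_name, file_name, ...] tuple; a Docker image
+    # collection should contain exactly one file, named <image hash>.tar.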
+ if ((scalar(@file_list) == 1) &&
+ ($file_list[0][1] =~ /^([0-9A-Fa-f]{64})\.tar$/)) {
+ return ($file_list[0][0], $1);
+ }
}
- return $docker_hash;
+ return (undef, undef);
}
__DATA__
# checkout-and-build
use Fcntl ':flock';
+use File::Path qw( make_path );
my $destdir = $ENV{"CRUNCH_SRC"};
my $commit = $ENV{"CRUNCH_SRC_COMMIT"};
my $repo = $ENV{"CRUNCH_SRC_URL"};
+my $task_work = $ENV{"TASK_WORK"};
+
+make_path $task_work or die "Failed to create temporary working directory ($task_work): $!";
open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
flock L, LOCK_EX;