use POSIX ':sys_wait_h';
use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use Arvados;
+use Digest::MD5 qw(md5_hex);
use Getopt::Long;
use IPC::Open2;
use IO::Select;
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});
+# Which 'arv' command-line binary to use for Keep get/put operations.
+# The ARV_CLI environment variable lets callers point at a specific
+# install (e.g. for testing); otherwise fall back to 'arv' on PATH.
+my $arv_cli;
+
+if (defined $ENV{"ARV_CLI"}) {
+ $arv_cli = $ENV{"ARV_CLI"};
+}
+else {
+ $arv_cli = 'arv';
+}
+
+
my $force_unlock;
my $git_dir;
my $jobspec;
my $arv = Arvados->new('apiVersion' => 'v1');
+# Renamed from $metastream: the job log now goes to a local temp file
+# that save_meta() later uploads to Keep.  Declared (undef) up here so
+# Log() can test its truthiness before the handle is created below.
-my $metastream;
+my $local_logfile;
my $User = $arv->{'users'}->{'current'}->execute;
$job_id = $Job->{'uuid'};
my $keep_logfile = $job_id . '.log.txt';
-my $local_logfile = File::Temp->new();
+$local_logfile = File::Temp->new();
$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+# NOTE(review): this closing brace matches a block opened outside this
+# hunk's visible context.
}
-
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my ($docker_locator, $docker_hash);
+if ($docker_locator = $Job->{docker_image_locator}) {
+ # Translate the Keep collection locator into the image's Docker hash
+ # via API "docker_image_hash" links (see find_docker_hash).
+ $docker_hash = find_docker_hash($docker_locator);
+ if (!$docker_hash)
+ {
+ croak("No Docker image hash found from locator $docker_locator");
+ }
+ # Shell snippet run on each node: load the image tarball from Keep
+ # only if the hash is not already in the local Docker image cache.
+ my $docker_install_script = qq{
+if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
+ arv-get \Q$docker_locator/$docker_hash.tar\E | $docker_bin load
+fi
+};
+ # Fork so the parent can keep servicing freeze requests while the
+ # install runs across all allocated nodes via srun.
+ my $docker_pid = fork();
+ if ($docker_pid == 0)
+ {
+ srun (["srun", "--nodelist=" . join(',', @node)],
+ ["/bin/sh", "-ec", $docker_install_script]);
+ exit ($?);
+ }
+ # Poll every 100ms until the child exits, honoring freeze requests.
+ while (1)
+ {
+ last if $docker_pid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($docker_pid);
+ select (undef, undef, undef, 0.1);
+ }
+ # $? still holds the child's status from waitpid above.
+ if ($? != 0)
+ {
+ croak("Installing Docker image from $docker_locator returned exit code $?");
+ }
+}
foreach (qw (script script_version script_parameters runtime_constraints))
{
qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
"--job-name=$job_id.$id.$$",
);
- my @execargs = qw(sh);
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
$command .=
"&& perl -";
}
+# arv-mount exposes Keep as a FUSE filesystem for the task.
+# --allow-other is required so a Docker container running as a
+# different uid can read the mount.
- $command .=
- "&& exec arv-mount $ENV{TASK_KEEPMOUNT} --exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+ $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+ if ($docker_hash)
+ {
+ # Wrap the task in crunchstat (resource accounting from the
+ # container's cgroup, located via the docker cid file) and in
+ # `docker run` with stdin/stdout/stderr attached.
+ $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
+ $command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
+ # Dynamically configure the container to use the host system as its
+ # DNS server. Get the host's global addresses from the ip command,
+ # and turn them into docker --dns options using gawk.
+ $command .=
+ q{$(ip -o address show scope global |
+ gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
+ # Bind-mount the source tree, scratch space, and Keep mount into the
+ # container at the same paths they have on the host.
+ foreach my $env_key (qw(CRUNCH_SRC CRUNCH_TMP TASK_KEEPMOUNT))
+ {
+ $command .= "-v \Q$ENV{$env_key}:$ENV{$env_key}:rw\E ";
+ }
+ # Forward all ARVADOS_*/JOB_*/TASK_* environment variables into the
+ # container so the script sees the same job/task configuration.
+ while (my ($env_key, $env_val) = each %ENV)
+ {
+ if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
+ $command .= "-e \Q$env_key=$env_val\E ";
+ }
+ }
+ $command .= "\Q$docker_hash\E ";
+ } else {
+ # No Docker image: account for the whole host-level cgroup tree.
+ $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 "
+ }
+ # Unbuffer stdout/stderr so log lines reach the dispatcher promptly.
+ $command .= "stdbuf -o0 -e0 ";
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
exit (111);
release_allocation();
freeze();
+# Collate task outputs once, up front, so 'success' below is computed
+# from the actual collated result instead of a previously stored
+# 'output' attribute on the job record.
+my $collated_output = &collate_output();
+
if ($job_has_uuid) {
- $Job->update_attributes('output' => &collate_output(),
- 'running' => 0,
- 'success' => $Job->{'output'} && $main::success,
+ $Job->update_attributes('running' => 0,
+ 'success' => $collated_output && $main::success,
'finished_at' => scalar gmtime)
}
-if ($Job->{'output'})
+if ($collated_output)
{
eval {
+ # Fetch the collated manifest from Keep.
+ # NOTE(review): this uses the literal 'arv' command while other call
+ # sites in this change switched to $arv_cli — confirm whether this
+ # should use $arv_cli as well.
- my $manifest_text = `arv keep get ''\Q$Job->{'output'}\E`;
- $arv->{'collections'}->{'create'}->execute('collection' => {
- 'uuid' => $Job->{'output'},
- 'manifest_text' => $manifest_text,
+ open(my $orig_manifest, '-|', 'arv', 'keep', 'get', $collated_output)
+ or die "failed to get collated manifest: $!";
+ # Read the original manifest, and strip permission hints from it,
+ # so we can put the result in a Collection.
+ my @stripped_manifest_lines = ();
+ my $orig_manifest_text = '';
+ while (my $manifest_line = <$orig_manifest>) {
+ $orig_manifest_text .= $manifest_line;
+ my @words = split(/ /, $manifest_line, -1);
+ foreach my $ii (0..$#words) {
+ if ($words[$ii] =~ /^[0-9a-f]{32}\+/) {
+ # Drop "+A<40-hex signature>@<8-hex expiry>" hints from block
+ # locators; the collection uuid must hash the unsigned form.
+ $words[$ii] =~ s/\+A[0-9a-f]{40}@[0-9a-f]{8}\b//;
+ }
+ }
+ push(@stripped_manifest_lines, join(" ", @words));
+ }
+ my $stripped_manifest_text = join("", @stripped_manifest_lines);
+ # uuid = MD5 of the stripped manifest; the stored manifest_text keeps
+ # its permission hints so Keep will still serve the blocks.
+ my $output = $arv->{'collections'}->{'create'}->execute('collection' => {
+ 'uuid' => md5_hex($stripped_manifest_text),
+ 'manifest_text' => $orig_manifest_text,
});
+ $Job->update_attributes('output' => $output->{uuid});
if ($Job->{'output_is_persistent'}) {
$arv->{'links'}->{'create'}->execute('link' => {
'tail_kind' => 'arvados#user',
delete $proc{$pid};
# Load new tasks
+# Page through the job_tasks list API: repeat the request with an offset
+# equal to the number of items collected so far, until a page comes back
+# empty.  This picks up every child task even when the result set is
+# larger than one API page (the old single request could truncate).
- my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute(
- 'where' => {
- 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
- },
- 'order' => 'qsequence'
- );
- foreach my $arvados_task (@{$newtask_list->{'items'}}) {
+ my $newtask_list = [];
+ my $newtask_results;
+ do {
+ $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence',
+ 'offset' => scalar(@$newtask_list),
+ );
+ push(@$newtask_list, @{$newtask_results->{items}});
+ } while (@{$newtask_results->{items}});
+ foreach my $arvados_task (@$newtask_list) {
my $jobstep = {
'level' => $arvados_task->{'sequence'},
'failures' => 0,
my $hash = shift;
my ($keep, $child_out, $output_block);
+# Use the configurable CLI binary ($arv_cli) instead of hard-coded 'arv'.
- my $cmd = "arv keep get \Q$hash\E";
+ my $cmd = "$arv_cli keep get \Q$hash\E";
open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
+# NOTE(review): a single sysread caps the block at 64 MiB — presumably
+# Keep's maximum block size; confirm this bound.
sysread($keep, $output_block, 64 * 1024 * 1024);
close $keep;
Log (undef, "collate");
my ($child_out, $child_in);
+# Same $arv_cli substitution for the 'arv keep put --raw' pipeline.
- my $pid = open2($child_out, $child_in, 'arv', 'keep', 'put', '--raw');
+ my $pid = open2($child_out, $child_in, $arv_cli, 'keep', 'put', '--raw');
my $joboutput;
for (@jobstep)
{
$message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
$message .= "\n";
my $datetime;
+# $metastream was renamed to $local_logfile throughout; the timestamp is
+# only computed when the line will actually be written (local log file or
+# a terminal on STDERR).
- if ($metastream || -t STDERR) {
+ if ($local_logfile || -t STDERR) {
my @gmtime = gmtime;
$datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
$gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
}
print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
- if ($metastream) {
- print $metastream $datetime . " " . $message;
+ if ($local_logfile) {
+ print $local_logfile $datetime . " " . $message;
}
}
freeze() if @jobstep_todo;
collate_output() if @jobstep_todo;
cleanup();
- save_meta() if $metastream;
+ save_meta() if $local_logfile;
die;
}
return if $justcheckpoint; # checkpointing is not relevant post-Warehouse.pm
$local_logfile->flush;
+# Upload the local log file to Keep as "<job uuid>.log.txt" using the
+# configurable $arv_cli binary.
- my $cmd = "arv keep put --filename ''\Q$keep_logfile\E "
+ my $cmd = "$arv_cli keep put --filename ''\Q$keep_logfile\E "
. quotemeta($local_logfile->filename);
my $loglocator = `$cmd`;
die "system $cmd failed: $?" if $?;
+# Backticks leave a trailing newline on the locator; strip it before it
+# is logged (and, presumably, stored on the job record).
+ chomp($loglocator);
$local_logfile = undef; # the temp file is automatically deleted
Log (undef, "log manifest is $loglocator");
}
}
+sub find_docker_hash {
+ # Given a Keep locator, search for a matching link to find the Docker hash
+ # of the stored image.
+ my $locator = shift;
+ # Query links whose head_uuid is the locator and whose class is
+ # "docker_image_hash"; limit=>1 means the loop below runs at most once.
+ my $links_result = $arv->{links}->{list}->execute(
+ filters => [["head_uuid", "=", $locator],
+ ["link_class", "=", "docker_image_hash"]],
+ limit => 1);
+ my $docker_hash;
+ foreach my $link (@{$links_result->{items}}) {
+ # The hash is stored in the link's name; normalize to lowercase.
+ $docker_hash = lc($link->{name});
+ }
+ # Returns undef when no matching link exists (caller croaks on that).
+ return $docker_hash;
+}
+
__DATA__
#!/usr/bin/perl