#!/usr/bin/perl
-# -*- mode: perl; perl-indent-level: 2; -*-
+# -*- mode: perl; perl-indent-level: 2; indent-tabs-mode: nil; -*-
=head1 NAME
Arvados API authorization token to use during the course of the job.
+=item --no-clear-tmp
+
+Do not clear per-job/task temporary directories during initial job
+setup. This can speed up development and debugging when running jobs
+locally.
+
=back
=head1 RUNNING JOBS LOCALLY
use Getopt::Long;
use IPC::Open2;
use IO::Select;
+use File::Temp;
+use Fcntl ':flock';
$ENV{"TMPDIR"} ||= "/tmp";
unless (defined $ENV{"CRUNCH_TMP"}) {
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});
+my $arv_cli;
+
+if (defined $ENV{"ARV_CLI"}) {
+ $arv_cli = $ENV{"ARV_CLI"};
+}
+else {
+ $arv_cli = 'arv';
+}
+
my $force_unlock;
my $git_dir;
my $jobspec;
my $job_api_token;
+my $no_clear_tmp;
my $resume_stash;
GetOptions('force-unlock' => \$force_unlock,
'git-dir=s' => \$git_dir,
'job=s' => \$jobspec,
'job-api-token=s' => \$job_api_token,
+ 'no-clear-tmp' => \$no_clear_tmp,
'resume-stash=s' => \$resume_stash,
);
my $arv = Arvados->new('apiVersion' => 'v1');
-my $metastream;
+my $local_logfile;
my $User = $arv->{'users'}->{'current'}->execute;
}
$job_id = $Job->{'uuid'};
-# $metastream = Warehouse::Stream->new(whc => new Warehouse);
-# $metastream->clear;
-# $metastream->name('.');
-# $metastream->write_start($job_id . '.log.txt');
-
+my $keep_logfile = $job_id . '.log.txt';
+$local_logfile = File::Temp->new();
$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
}
if (exists $ENV{SLURM_NODELIST})
{
- push @sinfo, `sinfo -h --format='%c %N' --nodes='$ENV{SLURM_NODELIST}'`;
+ push @sinfo, `sinfo -h --format='%c %N' --nodes=\Q$ENV{SLURM_NODELIST}\E`;
}
foreach (@sinfo)
{
}
+if (!$have_slurm)
+{
+ must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+}
+
+
my $build_script;
my $skip_install = ($local_job && $Job->{script_version} =~ m{^/});
if ($skip_install)
{
+ if (!defined $no_clear_tmp) {
+ my $clear_tmp_cmd = 'rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*';
+ system($clear_tmp_cmd) == 0
+ or croak ("`$clear_tmp_cmd` failed: ".($?>>8));
+ }
$ENV{"CRUNCH_SRC"} = $Job->{script_version};
for my $src_path ("$ENV{CRUNCH_SRC}/arvados/sdk/python") {
if (-d $src_path) {
Log (undef, "Install revision ".$Job->{script_version});
my $nodelist = join(",", @node);
- # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
+ if (!defined $no_clear_tmp) {
+ # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
- my $cleanpid = fork();
- if ($cleanpid == 0)
- {
- srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
- ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
- exit (1);
- }
- while (1)
- {
- last if $cleanpid == waitpid (-1, WNOHANG);
- freeze_if_want_freeze ($cleanpid);
- select (undef, undef, undef, 0.1);
+ my $cleanpid = fork();
+ if ($cleanpid == 0)
+ {
+ srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
+ ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
+ exit (1);
+ }
+ while (1)
+ {
+ last if $cleanpid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($cleanpid);
+ select (undef, undef, undef, 0.1);
+ }
+ Log (undef, "Clean-work-dir exited $?");
}
- Log (undef, "Clean-work-dir exited $?");
# Install requested code version
my $commit;
my $git_archive;
my $treeish = $Job->{'script_version'};
- my $repo = $git_dir || $ENV{'CRUNCH_DEFAULT_GIT_DIR'};
- # Todo: let script_version specify repository instead of expecting
- # parent process to figure it out.
- $ENV{"CRUNCH_SRC_URL"} = $repo;
- # Create/update our clone of the remote git repo
+ # If we're running under crunch-dispatch, it will have pulled the
+ # appropriate source tree into its own repository, and given us that
+ # repo's path as $git_dir. If we're running a "local" job, and a
+ # script_version was specified, it's up to the user to provide the
+ # full path to a local repository in Job->{repository}.
+ #
+ # TODO: Accept URLs too, not just local paths. Use git-ls-remote and
+ # git-archive --remote where appropriate.
+ #
+ # TODO: Accept a locally-hosted Arvados repository by name or
+ # UUID. Use arvados.v1.repositories.list or .get to figure out the
+ # appropriate fetch-url.
+ my $repo = $git_dir || $ENV{'CRUNCH_DEFAULT_GIT_DIR'} || $Job->{'repository'};
+
+ $ENV{"CRUNCH_SRC_URL"} = $repo;
- if (!-d $ENV{"CRUNCH_SRC"}) {
- system(qw(git clone), $repo, $ENV{"CRUNCH_SRC"}) == 0
- or croak ("git clone $repo failed: exit ".($?>>8));
- system("cd $ENV{CRUNCH_SRC} && git config clean.requireForce false");
+ if (-d "$repo/.git") {
+ # We were given a working directory, but we are only interested in
+ # the index.
+ $repo = "$repo/.git";
}
- `cd $ENV{CRUNCH_SRC} && git remote set-url origin \"\$CRUNCH_SRC_URL\" && git fetch -q origin`;
# If this looks like a subversion r#, look for it in git-svn commit messages
if ($treeish =~ m{^\d{1,4}$}) {
- my $gitlog = `cd $ENV{CRUNCH_SRC} && git log --pretty="format:%H" --grep="git-svn-id:.*\@$treeish " origin/master`;
+ my $gitlog = `git --git-dir=\Q$repo\E log --pretty="format:%H" --grep="git-svn-id:.*\@"\Q$treeish\E" " master`;
chomp $gitlog;
if ($gitlog =~ /^[a-f0-9]{40}$/) {
$commit = $gitlog;
# If that didn't work, try asking git to look it up as a tree-ish.
if (!defined $commit) {
-
- my $cooked_treeish = $treeish;
- if ($treeish !~ m{^[0-9a-f]{5,}$}) {
- # Looks like a git branch name -- make sure git knows it's
- # relative to the remote repo
- $cooked_treeish = "origin/$treeish";
- }
-
- my $found = `cd $ENV{CRUNCH_SRC} && git rev-list -1 $cooked_treeish`;
+ my $found = `git --git-dir=\Q$repo\E rev-list -1 ''\Q$treeish\E`;
chomp $found;
if ($found =~ /^[0-9a-f]{40}$/s) {
$commit = $found;
$ENV{"CRUNCH_SRC_COMMIT"} = $commit;
@execargs = ("sh", "-c",
"mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
- $git_archive = `cd $ENV{CRUNCH_SRC} && git archive $commit`;
+ $git_archive = `git --git-dir=\Q$repo\E archive ''\Q$commit\E`;
}
else {
croak ("could not figure out commit id for $treeish");
Log (undef, "Install exited $?");
}
+if (!$have_slurm)
+{
+ # Grab our lock again (we might have deleted and re-created CRUNCH_TMP above)
+ must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+}
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my $docker_image = $Job->{runtime_constraints}->{docker_image} || "";
+if ($docker_image) {
+ my $docker_pid = fork();
+ if ($docker_pid == 0)
+ {
+    srun (["srun", "--nodelist=" . join(',', @node)],
+ [$docker_bin, 'pull', $docker_image]);
+ exit ($?);
+ }
+ while (1)
+ {
+ last if $docker_pid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($docker_pid);
+ select (undef, undef, undef, 0.1);
+ }
+ # If the Docker image was specified as a hash, pull will fail.
+ # Ignore that error. We'll see what happens when we try to run later.
+ if (($? != 0) && ($docker_image !~ /^[0-9a-fA-F]{5,64}$/))
+ {
+ croak("Installing Docker image $docker_image returned exit code $?");
+ }
+}
foreach (qw (script script_version script_parameters runtime_constraints))
{
}
$ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
$ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
- $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/".$slot[$childslot]->{cpu};
- $ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}."/keep";
+ $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/$id.$$";
+ $ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
$ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
$ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
$ENV{"PATH"} = $ENV{"CRUNCH_INSTALL"} . "/bin:" . $ENV{"PATH"};
qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
"--job-name=$job_id.$id.$$",
);
- my @execargs = qw(sh);
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
$command .=
"&& perl -";
}
- $command .=
- "&& exec arv-mount $ENV{TASK_KEEPMOUNT} --exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+ $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+ if ($docker_image)
+ {
+ $command .= "$docker_bin run -i -a stdin -a stdout -a stderr ";
+ # Dynamically configure the container to use the host system as its
+ # DNS server. Get the host's global addresses from the ip command,
+ # and turn them into docker --dns options using gawk.
+ $command .=
+ q{$(ip -o address show scope global |
+ gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
+ foreach my $env_key (qw(CRUNCH_SRC CRUNCH_TMP TASK_KEEPMOUNT))
+ {
+ $command .= "-v \Q$ENV{$env_key}:$ENV{$env_key}:rw\E ";
+ }
+ while (my ($env_key, $env_val) = each %ENV)
+ {
+ $command .= "-e \Q$env_key=$env_val\E ";
+ }
+ $command .= "\Q$docker_image\E ";
+ }
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
exit (111);
if ($Job->{'output'})
{
eval {
- my $manifest_text = `arv keep get $Job->{'output'}`;
+ my $manifest_text = `arv keep get ''\Q$Job->{'output'}\E`;
$arv->{'collections'}->{'create'}->execute('collection' => {
'uuid' => $Job->{'output'},
'manifest_text' => $manifest_text,
});
+ if ($Job->{'output_is_persistent'}) {
+ $arv->{'links'}->{'create'}->execute('link' => {
+ 'tail_kind' => 'arvados#user',
+ 'tail_uuid' => $User->{'uuid'},
+ 'head_kind' => 'arvados#collection',
+ 'head_uuid' => $Job->{'output'},
+ 'link_class' => 'resources',
+ 'name' => 'wants',
+ });
+ }
};
if ($@) {
Log (undef, "Failed to register output manifest: $@");
delete $proc{$pid};
# Load new tasks
- my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute(
- 'where' => {
- 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
- },
- 'order' => 'qsequence'
- );
- foreach my $arvados_task (@{$newtask_list->{'items'}}) {
+ my $newtask_list = [];
+ my $newtask_results;
+ do {
+ $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence',
+ 'offset' => scalar(@$newtask_list),
+ );
+ push(@$newtask_list, @{$newtask_results->{items}});
+ } while (@{$newtask_results->{items}});
+ foreach my $arvados_task (@$newtask_list) {
my $jobstep = {
'level' => $arvados_task->{'sequence'},
'failures' => 0,
sub fetch_block
{
my $hash = shift;
- my ($child_out, $child_in, $output_block);
+ my ($keep, $child_out, $output_block);
- my $pid = open2($child_out, $child_in, 'arv', 'keep', 'get', $hash);
- sysread($child_out, $output_block, 64 * 1024 * 1024);
- waitpid($pid, 0);
+ my $cmd = "$arv_cli keep get \Q$hash\E";
+ open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
+ sysread($keep, $output_block, 64 * 1024 * 1024);
+ close $keep;
return $output_block;
}
Log (undef, "collate");
my ($child_out, $child_in);
- my $pid = open2($child_out, $child_in, 'arv', 'keep', 'put', '--raw');
+ my $pid = open2($child_out, $child_in, $arv_cli, 'keep', 'put', '--raw');
my $joboutput;
for (@jobstep)
{
}
else
{
- print $child_in "XXX fetch_block($output) failed XXX\n";
+ Log (undef, "XXX fetch_block($output) failed XXX");
$main::success = 0;
}
}
+ $child_in->close;
+
if (!defined $joboutput) {
my $s = IO::Select->new($child_out);
- sysread($child_out, $joboutput, 64 * 1024 * 1024) if $s->can_read(0);
+ if ($s->can_read(120)) {
+ sysread($child_out, $joboutput, 64 * 1024 * 1024);
+ chomp($joboutput);
+ } else {
+ Log (undef, "timed out reading from 'arv keep put'");
+ }
}
- $child_in->close;
waitpid($pid, 0);
if ($joboutput)
$message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
$message .= "\n";
my $datetime;
- if ($metastream || -t STDERR) {
+ if ($local_logfile || -t STDERR) {
my @gmtime = gmtime;
$datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
$gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
}
print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
- # return if !$metastream;
- # $metastream->write_data ($datetime . " " . $message);
+ if ($local_logfile) {
+ print $local_logfile $datetime . " " . $message;
+ }
}
freeze() if @jobstep_todo;
collate_output() if @jobstep_todo;
cleanup();
- save_meta() if $metastream;
+ save_meta() if $local_logfile;
die;
}
sub save_meta
{
-# my $justcheckpoint = shift; # false if this will be the last meta saved
-# my $m = $metastream;
-# $m = $m->copy if $justcheckpoint;
-# $m->write_finish;
-# my $whc = Warehouse->new;
-# my $loglocator = $whc->store_block ($m->as_string);
-# $arv->{'collections'}->{'create'}->execute('collection' => {
-# 'uuid' => $loglocator,
-# 'manifest_text' => $m->as_string,
-# });
-# undef $metastream if !$justcheckpoint; # otherwise Log() will try to use it
-# Log (undef, "log manifest is $loglocator");
-# $Job->{'log'} = $loglocator;
-# $Job->update_attributes('log', $loglocator) if $job_has_uuid;
+ my $justcheckpoint = shift; # false if this will be the last meta saved
+ return if $justcheckpoint; # checkpointing is not relevant post-Warehouse.pm
+
+ $local_logfile->flush;
+ my $cmd = "$arv_cli keep put --filename ''\Q$keep_logfile\E "
+ . quotemeta($local_logfile->filename);
+ my $loglocator = `$cmd`;
+ die "system $cmd failed: $?" if $?;
+ chomp($loglocator);
+
+ $local_logfile = undef; # the temp file is automatically deleted
+ Log (undef, "log manifest is $loglocator");
+ $Job->{'log'} = $loglocator;
+ $Job->update_attributes('log', $loglocator) if $job_has_uuid;
}
sub thaw
{
croak ("Thaw not implemented");
-
- # my $whc;
- # my $key = shift;
- # Log (undef, "thaw from $key");
-
- # @jobstep = ();
- # @jobstep_done = ();
- # @jobstep_todo = ();
- # @jobstep_tomerge = ();
- # $jobstep_tomerge_level = 0;
- # my $frozenjob = {};
-
- # my $stream = new Warehouse::Stream ( whc => $whc,
- # hash => [split (",", $key)] );
- # $stream->rewind;
- # while (my $dataref = $stream->read_until (undef, "\n\n"))
- # {
- # if ($$dataref =~ /^job /)
- # {
- # foreach (split ("\n", $$dataref))
- # {
- # my ($k, $v) = split ("=", $_, 2);
- # $frozenjob->{$k} = freezeunquote ($v);
- # }
- # next;
- # }
-
- # if ($$dataref =~ /^merge (\d+) (.*)/)
- # {
- # $jobstep_tomerge_level = $1;
- # @jobstep_tomerge
- # = map { freezeunquote ($_) } split ("\n", freezeunquote($2));
- # next;
- # }
-
- # my $Jobstep = { };
- # foreach (split ("\n", $$dataref))
- # {
- # my ($k, $v) = split ("=", $_, 2);
- # $Jobstep->{$k} = freezeunquote ($v) if $k;
- # }
- # $Jobstep->{'failures'} = 0;
- # push @jobstep, $Jobstep;
-
- # if ($Jobstep->{exitcode} eq "0")
- # {
- # push @jobstep_done, $#jobstep;
- # }
- # else
- # {
- # push @jobstep_todo, $#jobstep;
- # }
- # }
-
- # foreach (qw (script script_version script_parameters))
- # {
- # $Job->{$_} = $frozenjob->{$_};
- # }
- # $Job->save if $job_has_uuid;
}
Log (undef, "backing off node " . $slot[$slotid]->{node}->{name} . " for 60 seconds");
}
+sub must_lock_now
+{
+ my ($lockfile, $error_message) = @_;
+ open L, ">", $lockfile or croak("$lockfile: $!");
+ if (!flock L, LOCK_EX|LOCK_NB) {
+ croak("Can't lock $lockfile: $error_message\n");
+ }
+}
+
__DATA__
#!/usr/bin/perl