Obtain job details from Arvados, run tasks on compute nodes (typically
invoked by scheduler on controller):
- crunch-job --uuid x-y-z
+ crunch-job --job x-y-z
Obtain job details from command line, run tasks on local machine
(typically invoked by application or developer on VM):
crunch-job --job '{"script_version":"/path/to/tree","script":"scriptname",...}'
+=head1 OPTIONS
+
+=over
+
+=item --force-unlock
+
+If the job is already locked, steal the lock and run it anyway.
+
+=item --git-dir
+
+Path to .git directory where the specified commit is found.
+
+=item --job-api-token
+
+Arvados API authorization token to use during the course of the job.
+
+=back
+
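+For example, a developer iterating on a script from a VM might combine
+these options with a literal job description; the repository path and
+token below are placeholders only:
+
+ crunch-job --git-dir /path/to/repo/.git \
+            --job-api-token tokenplaceholder \
+            --job '{"script_version":"treeish","script":"scriptname",...}'
+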
=head1 RUNNING JOBS LOCALLY
crunch-job's log messages appear on stderr along with the job tasks'
use strict;
-use DBI;
use POSIX ':sys_wait_h';
use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use Arvados;
use Getopt::Long;
use Warehouse;
use Warehouse::Stream;
+use IPC::System::Simple qw(capturex);
$ENV{"TMPDIR"} ||= "/tmp";
$ENV{"CRUNCH_TMP"} = $ENV{"TMPDIR"} . "/crunch-job";
-$ENV{"CRUNCH_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
-mkdir ($ENV{"CRUNCH_TMP"});
+if ($ENV{"USER"} ne "crunch" && $< != 0) {
+ # use a tmp dir unique for my uid
+ $ENV{"CRUNCH_TMP"} .= "-$<";
+}
+$ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
+$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
+mkdir ($ENV{"CRUNCH_TMP"});
+mkdir ($ENV{"JOB_WORK"});
my $force_unlock;
+my $git_dir;
my $jobspec;
+my $job_api_token;
my $resume_stash;
GetOptions('force-unlock' => \$force_unlock,
+ 'git-dir=s' => \$git_dir,
'job=s' => \$jobspec,
+ 'job-api-token=s' => \$job_api_token,
'resume-stash=s' => \$resume_stash,
);
+if (defined $job_api_token) {
+ $ENV{ARVADOS_API_TOKEN} = $job_api_token;
+}
+
my $have_slurm = exists $ENV{SLURM_JOBID} && exists $ENV{SLURM_NODELIST};
my $job_has_uuid = $jobspec =~ /^[-a-z\d]+$/;
+my $local_job = !$job_has_uuid;
$SIG{'HUP'} = sub
my $arv = Arvados->new;
-my $metastream = Warehouse::Stream->new;
+my $metastream = Warehouse::Stream->new(whc => new Warehouse);
$metastream->clear;
$metastream->write_start('log.txt');
-my $User = {};
+my $User = $arv->{'users'}->{'current'}->execute;
+
my $Job = {};
my $job_id;
my $dbh;
if ($job_has_uuid)
{
$Job = $arv->{'jobs'}->{'get'}->execute('uuid' => $jobspec);
- $User = $arv->{'users'}->{'current'}->execute;
if (!$force_unlock) {
- if ($Job->{'is_locked_by'}) {
- croak("Job is locked: " . $Job->{'is_locked_by'});
+ if ($Job->{'is_locked_by_uuid'}) {
+ croak("Job is locked: " . $Job->{'is_locked_by_uuid'});
}
if ($Job->{'success'} ne undef) {
croak("Job 'success' flag (" . $Job->{'success'} . ") is not null");
qw(script script_version script_parameters);
}
- if (!defined $Job->{'uuid'}) {
- chomp ($Job->{'uuid'} = sprintf ("%s-t%d-p%d", `hostname -s`, time, $$));
- }
+ $Job->{'is_locked_by_uuid'} = $User->{'uuid'};
+ $Job->{'started_at'} = gmtime;
+
+ $Job = $arv->{'jobs'}->{'create'}->execute('job' => $Job);
+
+ $job_has_uuid = 1;
}
$job_id = $Job->{'uuid'};
{
# Claim this job, and make sure nobody else does
- $Job->{'is_locked_by'} = $User->{'uuid'};
- $Job->{'started_at'} = time;
+ $Job->{'is_locked_by_uuid'} = $User->{'uuid'};
+ $Job->{'started_at'} = gmtime;
$Job->{'running'} = 1;
$Job->{'success'} = undef;
$Job->{'tasks_summary'} = { 'failed' => 0,
'todo' => 1,
'running' => 0,
'done' => 0 };
- unless ($Job->save() && $Job->{'is_locked_by'} == $User->{'uuid'}) {
- croak("Error while updating / locking job");
+ if ($job_has_uuid) {
+ unless ($Job->save() && $Job->{'is_locked_by_uuid'} eq $User->{'uuid'}) {
+ croak("Error while updating / locking job");
+ }
}
}
'qsequence' => 0,
'parameters' => {},
});
- push @jobstep, { level => 0,
- attempts => 0,
- arvados_task => $first_task,
+ push @jobstep, { 'level' => 0,
+ 'attempts' => 0,
+ 'arvados_task' => $first_task,
};
push @jobstep_todo, 0;
}
my $build_script;
-do {
- local $/ = undef;
- $build_script = <DATA>;
-};
-$ENV{"CRUNCH_SRC_COMMIT"} = $Job->{revision};
+$ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};
-my $skip_install = (!$have_slurm && $Job->{revision} =~ m{^/});
+my $skip_install = ($local_job && $Job->{script_version} =~ m{^/});
if ($skip_install)
{
- $ENV{"CRUNCH_SRC"} = $Job->{revision};
+ $ENV{"CRUNCH_SRC"} = $Job->{script_version};
}
else
{
- Log (undef, "Install revision ".$Job->{revision});
+ do {
+ local $/ = undef;
+ $build_script = <DATA>;
+ };
+ Log (undef, "Install revision ".$Job->{script_version});
my $nodelist = join(",", @node);
# Clean out crunch_tmp/work and crunch_tmp/opt
if ($cleanpid == 0)
{
srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
- ['bash', '-c', 'if mount | grep -q $CRUNCH_WORK/; then sudo /bin/umount $CRUNCH_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $CRUNCH_WORK $CRUNCH_TMP/opt']);
+ ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt']);
exit (1);
}
while (1)
$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
my $commit;
+ my $git_archive;
my $treeish = $Job->{'script_version'};
- my $repo = $ENV{'CRUNCH_DEFAULT_GIT_DIR'};
- # Todo: let script_version specify alternate repo
+ my $repo = $git_dir || $ENV{'CRUNCH_DEFAULT_GIT_DIR'};
+ # Todo: let script_version specify repository instead of expecting
+ # parent process to figure it out.
$ENV{"CRUNCH_SRC_URL"} = $repo;
# Create/update our clone of the remote git repo
chomp $gitlog;
if ($gitlog =~ /^[a-f0-9]{40}$/) {
$commit = $gitlog;
- Log (undef, "Using commit $commit for revision $treeish");
+ Log (undef, "Using commit $commit for script_version $treeish");
}
}
Log (undef, "Using commit $commit for tree-ish $treeish");
if ($commit ne $treeish) {
$Job->{'script_version'} = $commit;
- $Job->save() or croak("Error while updating job");
+ !$job_has_uuid or $Job->save() or croak("Error while updating job");
}
}
}
$ENV{"CRUNCH_SRC_COMMIT"} = $commit;
@execargs = ("sh", "-c",
"mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
+ $git_archive = `cd $ENV{CRUNCH_SRC} && git archive $commit`;
}
else {
croak ("could not figure out commit id for $treeish");
my $installpid = fork();
if ($installpid == 0)
{
- srun (\@srunargs, \@execargs, {}, $build_script);
+ srun (\@srunargs, \@execargs, {}, $build_script . $git_archive);
exit (1);
}
while (1)
foreach (qw (script script_version script_parameters resource_limits))
{
- Log (undef, $_ . " " . $Job->{$_});
+ Log (undef,
+ "$_ " .
+ (ref($Job->{$_}) ? JSON::encode_json($Job->{$_}) : $Job->{$_}));
}
foreach (split (/\n/, $Job->{knobs}))
{
}
$ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
$ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
- $ENV{"TASK_TMPDIR"} = $ENV{"CRUNCH_WORK"}.$slot[$childslot]->{cpu};
+ $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/".$slot[$childslot]->{cpu};
+ $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
$ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
$ENV{"GZIP"} = "-n";
my @execargs = qw(sh);
my $build_script_to_send = "";
my $command =
- "mkdir -p $ENV{CRUNCH_TMP}/revision "
+ "mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} "
."&& cd $ENV{CRUNCH_TMP} ";
if ($build_script)
{
$command .=
"&& perl -";
}
- elsif (!$skip_install)
- {
- $command .=
- "&& "
- ."( "
- ." [ -e '$ENV{CRUNCH_INSTALL}/.tested' ] "
- ."|| "
- ." ( svn export --quiet '$ENV{INSTALL_REPOS}/installrevision' "
- ." && ./installrevision "
- ." ) "
- .") ";
- }
$ENV{"PYTHONPATH"} = "$ENV{CRUNCH_SRC}/sdk/python"; # xxx hack
$command .=
"&& exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
}
# give up if no nodes are succeeding
- if (!grep { $_->{node}->{losing_streak} == 0 } @slot) {
+ if (!grep { $_->{node}->{losing_streak} == 0 &&
+ $_->{node}->{hold_count} < 4 } @slot) {
my $message = "Every node has failed -- giving up on this round";
Log (undef, $message);
last THISROUND;
release_allocation();
freeze();
+$Job->reload;
$Job->{'output'} = &collate_output();
-$Job->{'success'} = 0 if !$Job->{'output'};
-$Job->save;
+$Job->{'running'} = 0;
+$Job->{'success'} = $Job->{'output'} && $success;
+$Job->{'finished_at'} = gmtime;
+$Job->save if $job_has_uuid;
if ($Job->{'output'})
{
- $arv->{'collections'}->{'create'}->execute('collection' => {
- 'uuid' => $Job->{'output'},
- 'manifest_text' => system("whget", $Job->{arvados_task}->{output}),
- });;
+ eval {
+ my $manifest_text = capturex("whget", $Job->{'output'});
+ $arv->{'collections'}->{'create'}->execute('collection' => {
+ 'uuid' => $Job->{'output'},
+ 'manifest_text' => $manifest_text,
+ });
+ };
+ if ($@) {
+ Log (undef, "Failed to register output manifest: $@");
+ }
}
-
Log (undef, "finish");
-$Job->{'success'} = $Job->{'output'} && $success;
-$Job->save;
-
save_meta();
exit 0;
$Job->{'tasks_summary'}->{'todo'} = $todo;
$Job->{'tasks_summary'}->{'done'} = $done;
$Job->{'tasks_summary'}->{'running'} = $running;
- $Job->save;
+ $Job->save if $job_has_uuid;
Log (undef, "status: $done done, $running running, $todo todo");
$progress_is_dirty = 0;
}
my $exitcode = $?;
my $exitinfo = "exit $exitcode";
- $Jobstep->{arvados_task}->reload;
- my $success = $Jobstep->{arvados_task}->{success};
+ $Jobstep->{'arvados_task'}->reload;
+ my $success = $Jobstep->{'arvados_task'}->{success};
Log ($jobstepid, "child $pid on $whatslot $exitinfo success=$success");
if (!defined $success) {
# task did not indicate one way or the other --> fail
- $Jobstep->{arvados_task}->{success} = 0;
- $Jobstep->{arvados_task}->save;
+ $Jobstep->{'arvados_task'}->{success} = 0;
+ $Jobstep->{'arvados_task'}->save;
$success = 0;
}
if (!$success)
{
- --$Jobstep->{attempts} if $Jobstep->{node_fail};
+ my $no_incr_attempts;
+ $no_incr_attempts = 1 if $Jobstep->{node_fail};
+
++$thisround_failed;
++$thisround_failed_multiple if $Jobstep->{attempts} > 1;
$elapsed < 5 &&
$Jobstep->{attempts} > 1) {
Log ($jobstepid, "blaming failure on suspect node " . $slot[$proc{$pid}->{slot}]->{node}->{name} . " instead of incrementing jobstep attempts");
+ $no_incr_attempts = 1;
- --$Jobstep->{attempts};
}
ban_node_by_slot($proc{$pid}->{slot});
push @jobstep_todo, $jobstepid;
Log ($jobstepid, "failure in $elapsed seconds");
+
+ --$Jobstep->{attempts} if $no_incr_attempts;
$Job->{'tasks_summary'}->{'failed'}++;
}
else
$Jobstep->{exitcode} = $exitcode;
$Jobstep->{finishtime} = time;
process_stderr ($jobstepid, $success);
- Log ($jobstepid, "output " . $Jobstep->{arvados_task}->{output});
+ Log ($jobstepid, "output " . $Jobstep->{'arvados_task'}->{output});
close $reader{$jobstepid};
delete $reader{$jobstepid};
delete $proc{$pid};
# Load new tasks
- my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute('where' => {
- 'created_by_job_task' => $Jobstep->{arvados_task}->{uuid}
- });
+ my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute(
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence'
+ );
foreach my $arvados_task (@{$newtask_list->{'items'}}) {
my $jobstep = {
'level' => $arvados_task->{'sequence'},
while ($jobstep[$job]->{stderr} =~ /^(.*?)\n/) {
my $line = $1;
- if ($line =~ /\+\+\+mr/) {
- last;
- }
substr $jobstep[$job]->{stderr}, 0, 1+length($line), "";
Log ($job, "stderr $line");
- if ($line =~ /srun: error: (SLURM job $ENV{SLURM_JOBID} has expired) /) {
+ if ($line =~ /srun: error: (SLURM job $ENV{SLURM_JOBID} has expired|Unable to confirm allocation for job) /) {
# whoa.
$main::please_freeze = 1;
}
my $joboutput;
for (@jobstep)
{
- next if !exists $_->{arvados_task}->{output} || $_->{exitcode} != 0;
- my $output = $_->{arvados_task}->{output};
+ next if (!exists $_->{'arvados_task'}->{output} ||
+ !$_->{'arvados_task'}->{'success'} ||
+ $_->{'exitcode'} != 0);
+ my $output = $_->{'arvados_task'}->{output};
if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
{
$output_in_keep ||= $output =~ / [0-9a-f]{32}\S*\+K/;
{
Log (undef, "output $joboutput");
$Job->{'output'} = $joboutput;
- $Job->save;
+ $Job->save if $job_has_uuid;
}
else
{
}
-sub reconnect_database
-{
- return if !$job_has_uuid;
- return if ($dbh && $dbh->do ("select now()"));
- for (1..16)
- {
- $dbh = DBI->connect(@$Warehouse::Server::DatabaseDSN);
- if ($dbh) {
- $dbh->{InactiveDestroy} = 1;
- return;
- }
- warn ($DBI::errstr);
- sleep $_;
- }
- croak ($DBI::errstr) if !$dbh;
-}
-
-
-sub dbh_do
-{
- return 1 if !$job_has_uuid;
- my $ret = $dbh->do (@_);
- return $ret unless (!$ret && $DBI::errstr =~ /server has gone away/);
- reconnect_database();
- return $dbh->do (@_);
-}
-
-
sub croak
{
my ($package, $file, $line) = caller;
$Job->reload;
$Job->{'running'} = 0;
$Job->{'success'} = 0;
- $Job->{'finished_at'} = time;
+ $Job->{'finished_at'} = gmtime;
$Job->save;
}
undef $metastream if !$justcheckpoint; # otherwise Log() will try to use it
Log (undef, "meta key is $loglocator");
$Job->{'log'} = $loglocator;
- $Job->save;
+ $Job->save if $job_has_uuid;
}
{
Log (undef, "Freeze not implemented");
return;
-
- my $whc; # todo
- Log (undef, "freeze");
-
- my $freezer = new Warehouse::Stream (whc => $whc);
- $freezer->clear;
- $freezer->name (".");
- $freezer->write_start ("state.txt");
-
- $freezer->write_data (join ("\n",
- "job $Job->{uuid}",
- map
- {
- $_ . "=" . freezequote($Job->{$_})
- } grep { $_ ne "id" } keys %$Job) . "\n\n");
-
- foreach my $Jobstep (@jobstep)
- {
- my $str = join ("\n",
- map
- {
- $_ . "=" . freezequote ($Jobstep->{$_})
- } grep {
- $_ !~ /^stderr|slotindex|node_fail/
- } keys %$Jobstep);
- $freezer->write_data ($str."\n\n");
- }
- if (@jobstep_tomerge)
- {
- $freezer->write_data
- ("merge $jobstep_tomerge_level "
- . freezequote (join ("\n",
- map { freezequote ($_) } @jobstep_tomerge))
- . "\n\n");
- }
-
- $freezer->write_finish;
- my $frozentokey = $freezer->as_key;
- undef $freezer;
- Log (undef, "frozento key is $frozentokey");
- dbh_do ("update mrjob set frozentokey=? where id=?", undef,
- $frozentokey, $job_id);
- my $kfrozentokey = $whc->store_in_keep (hash => $frozentokey, nnodes => 3);
- Log (undef, "frozento+K key is $kfrozentokey");
- return $frozentokey;
}
{
$Job->{$_} = $frozenjob->{$_};
}
- $Job->save;
+ $Job->save if $job_has_uuid;
}
# Don't start any new jobsteps on this node for 60 seconds
my $slotid = shift;
$slot[$slotid]->{node}->{hold_until} = 60 + scalar time;
+ $slot[$slotid]->{node}->{hold_count}++;
Log (undef, "backing off node " . $slot[$slotid]->{node}->{name} . " for 60 seconds");
}
exit 0;
}
+unlink "$destdir.commit";
open STDOUT, ">", "$destdir.log";
open STDERR, ">&STDOUT";
-if (-d "$destdir/.git") {
- chdir $destdir or die "chdir $destdir: $!";
- if (0 != system (qw(git remote set-url origin), $repo)) {
- # awful... for old versions of git that don't know "remote set-url"
- shell_or_die (q(perl -pi~ -e '$_="\turl = ).$repo.q(\n" if /url = /' .git/config));
- }
-}
-elsif ($repo && $commit)
-{
- shell_or_die('git', 'clone', $repo, $destdir);
- chdir $destdir or die "chdir $destdir: $!";
- shell_or_die(qw(git config clean.requireForce false));
-}
-else {
- die "$destdir does not exist, and no repo/commit specified -- giving up";
-}
-
-if ($commit) {
- unlink "$destdir.commit";
- shell_or_die (qw(git stash));
- shell_or_die (qw(git clean -d -x));
- shell_or_die (qw(git fetch origin));
- shell_or_die (qw(git checkout), $commit);
+mkdir $destdir;
+open TARX, "|-", "tar", "-C", $destdir, "-xf", "-";
+print TARX <DATA>;
+if(!close(TARX)) {
+ die "'tar -C $destdir -xf -' exited $?: $!";
}
my $pwd;
system (@_) == 0
or die "@_ failed: $! exit 0x".sprintf("%x",$?);
}
+
+__DATA__